ntb_hw.c revision 291031
1/*- 2 * Copyright (C) 2013 Intel Corporation 3 * Copyright (C) 2015 EMC Corporation 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: head/sys/dev/ntb/ntb_hw/ntb_hw.c 291031 2015-11-18 22:20:21Z cem $"); 30 31#include <sys/param.h> 32#include <sys/kernel.h> 33#include <sys/systm.h> 34#include <sys/bus.h> 35#include <sys/endian.h> 36#include <sys/malloc.h> 37#include <sys/module.h> 38#include <sys/queue.h> 39#include <sys/rman.h> 40#include <sys/sbuf.h> 41#include <sys/sysctl.h> 42#include <vm/vm.h> 43#include <vm/pmap.h> 44#include <machine/bus.h> 45#include <machine/pmap.h> 46#include <machine/resource.h> 47#include <dev/pci/pcireg.h> 48#include <dev/pci/pcivar.h> 49 50#include "ntb_regs.h" 51#include "ntb_hw.h" 52 53/* 54 * The Non-Transparent Bridge (NTB) is a device on some Intel processors that 55 * allows you to connect two systems using a PCI-e link. 56 * 57 * This module contains the hardware abstraction layer for the NTB. It allows 58 * you to send and recieve interrupts, map the memory windows and send and 59 * receive messages in the scratch-pad registers. 60 * 61 * NOTE: Much of the code in this module is shared with Linux. Any patches may 62 * be picked up and redistributed in Linux with a dual GPL/BSD license. 63 */ 64 65#define MAX_MSIX_INTERRUPTS MAX(XEON_DB_COUNT, ATOM_DB_COUNT) 66 67#define NTB_HB_TIMEOUT 1 /* second */ 68#define ATOM_LINK_RECOVERY_TIME 500 /* ms */ 69 70#define DEVICE2SOFTC(dev) ((struct ntb_softc *) device_get_softc(dev)) 71 72enum ntb_device_type { 73 NTB_XEON, 74 NTB_ATOM 75}; 76 77/* ntb_conn_type are hardware numbers, cannot change. 
*/ 78enum ntb_conn_type { 79 NTB_CONN_TRANSPARENT = 0, 80 NTB_CONN_B2B = 1, 81 NTB_CONN_RP = 2, 82}; 83 84enum ntb_b2b_direction { 85 NTB_DEV_USD = 0, 86 NTB_DEV_DSD = 1, 87}; 88 89enum ntb_bar { 90 NTB_CONFIG_BAR = 0, 91 NTB_B2B_BAR_1, 92 NTB_B2B_BAR_2, 93 NTB_B2B_BAR_3, 94 NTB_MAX_BARS 95}; 96 97/* Device features and workarounds */ 98#define HAS_FEATURE(feature) \ 99 ((ntb->features & (feature)) != 0) 100 101struct ntb_hw_info { 102 uint32_t device_id; 103 const char *desc; 104 enum ntb_device_type type; 105 uint32_t features; 106}; 107 108struct ntb_pci_bar_info { 109 bus_space_tag_t pci_bus_tag; 110 bus_space_handle_t pci_bus_handle; 111 int pci_resource_id; 112 struct resource *pci_resource; 113 vm_paddr_t pbase; 114 caddr_t vbase; 115 vm_size_t size; 116 bool mapped_wc : 1; 117 118 /* Configuration register offsets */ 119 uint32_t psz_off; 120 uint32_t ssz_off; 121 uint32_t pbarxlat_off; 122}; 123 124struct ntb_int_info { 125 struct resource *res; 126 int rid; 127 void *tag; 128}; 129 130struct ntb_vec { 131 struct ntb_softc *ntb; 132 uint32_t num; 133}; 134 135struct ntb_reg { 136 uint32_t ntb_ctl; 137 uint32_t lnk_sta; 138 uint8_t db_size; 139 unsigned mw_bar[NTB_MAX_BARS]; 140}; 141 142struct ntb_alt_reg { 143 uint32_t db_bell; 144 uint32_t db_mask; 145 uint32_t spad; 146}; 147 148struct ntb_xlat_reg { 149 uint32_t bar0_base; 150 uint32_t bar2_base; 151 uint32_t bar4_base; 152 uint32_t bar5_base; 153 154 uint32_t bar2_xlat; 155 uint32_t bar4_xlat; 156 uint32_t bar5_xlat; 157 158 uint32_t bar2_limit; 159 uint32_t bar4_limit; 160 uint32_t bar5_limit; 161}; 162 163struct ntb_b2b_addr { 164 uint64_t bar0_addr; 165 uint64_t bar2_addr64; 166 uint64_t bar4_addr64; 167 uint64_t bar4_addr32; 168 uint64_t bar5_addr32; 169}; 170 171struct ntb_softc { 172 device_t device; 173 enum ntb_device_type type; 174 uint32_t features; 175 176 struct ntb_pci_bar_info bar_info[NTB_MAX_BARS]; 177 struct ntb_int_info int_info[MAX_MSIX_INTERRUPTS]; 178 uint32_t 
allocated_interrupts; 179 180 struct callout heartbeat_timer; 181 struct callout lr_timer; 182 183 void *ntb_ctx; 184 const struct ntb_ctx_ops *ctx_ops; 185 struct ntb_vec *msix_vec; 186#define CTX_LOCK(sc) mtx_lock(&(sc)->ctx_lock) 187#define CTX_UNLOCK(sc) mtx_unlock(&(sc)->ctx_lock) 188#define CTX_ASSERT(sc,f) mtx_assert(&(sc)->ctx_lock, (f)) 189 struct mtx ctx_lock; 190 191 uint32_t ppd; 192 enum ntb_conn_type conn_type; 193 enum ntb_b2b_direction dev_type; 194 195 /* Offset of peer bar0 in B2B BAR */ 196 uint64_t b2b_off; 197 /* Memory window used to access peer bar0 */ 198#define B2B_MW_DISABLED UINT8_MAX 199 uint8_t b2b_mw_idx; 200 201 uint8_t mw_count; 202 uint8_t spad_count; 203 uint8_t db_count; 204 uint8_t db_vec_count; 205 uint8_t db_vec_shift; 206 207 /* Protects local db_mask. */ 208#define DB_MASK_LOCK(sc) mtx_lock_spin(&(sc)->db_mask_lock) 209#define DB_MASK_UNLOCK(sc) mtx_unlock_spin(&(sc)->db_mask_lock) 210#define DB_MASK_ASSERT(sc,f) mtx_assert(&(sc)->db_mask_lock, (f)) 211 struct mtx db_mask_lock; 212 213 volatile uint32_t ntb_ctl; 214 volatile uint32_t lnk_sta; 215 216 uint64_t db_valid_mask; 217 uint64_t db_link_mask; 218 uint64_t db_mask; 219 220 int last_ts; /* ticks @ last irq */ 221 222 const struct ntb_reg *reg; 223 const struct ntb_alt_reg *self_reg; 224 const struct ntb_alt_reg *peer_reg; 225 const struct ntb_xlat_reg *xlat_reg; 226}; 227 228#ifdef __i386__ 229static __inline uint64_t 230bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle, 231 bus_size_t offset) 232{ 233 234 return (bus_space_read_4(tag, handle, offset) | 235 ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32); 236} 237 238static __inline void 239bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle, 240 bus_size_t offset, uint64_t val) 241{ 242 243 bus_space_write_4(tag, handle, offset, val); 244 bus_space_write_4(tag, handle, offset + 4, val >> 32); 245} 246#endif 247 248#define ntb_bar_read(SIZE, bar, offset) \ 249 bus_space_read_ ## 
SIZE (ntb->bar_info[(bar)].pci_bus_tag, \ 250 ntb->bar_info[(bar)].pci_bus_handle, (offset)) 251#define ntb_bar_write(SIZE, bar, offset, val) \ 252 bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \ 253 ntb->bar_info[(bar)].pci_bus_handle, (offset), (val)) 254#define ntb_reg_read(SIZE, offset) ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset) 255#define ntb_reg_write(SIZE, offset, val) \ 256 ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val) 257#define ntb_mw_read(SIZE, offset) \ 258 ntb_bar_read(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), offset) 259#define ntb_mw_write(SIZE, offset, val) \ 260 ntb_bar_write(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \ 261 offset, val) 262 263static int ntb_probe(device_t device); 264static int ntb_attach(device_t device); 265static int ntb_detach(device_t device); 266static inline enum ntb_bar ntb_mw_to_bar(struct ntb_softc *, unsigned mw); 267static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar); 268static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar, 269 uint32_t *base, uint32_t *xlat, uint32_t *lmt); 270static int ntb_map_pci_bars(struct ntb_softc *ntb); 271static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *, 272 const char *); 273static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar); 274static int map_memory_window_bar(struct ntb_softc *ntb, 275 struct ntb_pci_bar_info *bar); 276static void ntb_unmap_pci_bar(struct ntb_softc *ntb); 277static int ntb_remap_msix(device_t, uint32_t desired, uint32_t avail); 278static int ntb_init_isr(struct ntb_softc *ntb); 279static int ntb_setup_legacy_interrupt(struct ntb_softc *ntb); 280static int ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors); 281static void ntb_teardown_interrupts(struct ntb_softc *ntb); 282static inline uint64_t ntb_vec_mask(struct ntb_softc *, uint64_t db_vector); 283static void ntb_interrupt(struct ntb_softc *, uint32_t vec); 284static void ndev_vec_isr(void *arg); 285static void 
ndev_irq_isr(void *arg); 286static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff); 287static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t); 288static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t); 289static int ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors); 290static void ntb_free_msix_vec(struct ntb_softc *ntb); 291static struct ntb_hw_info *ntb_get_device_info(uint32_t device_id); 292static void ntb_detect_max_mw(struct ntb_softc *ntb); 293static int ntb_detect_xeon(struct ntb_softc *ntb); 294static int ntb_detect_atom(struct ntb_softc *ntb); 295static int ntb_xeon_init_dev(struct ntb_softc *ntb); 296static int ntb_atom_init_dev(struct ntb_softc *ntb); 297static void ntb_teardown_xeon(struct ntb_softc *ntb); 298static void configure_atom_secondary_side_bars(struct ntb_softc *ntb); 299static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx, 300 enum ntb_bar regbar); 301static void xeon_set_sbar_base_and_limit(struct ntb_softc *, 302 uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar); 303static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr, 304 enum ntb_bar idx); 305static int xeon_setup_b2b_mw(struct ntb_softc *, 306 const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr); 307static inline bool link_is_up(struct ntb_softc *ntb); 308static inline bool atom_link_is_err(struct ntb_softc *ntb); 309static inline enum ntb_speed ntb_link_sta_speed(struct ntb_softc *); 310static inline enum ntb_width ntb_link_sta_width(struct ntb_softc *); 311static void atom_link_hb(void *arg); 312static void ntb_db_event(struct ntb_softc *ntb, uint32_t vec); 313static void recover_atom_link(void *arg); 314static bool ntb_poll_link(struct ntb_softc *ntb); 315static void save_bar_parameters(struct ntb_pci_bar_info *bar); 316static void ntb_sysctl_init(struct ntb_softc *); 317static int sysctl_handle_features(SYSCTL_HANDLER_ARGS); 318static int 
sysctl_handle_link_status(SYSCTL_HANDLER_ARGS); 319static int sysctl_handle_register(SYSCTL_HANDLER_ARGS); 320 321static unsigned g_ntb_hw_debug_level; 322SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN, 323 &g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose"); 324#define ntb_printf(lvl, ...) do { \ 325 if ((lvl) <= g_ntb_hw_debug_level) { \ 326 device_printf(ntb->device, __VA_ARGS__); \ 327 } \ 328} while (0) 329 330static unsigned g_ntb_enable_wc = 1; 331SYSCTL_UINT(_hw_ntb, OID_AUTO, enable_writecombine, CTLFLAG_RDTUN, 332 &g_ntb_enable_wc, 0, "Set to 1 to map memory windows write combining"); 333 334static struct ntb_hw_info pci_ids[] = { 335 /* XXX: PS/SS IDs left out until they are supported. */ 336 { 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B", 337 NTB_ATOM, 0 }, 338 339 { 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B", 340 NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 }, 341 { 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B", 342 NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 }, 343 { 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B", NTB_XEON, 344 NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | 345 NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K }, 346 { 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B", NTB_XEON, 347 NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | 348 NTB_SB01BASE_LOCKUP }, 349 { 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B", NTB_XEON, 350 NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | 351 NTB_SB01BASE_LOCKUP }, 352 353 { 0x00000000, NULL, NTB_ATOM, 0 } 354}; 355 356static const struct ntb_reg atom_reg = { 357 .ntb_ctl = ATOM_NTBCNTL_OFFSET, 358 .lnk_sta = ATOM_LINK_STATUS_OFFSET, 359 .db_size = sizeof(uint64_t), 360 .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 }, 361}; 362 363static const struct ntb_alt_reg atom_pri_reg = { 364 .db_bell = ATOM_PDOORBELL_OFFSET, 365 .db_mask = ATOM_PDBMSK_OFFSET, 366 .spad = ATOM_SPAD_OFFSET, 367}; 368 
369static const struct ntb_alt_reg atom_b2b_reg = { 370 .db_bell = ATOM_B2B_DOORBELL_OFFSET, 371 .spad = ATOM_B2B_SPAD_OFFSET, 372}; 373 374static const struct ntb_xlat_reg atom_sec_xlat = { 375#if 0 376 /* "FIXME" says the Linux driver. */ 377 .bar0_base = ATOM_SBAR0BASE_OFFSET, 378 .bar2_base = ATOM_SBAR2BASE_OFFSET, 379 .bar4_base = ATOM_SBAR4BASE_OFFSET, 380 381 .bar2_limit = ATOM_SBAR2LMT_OFFSET, 382 .bar4_limit = ATOM_SBAR4LMT_OFFSET, 383#endif 384 385 .bar2_xlat = ATOM_SBAR2XLAT_OFFSET, 386 .bar4_xlat = ATOM_SBAR4XLAT_OFFSET, 387}; 388 389static const struct ntb_reg xeon_reg = { 390 .ntb_ctl = XEON_NTBCNTL_OFFSET, 391 .lnk_sta = XEON_LINK_STATUS_OFFSET, 392 .db_size = sizeof(uint16_t), 393 .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 }, 394}; 395 396static const struct ntb_alt_reg xeon_pri_reg = { 397 .db_bell = XEON_PDOORBELL_OFFSET, 398 .db_mask = XEON_PDBMSK_OFFSET, 399 .spad = XEON_SPAD_OFFSET, 400}; 401 402static const struct ntb_alt_reg xeon_b2b_reg = { 403 .db_bell = XEON_B2B_DOORBELL_OFFSET, 404 .spad = XEON_B2B_SPAD_OFFSET, 405}; 406 407static const struct ntb_xlat_reg xeon_sec_xlat = { 408 .bar0_base = XEON_SBAR0BASE_OFFSET, 409 .bar2_base = XEON_SBAR2BASE_OFFSET, 410 .bar4_base = XEON_SBAR4BASE_OFFSET, 411 .bar5_base = XEON_SBAR5BASE_OFFSET, 412 413 .bar2_limit = XEON_SBAR2LMT_OFFSET, 414 .bar4_limit = XEON_SBAR4LMT_OFFSET, 415 .bar5_limit = XEON_SBAR5LMT_OFFSET, 416 417 .bar2_xlat = XEON_SBAR2XLAT_OFFSET, 418 .bar4_xlat = XEON_SBAR4XLAT_OFFSET, 419 .bar5_xlat = XEON_SBAR5XLAT_OFFSET, 420}; 421 422static struct ntb_b2b_addr xeon_b2b_usd_addr = { 423 .bar0_addr = XEON_B2B_BAR0_ADDR, 424 .bar2_addr64 = XEON_B2B_BAR2_ADDR64, 425 .bar4_addr64 = XEON_B2B_BAR4_ADDR64, 426 .bar4_addr32 = XEON_B2B_BAR4_ADDR32, 427 .bar5_addr32 = XEON_B2B_BAR5_ADDR32, 428}; 429 430static struct ntb_b2b_addr xeon_b2b_dsd_addr = { 431 .bar0_addr = XEON_B2B_BAR0_ADDR, 432 .bar2_addr64 = XEON_B2B_BAR2_ADDR64, 433 .bar4_addr64 = XEON_B2B_BAR4_ADDR64, 434 .bar4_addr32 
= XEON_B2B_BAR4_ADDR32, 435 .bar5_addr32 = XEON_B2B_BAR5_ADDR32, 436}; 437 438SYSCTL_NODE(_hw_ntb, OID_AUTO, xeon_b2b, CTLFLAG_RW, 0, 439 "B2B MW segment overrides -- MUST be the same on both sides"); 440 441SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar2_addr64, CTLFLAG_RDTUN, 442 &xeon_b2b_usd_addr.bar2_addr64, 0, "If using B2B topology on Xeon " 443 "hardware, use this 64-bit address on the bus between the NTB devices for " 444 "the window at BAR2, on the upstream side of the link. MUST be the same " 445 "address on both sides."); 446SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr64, CTLFLAG_RDTUN, 447 &xeon_b2b_usd_addr.bar4_addr64, 0, "See usd_bar2_addr64, but BAR4."); 448SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr32, CTLFLAG_RDTUN, 449 &xeon_b2b_usd_addr.bar4_addr32, 0, "See usd_bar2_addr64, but BAR4 " 450 "(split-BAR mode)."); 451SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar5_addr32, CTLFLAG_RDTUN, 452 &xeon_b2b_usd_addr.bar5_addr32, 0, "See usd_bar2_addr64, but BAR5 " 453 "(split-BAR mode)."); 454 455SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar2_addr64, CTLFLAG_RDTUN, 456 &xeon_b2b_dsd_addr.bar2_addr64, 0, "If using B2B topology on Xeon " 457 "hardware, use this 64-bit address on the bus between the NTB devices for " 458 "the window at BAR2, on the downstream side of the link. 
MUST be the same" 459 " address on both sides."); 460SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr64, CTLFLAG_RDTUN, 461 &xeon_b2b_dsd_addr.bar4_addr64, 0, "See dsd_bar2_addr64, but BAR4."); 462SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr32, CTLFLAG_RDTUN, 463 &xeon_b2b_dsd_addr.bar4_addr32, 0, "See dsd_bar2_addr64, but BAR4 " 464 "(split-BAR mode)."); 465SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN, 466 &xeon_b2b_dsd_addr.bar5_addr32, 0, "See dsd_bar2_addr64, but BAR5 " 467 "(split-BAR mode)."); 468 469/* 470 * OS <-> Driver interface structures 471 */ 472MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations"); 473 474static device_method_t ntb_pci_methods[] = { 475 /* Device interface */ 476 DEVMETHOD(device_probe, ntb_probe), 477 DEVMETHOD(device_attach, ntb_attach), 478 DEVMETHOD(device_detach, ntb_detach), 479 DEVMETHOD_END 480}; 481 482static driver_t ntb_pci_driver = { 483 "ntb_hw", 484 ntb_pci_methods, 485 sizeof(struct ntb_softc), 486}; 487 488static devclass_t ntb_devclass; 489DRIVER_MODULE(ntb_hw, pci, ntb_pci_driver, ntb_devclass, NULL, NULL); 490MODULE_VERSION(ntb_hw, 1); 491 492SYSCTL_NODE(_hw, OID_AUTO, ntb, CTLFLAG_RW, 0, "NTB sysctls"); 493 494/* 495 * OS <-> Driver linkage functions 496 */ 497static int 498ntb_probe(device_t device) 499{ 500 struct ntb_hw_info *p; 501 502 p = ntb_get_device_info(pci_get_devid(device)); 503 if (p == NULL) 504 return (ENXIO); 505 506 device_set_desc(device, p->desc); 507 return (0); 508} 509 510static int 511ntb_attach(device_t device) 512{ 513 struct ntb_softc *ntb; 514 struct ntb_hw_info *p; 515 int error; 516 517 ntb = DEVICE2SOFTC(device); 518 p = ntb_get_device_info(pci_get_devid(device)); 519 520 ntb->device = device; 521 ntb->type = p->type; 522 ntb->features = p->features; 523 ntb->b2b_mw_idx = B2B_MW_DISABLED; 524 525 /* Heartbeat timer for NTB_ATOM since there is no link interrupt */ 526 callout_init(&ntb->heartbeat_timer, 1); 527 
callout_init(&ntb->lr_timer, 1); 528 mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN); 529 mtx_init(&ntb->ctx_lock, "ntb ctx", NULL, MTX_DEF); 530 531 if (ntb->type == NTB_ATOM) 532 error = ntb_detect_atom(ntb); 533 else 534 error = ntb_detect_xeon(ntb); 535 if (error != 0) 536 goto out; 537 538 ntb_detect_max_mw(ntb); 539 540 pci_enable_busmaster(ntb->device); 541 542 error = ntb_map_pci_bars(ntb); 543 if (error != 0) 544 goto out; 545 if (ntb->type == NTB_ATOM) 546 error = ntb_atom_init_dev(ntb); 547 else 548 error = ntb_xeon_init_dev(ntb); 549 if (error != 0) 550 goto out; 551 552 ntb_poll_link(ntb); 553 554 ntb_sysctl_init(ntb); 555 556out: 557 if (error != 0) 558 ntb_detach(device); 559 return (error); 560} 561 562static int 563ntb_detach(device_t device) 564{ 565 struct ntb_softc *ntb; 566 567 ntb = DEVICE2SOFTC(device); 568 569 if (ntb->self_reg != NULL) 570 ntb_db_set_mask(ntb, ntb->db_valid_mask); 571 callout_drain(&ntb->heartbeat_timer); 572 callout_drain(&ntb->lr_timer); 573 pci_disable_busmaster(ntb->device); 574 if (ntb->type == NTB_XEON) 575 ntb_teardown_xeon(ntb); 576 ntb_teardown_interrupts(ntb); 577 578 mtx_destroy(&ntb->db_mask_lock); 579 mtx_destroy(&ntb->ctx_lock); 580 581 /* 582 * Redetect total MWs so we unmap properly -- in case we lowered the 583 * maximum to work around Xeon errata. 584 */ 585 ntb_detect_max_mw(ntb); 586 ntb_unmap_pci_bar(ntb); 587 588 return (0); 589} 590 591/* 592 * Driver internal routines 593 */ 594static inline enum ntb_bar 595ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw) 596{ 597 598 KASSERT(mw < ntb->mw_count || 599 (mw != B2B_MW_DISABLED && mw == ntb->b2b_mw_idx), 600 ("%s: mw:%u > count:%u", __func__, mw, (unsigned)ntb->mw_count)); 601 KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw")); 602 603 return (ntb->reg->mw_bar[mw]); 604} 605 606static inline bool 607bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar) 608{ 609 /* XXX This assertion could be stronger. 
*/ 610 KASSERT(bar < NTB_MAX_BARS, ("bogus bar")); 611 return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(NTB_SPLIT_BAR)); 612} 613 614static inline void 615bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base, 616 uint32_t *xlat, uint32_t *lmt) 617{ 618 uint32_t basev, lmtv, xlatv; 619 620 switch (bar) { 621 case NTB_B2B_BAR_1: 622 basev = ntb->xlat_reg->bar2_base; 623 lmtv = ntb->xlat_reg->bar2_limit; 624 xlatv = ntb->xlat_reg->bar2_xlat; 625 break; 626 case NTB_B2B_BAR_2: 627 basev = ntb->xlat_reg->bar4_base; 628 lmtv = ntb->xlat_reg->bar4_limit; 629 xlatv = ntb->xlat_reg->bar4_xlat; 630 break; 631 case NTB_B2B_BAR_3: 632 basev = ntb->xlat_reg->bar5_base; 633 lmtv = ntb->xlat_reg->bar5_limit; 634 xlatv = ntb->xlat_reg->bar5_xlat; 635 break; 636 default: 637 KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS, 638 ("bad bar")); 639 basev = lmtv = xlatv = 0; 640 break; 641 } 642 643 if (base != NULL) 644 *base = basev; 645 if (xlat != NULL) 646 *xlat = xlatv; 647 if (lmt != NULL) 648 *lmt = lmtv; 649} 650 651static int 652ntb_map_pci_bars(struct ntb_softc *ntb) 653{ 654 int rc; 655 656 ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0); 657 rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]); 658 if (rc != 0) 659 goto out; 660 661 ntb->bar_info[NTB_B2B_BAR_1].pci_resource_id = PCIR_BAR(2); 662 rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_1]); 663 if (rc != 0) 664 goto out; 665 ntb->bar_info[NTB_B2B_BAR_1].psz_off = XEON_PBAR23SZ_OFFSET; 666 ntb->bar_info[NTB_B2B_BAR_1].ssz_off = XEON_SBAR23SZ_OFFSET; 667 ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off = XEON_PBAR2XLAT_OFFSET; 668 669 ntb->bar_info[NTB_B2B_BAR_2].pci_resource_id = PCIR_BAR(4); 670 /* XXX Are shared MW B2Bs write-combining? 
*/ 671 if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP) && !HAS_FEATURE(NTB_SPLIT_BAR)) 672 rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]); 673 else 674 rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]); 675 ntb->bar_info[NTB_B2B_BAR_2].psz_off = XEON_PBAR4SZ_OFFSET; 676 ntb->bar_info[NTB_B2B_BAR_2].ssz_off = XEON_SBAR4SZ_OFFSET; 677 ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off = XEON_PBAR4XLAT_OFFSET; 678 679 if (!HAS_FEATURE(NTB_SPLIT_BAR)) 680 goto out; 681 682 ntb->bar_info[NTB_B2B_BAR_3].pci_resource_id = PCIR_BAR(5); 683 if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) 684 rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]); 685 else 686 rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]); 687 ntb->bar_info[NTB_B2B_BAR_3].psz_off = XEON_PBAR5SZ_OFFSET; 688 ntb->bar_info[NTB_B2B_BAR_3].ssz_off = XEON_SBAR5SZ_OFFSET; 689 ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off = XEON_PBAR5XLAT_OFFSET; 690 691out: 692 if (rc != 0) 693 device_printf(ntb->device, 694 "unable to allocate pci resource\n"); 695 return (rc); 696} 697 698static void 699print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar, 700 const char *kind) 701{ 702 703 device_printf(ntb->device, 704 "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n", 705 PCI_RID2BAR(bar->pci_resource_id), bar->vbase, 706 (char *)bar->vbase + bar->size - 1, 707 (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), 708 (uintmax_t)bar->size, kind); 709} 710 711static int 712map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar) 713{ 714 715 bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, 716 &bar->pci_resource_id, RF_ACTIVE); 717 if (bar->pci_resource == NULL) 718 return (ENXIO); 719 720 save_bar_parameters(bar); 721 print_map_success(ntb, bar, "mmr"); 722 return (0); 723} 724 725static int 726map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar) 727{ 728 int rc; 729 uint8_t bar_size_bits = 0; 730 731 bar->pci_resource = 
bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, 732 &bar->pci_resource_id, RF_ACTIVE); 733 734 if (bar->pci_resource == NULL) 735 return (ENXIO); 736 737 save_bar_parameters(bar); 738 /* 739 * Ivytown NTB BAR sizes are misreported by the hardware due to a 740 * hardware issue. To work around this, query the size it should be 741 * configured to by the device and modify the resource to correspond to 742 * this new size. The BIOS on systems with this problem is required to 743 * provide enough address space to allow the driver to make this change 744 * safely. 745 * 746 * Ideally I could have just specified the size when I allocated the 747 * resource like: 748 * bus_alloc_resource(ntb->device, 749 * SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul, 750 * 1ul << bar_size_bits, RF_ACTIVE); 751 * but the PCI driver does not honor the size in this call, so we have 752 * to modify it after the fact. 753 */ 754 if (HAS_FEATURE(NTB_BAR_SIZE_4K)) { 755 if (bar->pci_resource_id == PCIR_BAR(2)) 756 bar_size_bits = pci_read_config(ntb->device, 757 XEON_PBAR23SZ_OFFSET, 1); 758 else 759 bar_size_bits = pci_read_config(ntb->device, 760 XEON_PBAR45SZ_OFFSET, 1); 761 762 rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY, 763 bar->pci_resource, bar->pbase, 764 bar->pbase + (1ul << bar_size_bits) - 1); 765 if (rc != 0) { 766 device_printf(ntb->device, 767 "unable to resize bar\n"); 768 return (rc); 769 } 770 771 save_bar_parameters(bar); 772 } 773 774 print_map_success(ntb, bar, "mw"); 775 if (g_ntb_enable_wc == 0) 776 return (0); 777 778 /* Mark bar region as write combining to improve performance. 
*/ 779 rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, 780 VM_MEMATTR_WRITE_COMBINING); 781 if (rc == 0) { 782 bar->mapped_wc = true; 783 device_printf(ntb->device, 784 "Marked BAR%d v:[%p-%p] p:[%p-%p] as " 785 "WRITE_COMBINING.\n", 786 PCI_RID2BAR(bar->pci_resource_id), bar->vbase, 787 (char *)bar->vbase + bar->size - 1, 788 (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1)); 789 } else 790 device_printf(ntb->device, 791 "Unable to mark BAR%d v:[%p-%p] p:[%p-%p] as " 792 "WRITE_COMBINING: %d\n", 793 PCI_RID2BAR(bar->pci_resource_id), bar->vbase, 794 (char *)bar->vbase + bar->size - 1, 795 (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), 796 rc); 797 /* Proceed anyway */ 798 return (0); 799} 800 801static void 802ntb_unmap_pci_bar(struct ntb_softc *ntb) 803{ 804 struct ntb_pci_bar_info *current_bar; 805 int i; 806 807 for (i = 0; i < NTB_MAX_BARS; i++) { 808 current_bar = &ntb->bar_info[i]; 809 if (current_bar->pci_resource != NULL) 810 bus_release_resource(ntb->device, SYS_RES_MEMORY, 811 current_bar->pci_resource_id, 812 current_bar->pci_resource); 813 } 814} 815 816static int 817ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors) 818{ 819 uint32_t i; 820 int rc; 821 822 for (i = 0; i < num_vectors; i++) { 823 ntb->int_info[i].rid = i + 1; 824 ntb->int_info[i].res = bus_alloc_resource_any(ntb->device, 825 SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE); 826 if (ntb->int_info[i].res == NULL) { 827 device_printf(ntb->device, 828 "bus_alloc_resource failed\n"); 829 return (ENOMEM); 830 } 831 ntb->int_info[i].tag = NULL; 832 ntb->allocated_interrupts++; 833 rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, 834 INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr, 835 &ntb->msix_vec[i], &ntb->int_info[i].tag); 836 if (rc != 0) { 837 device_printf(ntb->device, "bus_setup_intr failed\n"); 838 return (ENXIO); 839 } 840 } 841 return (0); 842} 843 844/* 845 * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector 846 * 
cannot be allocated for each MSI-X message. JHB seems to think remapping 847 * should be okay. This tunable should enable us to test that hypothesis 848 * when someone gets their hands on some Xeon hardware. 849 */ 850static int ntb_force_remap_mode; 851SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN, 852 &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped" 853 " to a smaller number of ithreads, even if the desired number are " 854 "available"); 855 856/* 857 * In case it is NOT ok, give consumers an abort button. 858 */ 859static int ntb_prefer_intx; 860SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN, 861 &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather " 862 "than remapping MSI-X messages over available slots (match Linux driver " 863 "behavior)"); 864 865/* 866 * Remap the desired number of MSI-X messages to available ithreads in a simple 867 * round-robin fashion. 868 */ 869static int 870ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail) 871{ 872 u_int *vectors; 873 uint32_t i; 874 int rc; 875 876 if (ntb_prefer_intx != 0) 877 return (ENXIO); 878 879 vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK); 880 881 for (i = 0; i < desired; i++) 882 vectors[i] = (i % avail) + 1; 883 884 rc = pci_remap_msix(dev, desired, vectors); 885 free(vectors, M_NTB); 886 return (rc); 887} 888 889static int 890ntb_init_isr(struct ntb_softc *ntb) 891{ 892 uint32_t desired_vectors, num_vectors; 893 int rc; 894 895 ntb->allocated_interrupts = 0; 896 ntb->last_ts = ticks; 897 898 /* 899 * Mask all doorbell interrupts. 
900 */ 901 ntb_db_set_mask(ntb, ntb->db_valid_mask); 902 903 num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device), 904 ntb->db_count); 905 if (desired_vectors >= 1) { 906 rc = pci_alloc_msix(ntb->device, &num_vectors); 907 908 if (ntb_force_remap_mode != 0 && rc == 0 && 909 num_vectors == desired_vectors) 910 num_vectors--; 911 912 if (rc == 0 && num_vectors < desired_vectors) { 913 rc = ntb_remap_msix(ntb->device, desired_vectors, 914 num_vectors); 915 if (rc == 0) 916 num_vectors = desired_vectors; 917 else 918 pci_release_msi(ntb->device); 919 } 920 if (rc != 0) 921 num_vectors = 1; 922 } else 923 num_vectors = 1; 924 925 if (ntb->type == NTB_XEON && num_vectors < ntb->db_vec_count) { 926 ntb->db_vec_count = 1; 927 ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT; 928 rc = ntb_setup_legacy_interrupt(ntb); 929 } else { 930 ntb_create_msix_vec(ntb, num_vectors); 931 rc = ntb_setup_msix(ntb, num_vectors); 932 } 933 if (rc != 0) { 934 device_printf(ntb->device, 935 "Error allocating interrupts: %d\n", rc); 936 ntb_free_msix_vec(ntb); 937 } 938 939 return (rc); 940} 941 942static int 943ntb_setup_legacy_interrupt(struct ntb_softc *ntb) 944{ 945 int rc; 946 947 ntb->int_info[0].rid = 0; 948 ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, 949 &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE); 950 if (ntb->int_info[0].res == NULL) { 951 device_printf(ntb->device, "bus_alloc_resource failed\n"); 952 return (ENOMEM); 953 } 954 955 ntb->int_info[0].tag = NULL; 956 ntb->allocated_interrupts = 1; 957 958 rc = bus_setup_intr(ntb->device, ntb->int_info[0].res, 959 INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr, 960 ntb, &ntb->int_info[0].tag); 961 if (rc != 0) { 962 device_printf(ntb->device, "bus_setup_intr failed\n"); 963 return (ENXIO); 964 } 965 966 return (0); 967} 968 969static void 970ntb_teardown_interrupts(struct ntb_softc *ntb) 971{ 972 struct ntb_int_info *current_int; 973 int i; 974 975 for (i = 0; i < ntb->allocated_interrupts; i++) { 976 
		/*
		 * Tear down the attached handler (if any), then release the
		 * IRQ resource itself, for each allocated interrupt vector.
		 */
		current_int = &ntb->int_info[i];
		if (current_int->tag != NULL)
			bus_teardown_intr(ntb->device, current_int->res,
			    current_int->tag);

		if (current_int->res != NULL)
			bus_release_resource(ntb->device, SYS_RES_IRQ,
			    rman_get_rid(current_int->res), current_int->res);
	}

	ntb_free_msix_vec(ntb);
	pci_release_msi(ntb->device);
}

/*
 * Doorbell register and mask are 64-bit on Atom, 16-bit on Xeon.  Abstract it
 * out to make code clearer.
 */
static inline uint64_t
db_ioread(struct ntb_softc *ntb, uint64_t regoff)
{

	if (ntb->type == NTB_ATOM)
		return (ntb_reg_read(8, regoff));

	KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));

	return (ntb_reg_read(2, regoff));
}

/*
 * Checked doorbell write: asserts that only bits inside db_valid_mask are
 * set, and that the doorbell mask lock is held when the target register is
 * the local doorbell mask.
 */
static inline void
db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	KASSERT((val & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(val & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	if (regoff == ntb->self_reg->db_mask)
		DB_MASK_ASSERT(ntb, MA_OWNED);
	db_iowrite_raw(ntb, regoff, val);
}

/*
 * Unchecked doorbell write; width depends on hardware type (see db_ioread).
 * Xeon doorbell registers are only 16 bits wide, hence the truncation.
 */
static inline void
db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{

	if (ntb->type == NTB_ATOM) {
		ntb_reg_write(8, regoff, val);
		return;
	}

	KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));
	ntb_reg_write(2, regoff, (uint16_t)val);
}

/*
 * Set (disable) the given bits in the local doorbell mask; the cached copy
 * in ntb->db_mask is kept in sync under the DB mask lock.
 */
void
ntb_db_set_mask(struct ntb_softc *ntb, uint64_t bits)
{

	DB_MASK_LOCK(ntb);
	ntb->db_mask |= bits;
	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

/*
 * Clear (re-enable) the given bits in the local doorbell mask.  Bits outside
 * db_valid_mask are rejected by assertion.
 */
void
ntb_db_clear_mask(struct ntb_softc *ntb, uint64_t bits)
{

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	DB_MASK_LOCK(ntb);
	ntb->db_mask &= ~bits;
	db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
	DB_MASK_UNLOCK(ntb);
}

/* Read the local doorbell bell register. */
uint64_t
ntb_db_read(struct ntb_softc *ntb)
{

	return (db_ioread(ntb, ntb->self_reg->db_bell));
}

/* Clear (acknowledge) the given doorbell bits; write-to-clear semantics. */
void
ntb_db_clear(struct ntb_softc *ntb, uint64_t bits)
{

	KASSERT((bits & ~ntb->db_valid_mask) == 0,
	    ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__,
	     (uintmax_t)(bits & ~ntb->db_valid_mask),
	     (uintmax_t)ntb->db_valid_mask));

	db_iowrite(ntb, ntb->self_reg->db_bell, bits);
}

/*
 * Compute the mask of doorbell bits serviced by the given interrupt vector:
 * each vector covers db_vec_shift consecutive doorbell bits.
 */
static inline uint64_t
ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
{
	uint64_t shift, mask;

	shift = ntb->db_vec_shift;
	mask = (1ull << shift) - 1;
	return (mask << (shift * db_vector));
}

/*
 * Common interrupt dispatch: records the interrupt time (for the Atom link
 * heartbeat), polls the link if this vector carries the link-status doorbell
 * bit, and forwards valid doorbell bits to the client context.
 */
static void
ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
{
	uint64_t vec_mask;

	ntb->last_ts = ticks;
	vec_mask = ntb_vec_mask(ntb, vec);

	if ((vec_mask & ntb->db_link_mask) != 0) {
		if (ntb_poll_link(ntb))
			ntb_link_event(ntb);
	}

	if ((vec_mask & ntb->db_valid_mask) != 0)
		ntb_db_event(ntb, vec);
}

/* MSI-X per-vector interrupt handler. */
static void
ndev_vec_isr(void *arg)
{
	struct ntb_vec *nvec = arg;

	ntb_interrupt(nvec->ntb, nvec->num);
}

/* If we couldn't set up MSI-X, we only have the one vector. */
static void
ndev_irq_isr(void *arg)
{
	ntb_interrupt(arg, 0);
}

/*
 * Allocate and initialize the per-vector bookkeeping array.  M_WAITOK means
 * the allocation cannot fail, so this always returns 0.
 */
static int
ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
{
	uint32_t i;

	ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB,
	    M_ZERO | M_WAITOK);
	for (i = 0; i < num_vectors; i++) {
		ntb->msix_vec[i].num = i;
		ntb->msix_vec[i].ntb = ntb;
	}

	return (0);
}

/* Free the per-vector array; safe to call if it was never allocated. */
static void
ntb_free_msix_vec(struct ntb_softc *ntb)
{

	if (ntb->msix_vec == NULL)
		return;

	free(ntb->msix_vec, M_NTB);
	ntb->msix_vec = NULL;
}

/*
 * Look up the hardware info entry for a PCI device ID in the pci_ids table;
 * returns NULL if the device is not one we support.
 */
static struct ntb_hw_info *
ntb_get_device_info(uint32_t device_id)
{
	struct ntb_hw_info *ep = pci_ids;

	while (ep->device_id) {
		if (ep->device_id == device_id)
			return (ep);
		++ep;
	}
	return (NULL);
}

/* Xeon-specific teardown: just drop the link if the regs were mapped. */
static void
ntb_teardown_xeon(struct ntb_softc *ntb)
{

	if (ntb->reg != NULL)
		ntb_link_disable(ntb);
}

/*
 * Determine the number of memory windows supported by this hardware.  On
 * Xeon the count depends on whether the split-BAR (HSX) feature is enabled.
 */
static void
ntb_detect_max_mw(struct ntb_softc *ntb)
{

	if (ntb->type == NTB_ATOM) {
		ntb->mw_count = ATOM_MW_COUNT;
		return;
	}

	if (HAS_FEATURE(NTB_SPLIT_BAR))
		ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT;
	else
		ntb->mw_count = XEON_SNB_MW_COUNT;
}

/*
 * Read the Xeon PPD (PCIe Port Definition) config byte and derive device
 * type (USD/DSD), split-BAR feature, and connection type.  Only B2B is
 * supported; RP/Transparent fall through to the error path.
 */
static int
ntb_detect_xeon(struct ntb_softc *ntb)
{
	uint8_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1);
	ntb->ppd = ppd;

	if ((ppd & XEON_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_DSD;
	else
		ntb->dev_type = NTB_DEV_USD;

	if ((ppd & XEON_PPD_SPLIT_BAR) != 0)
		ntb->features |= NTB_SPLIT_BAR;

	/* SB01BASE_LOCKUP errata is a superset of SDOORBELL errata */
	if (HAS_FEATURE(NTB_SB01BASE_LOCKUP))
		ntb->features |= NTB_SDOORBELL_LOCKUP;

	conn_type = ppd & XEON_PPD_CONN_TYPE;
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	case NTB_CONN_RP:
	case NTB_CONN_TRANSPARENT:
	default:
		device_printf(ntb->device, "Unsupported connection type: %u\n",
		    (unsigned)conn_type);
		return (ENXIO);
	}
	return (0);
}

/*
 * Read the Atom PPD config dword and derive device type and connection type.
 * As on Xeon, only B2B topology is supported.
 */
static int
ntb_detect_atom(struct ntb_softc *ntb)
{
	uint32_t ppd, conn_type;

	ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4);
	ntb->ppd = ppd;

	if ((ppd & ATOM_PPD_DEV_TYPE) != 0)
		ntb->dev_type = NTB_DEV_DSD;
	else
		ntb->dev_type = NTB_DEV_USD;

	conn_type = (ppd & ATOM_PPD_CONN_TYPE) >> 8;
	switch (conn_type) {
	case NTB_CONN_B2B:
		ntb->conn_type = conn_type;
		break;
	default:
		device_printf(ntb->device, "Unsupported NTB configuration\n");
		return (ENXIO);
	}
	return (0);
}

/*
 * Xeon-specific device initialization: set per-hardware counts and register
 * tables, apply doorbell errata workarounds, program the B2B memory windows,
 * enable the secondary side, mask all doorbells, and set up interrupts.
 */
static int
ntb_xeon_init_dev(struct ntb_softc *ntb)
{
	int rc;

	ntb->spad_count		= XEON_SPAD_COUNT;
	ntb->db_count		= XEON_DB_COUNT;
	ntb->db_link_mask	= XEON_DB_LINK_BIT;
	ntb->db_vec_count	= XEON_DB_MSIX_VECTOR_COUNT;
	ntb->db_vec_shift	= XEON_DB_MSIX_VECTOR_SHIFT;

	if (ntb->conn_type != NTB_CONN_B2B) {
		device_printf(ntb->device, "Connection type %d not supported\n",
		    ntb->conn_type);
		return (ENXIO);
	}

	ntb->reg = &xeon_reg;
	ntb->self_reg = &xeon_pri_reg;
	ntb->peer_reg = &xeon_b2b_reg;
	ntb->xlat_reg = &xeon_sec_xlat;

	/*
	 * There is a Xeon hardware errata related to writes to SDOORBELL or
	 * B2BDOORBELL in conjunction with inbound access to NTB MMIO space,
	 * which may hang the system.  To workaround this use the second memory
	 * window to access the interrupt and scratch pad registers on the
	 * remote system.
	 */
	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP))
		/* Use the last MW for mapping remote spad */
		ntb->b2b_mw_idx = ntb->mw_count - 1;
	else if (HAS_FEATURE(NTB_B2BDOORBELL_BIT14))
		/*
		 * HW Errata on bit 14 of b2bdoorbell register.  Writes will not be
		 * mirrored to the remote system.  Shrink the number of bits by one,
		 * since bit 14 is the last bit.
		 *
		 * On REGS_THRU_MW errata mode, we don't use the b2bdoorbell register
		 * anyway.  Nor for non-B2B connection types.
		 */
		ntb->db_count = XEON_DB_COUNT - 1;

	ntb->db_valid_mask = (1ull << ntb->db_count) - 1;

	/* The primary/secondary address sets swap based on our topology. */
	if (ntb->dev_type == NTB_DEV_USD)
		rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr,
		    &xeon_b2b_usd_addr);
	else
		rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr,
		    &xeon_b2b_dsd_addr);
	if (rc != 0)
		return (rc);

	/* Enable Bus Master and Memory Space on the secondary side */
	ntb_reg_write(2, XEON_SPCICMD_OFFSET,
	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);

	/*
	 * Mask all doorbell interrupts.
	 */
	ntb_db_set_mask(ntb, ntb->db_valid_mask);

	rc = ntb_init_isr(ntb);
	return (rc);
}

/*
 * Atom-specific device initialization: set per-hardware counts and register
 * tables, apply the early-HW parity-error workaround, program the secondary
 * side, set up interrupts, kick off link training and the heartbeat timer.
 */
static int
ntb_atom_init_dev(struct ntb_softc *ntb)
{
	int error;

	KASSERT(ntb->conn_type == NTB_CONN_B2B,
	    ("Unsupported NTB configuration (%d)\n", ntb->conn_type));

	ntb->spad_count		 = ATOM_SPAD_COUNT;
	ntb->db_count		 = ATOM_DB_COUNT;
	ntb->db_vec_count	 = ATOM_DB_MSIX_VECTOR_COUNT;
	ntb->db_vec_shift	 = ATOM_DB_MSIX_VECTOR_SHIFT;
	ntb->db_valid_mask	 = (1ull << ntb->db_count) - 1;

	ntb->reg = &atom_reg;
	ntb->self_reg = &atom_pri_reg;
	ntb->peer_reg = &atom_b2b_reg;
	ntb->xlat_reg = &atom_sec_xlat;

	/*
	 * FIXME - MSI-X bug on early Atom HW, remove once internal issue is
	 * resolved.  Mask transaction layer internal parity errors.
	 */
	pci_write_config(ntb->device, 0xFC, 0x4, 4);

	configure_atom_secondary_side_bars(ntb);

	/* Enable Bus Master and Memory Space on the secondary side */
	ntb_reg_write(2, ATOM_SPCICMD_OFFSET,
	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);

	error = ntb_init_isr(ntb);
	if (error != 0)
		return (error);

	/* Initiate PCI-E link training */
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);

	callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb);

	return (0);
}

/* XXX: Linux driver doesn't seem to do any of this for Atom. */
static void
configure_atom_secondary_side_bars(struct ntb_softc *ntb)
{

	/*
	 * NOTE(review): both branches of this if/else are byte-for-byte
	 * identical, so the USD/DSD distinction currently has no effect
	 * here.  Either the DSD branch should use different addresses or
	 * the conditional can be collapsed — confirm against the hardware
	 * documentation before changing.
	 */
	if (ntb->dev_type == NTB_DEV_USD) {
		ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
		    XEON_B2B_BAR2_ADDR64);
		ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
		    XEON_B2B_BAR4_ADDR64);
		ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
		ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
	} else {
		ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
		    XEON_B2B_BAR2_ADDR64);
		ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
		    XEON_B2B_BAR4_ADDR64);
		ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
		ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
	}
}


/*
 * When working around Xeon SDOORBELL errata by remapping remote registers in a
 * MW, limit the B2B MW to half a MW.  By sharing a MW, half the shared MW
 * remains for use by a higher layer.
 *
 * Will only be used if working around SDOORBELL errata and the BIOS-configured
 * MW size is sufficiently large.
 */
static unsigned int ntb_b2b_mw_share;
SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share,
    0, "If enabled (non-zero), prefer to share half of the B2B peer register "
    "MW with higher level consumers. Both sides of the NTB MUST set the same "
    "value here.");

/*
 * Mirror a primary BAR's size (log2) into the corresponding secondary BAR
 * size register.  The BAR carrying the B2B registers is halved (if shared)
 * or disabled (size 0).  Skipped for BAR 4/5 when split-BAR is off.
 */
static void
xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx,
    enum ntb_bar regbar)
{
	struct ntb_pci_bar_info *bar;
	uint8_t bar_sz;

	if (!HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3)
		return;

	bar = &ntb->bar_info[idx];
	bar_sz = pci_read_config(ntb->device, bar->psz_off, 1);
	if (idx == regbar) {
		if (ntb->b2b_off != 0)
			bar_sz--;	/* log2 size: halve the window */
		else
			bar_sz = 0;	/* disable the secondary BAR */
	}
	pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1);
	/* Read back; value intentionally discarded. */
	bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1);
	(void)bar_sz;
}

/*
 * Program a secondary BAR's base and limit registers to the same address
 * (a zero-length incoming window).  The B2B register BAR's base is offset
 * by b2b_off.  Register width depends on whether the BAR is 64-bit.
 */
static void
xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr,
    enum ntb_bar idx, enum ntb_bar regbar)
{
	uint64_t reg_val;
	uint32_t base_reg, lmt_reg;

	bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg);
	if (idx == regbar)
		bar_addr += ntb->b2b_off;

	if (!bar_is_64bit(ntb, idx)) {
		ntb_reg_write(4, base_reg, bar_addr);
		reg_val = ntb_reg_read(4, base_reg);
		(void)reg_val;

		ntb_reg_write(4, lmt_reg, bar_addr);
		reg_val = ntb_reg_read(4, lmt_reg);
		(void)reg_val;
	} else {
		ntb_reg_write(8, base_reg, bar_addr);
		reg_val = ntb_reg_read(8, base_reg);
		(void)reg_val;

		ntb_reg_write(8, lmt_reg, bar_addr);
		reg_val = ntb_reg_read(8, lmt_reg);
		(void)reg_val;
	}
}

/*
 * Program the primary-side (outgoing) translation address for a BAR.  With
 * split-BAR, BARs 4 and 5 use 32-bit xlat registers; otherwise 64-bit.
 */
static void
xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx)
{
	struct ntb_pci_bar_info *bar;

	bar = &ntb->bar_info[idx];
	if (HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) {
		ntb_reg_write(4, bar->pbarxlat_off, base_addr);
		base_addr = ntb_reg_read(4, bar->pbarxlat_off);
	} else {
		ntb_reg_write(8, bar->pbarxlat_off, base_addr);
		base_addr = ntb_reg_read(8, bar->pbarxlat_off);
	}
	/* Read-back value intentionally discarded. */
	(void)base_addr;
}

/*
 * Set up the Xeon B2B memory windows.
 *
 * Decides where the remote B2B registers are reached (config BAR, a whole
 * MW, or half of a shared MW depending on the SDOORBELL errata workaround
 * and the hw.ntb.b2b_mw_share tunable), resets the secondary BAR sizes,
 * and programs the secondary base/limit, incoming and outgoing translation
 * registers from the given local/peer address tables.
 *
 * Returns 0 on success or EIO if the BIOS-configured B2B BAR is too small.
 */
static int
xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
    const struct ntb_b2b_addr *peer_addr)
{
	struct ntb_pci_bar_info *b2b_bar;
	vm_size_t bar_size;
	uint64_t bar_addr;
	enum ntb_bar b2b_bar_num, i;

	if (ntb->b2b_mw_idx == B2B_MW_DISABLED) {
		/* No errata workaround: B2B registers live in BAR 0. */
		b2b_bar = NULL;
		b2b_bar_num = NTB_CONFIG_BAR;
		ntb->b2b_off = 0;
	} else {
		b2b_bar_num = ntb_mw_to_bar(ntb, ntb->b2b_mw_idx);
		KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS,
		    ("invalid b2b mw bar"));

		b2b_bar = &ntb->bar_info[b2b_bar_num];
		bar_size = b2b_bar->size;

		if (ntb_b2b_mw_share != 0 &&
		    (bar_size >> 1) >= XEON_B2B_MIN_SIZE)
			/* Share the MW: B2B regs occupy the upper half. */
			ntb->b2b_off = bar_size >> 1;
		else if (bar_size >= XEON_B2B_MIN_SIZE) {
			/* Dedicate the whole MW; hide it from consumers. */
			ntb->b2b_off = 0;
			ntb->mw_count--;
		} else {
			device_printf(ntb->device,
			    "B2B bar size is too small!\n");
			return (EIO);
		}
	}

	/*
	 * Reset the secondary bar sizes to match the primary bar sizes.
	 * (Except, disable or halve the size of the B2B secondary bar.)
	 */
	for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++)
		xeon_reset_sbar_size(ntb, i, b2b_bar_num);

	/* Pick the local address the peer uses to reach our B2B registers. */
	bar_addr = 0;
	if (b2b_bar_num == NTB_CONFIG_BAR)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar_num == NTB_B2B_BAR_1)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR))
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar_num == NTB_B2B_BAR_3)
		bar_addr = addr->bar5_addr32;
	else
		KASSERT(false, ("invalid bar"));

	ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr);

	/*
	 * Other SBARs are normally hit by the PBAR xlat, except for the b2b
	 * register BAR.  The B2B BAR is either disabled above or configured
	 * half-size.  It starts at PBAR xlat + offset.
	 *
	 * Also set up incoming BAR limits == base (zero length window).
	 */
	xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1,
	    b2b_bar_num);
	if (HAS_FEATURE(NTB_SPLIT_BAR)) {
		xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32,
		    NTB_B2B_BAR_2, b2b_bar_num);
		xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32,
		    NTB_B2B_BAR_3, b2b_bar_num);
	} else
		xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64,
		    NTB_B2B_BAR_2, b2b_bar_num);

	/* Zero incoming translation addrs */
	ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0);
	ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0);

	/* Zero outgoing translation limits (whole bar size windows) */
	ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0);
	ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0);

	/* Set outgoing translation offsets */
	xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1);
	if (HAS_FEATURE(NTB_SPLIT_BAR)) {
		xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2);
		xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3);
	} else
		xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2);

	/* Set the translation offset for B2B registers */
	bar_addr = 0;
	if (b2b_bar_num == NTB_CONFIG_BAR)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar_num == NTB_B2B_BAR_1)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR))
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar_num == NTB_B2B_BAR_2)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar_num == NTB_B2B_BAR_3)
		bar_addr = peer_addr->bar5_addr32;
	else
		KASSERT(false, ("invalid bar"));

	/*
	 * B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits
	 * at a time.
	 */
	ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff);
	ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32);
	return (0);
}

/*
 * Current link state, from the cached control/status registers.  Transparent
 * Xeon topologies always report the link as up.
 */
static inline bool
link_is_up(struct ntb_softc *ntb)
{

	if (ntb->type == NTB_XEON) {
		if (ntb->conn_type == NTB_CONN_TRANSPARENT)
			return (true);
		return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0);
	}

	KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
	return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0);
}

/*
 * Check the Atom LTSSM and IBIST error registers for conditions that require
 * the link recovery procedure (see recover_atom_link()).
 */
static inline bool
atom_link_is_err(struct ntb_softc *ntb)
{
	uint32_t status;

	KASSERT(ntb->type == NTB_ATOM, ("ntb type"));

	status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
	if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0)
		return (true);

	status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
	return ((status & ATOM_IBIST_ERR_OFLOW) != 0);
}

/* Atom does not have link status interrupt, poll on that platform */
static void
atom_link_hb(void *arg)
{
	struct ntb_softc *ntb = arg;
	sbintime_t timo, poll_ts;

	/*
	 * NOTE(review): timo/poll_ts are in units of ticks (hz, last_ts is
	 * set from `ticks`) despite the sbintime_t type — confirm the type
	 * choice; the arithmetic below relies on signed wraparound compare.
	 */
	timo = NTB_HB_TIMEOUT * hz;
	poll_ts = ntb->last_ts + timo;

	/*
	 * Delay polling the link status if an interrupt was received, unless
	 * the cached link status says the link is down.
	 */
	if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) {
		timo = poll_ts - ticks;
		goto out;
	}

	if (ntb_poll_link(ntb))
		ntb_link_event(ntb);

	if (!link_is_up(ntb) && atom_link_is_err(ntb)) {
		/* Link is down with error, proceed with recovery */
		callout_reset(&ntb->lr_timer, 0, recover_atom_link, ntb);
		return;
	}

out:
	callout_reset(&ntb->heartbeat_timer, timo, atom_link_hb, ntb);
}

/*
 * Reset the Atom ModPhy lanes and clear the various sticky error bits so the
 * link state machine can retrain.  Register sequence and 100ms settle delay
 * follow the hardware procedure; do not reorder.
 */
static void
atom_perform_link_restart(struct ntb_softc *ntb)
{
	uint32_t status;

	/* Driver resets the NTB ModPhy lanes - magic! */
	ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
	ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
	ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
	ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);

	/* Driver waits 100ms to allow the NTB ModPhy to settle */
	pause("ModPhy", hz / 10);

	/* Clear AER Errors, write to clear */
	status = ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
	status &= PCIM_AER_COR_REPLAY_ROLLOVER;
	ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);

	/* Clear unexpected electrical idle event in LTSSM, write to clear */
	status = ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
	status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
	ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);

	/* Clear DeSkew Buffer error, write to clear */
	status = ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
	status |= ATOM_DESKEWSTS_DBERR;
	ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);

	status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
	status &= ATOM_IBIST_ERR_OFLOW;
	ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);

	/* Releases the NTB state machine to allow the link to retrain */
	status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
	status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
	ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
}

/*
 * ntb_set_ctx() - associate a driver context with an ntb device
 * @ntb:        NTB device context
 * @ctx:        Driver context
 * @ctx_ops:    Driver context operations
 *
 * Associate a driver context and operations with a ntb device.  The context is
 * provided by the client driver, and the driver may associate a different
 * context with each ntb device.
 *
 * Return: Zero if the context is associated, otherwise an error number.
 */
int
ntb_set_ctx(struct ntb_softc *ntb, void *ctx, const struct ntb_ctx_ops *ops)
{

	if (ctx == NULL || ops == NULL)
		return (EINVAL);
	/* Unlocked fast-path check; re-checked under the lock below. */
	if (ntb->ctx_ops != NULL)
		return (EINVAL);

	CTX_LOCK(ntb);
	if (ntb->ctx_ops != NULL) {
		CTX_UNLOCK(ntb);
		return (EINVAL);
	}
	ntb->ntb_ctx = ctx;
	ntb->ctx_ops = ops;
	CTX_UNLOCK(ntb);

	return (0);
}

/*
 * It is expected that this will only be used from contexts where the ctx_lock
 * is not needed to protect ntb_ctx lifetime.
 */
void *
ntb_get_ctx(struct ntb_softc *ntb, const struct ntb_ctx_ops **ops)
{

	KASSERT(ntb->ntb_ctx != NULL && ntb->ctx_ops != NULL, ("bogus"));
	if (ops != NULL)
		*ops = ntb->ctx_ops;
	return (ntb->ntb_ctx);
}

/*
 * ntb_clear_ctx() - disassociate any driver context from an ntb device
 * @ntb:        NTB device context
 *
 * Clear any association that may exist between a driver context and the ntb
 * device.
 */
void
ntb_clear_ctx(struct ntb_softc *ntb)
{

	CTX_LOCK(ntb);
	ntb->ntb_ctx = NULL;
	ntb->ctx_ops = NULL;
	CTX_UNLOCK(ntb);
}

/*
 * ntb_link_event() - notify driver context of a change in link status
 * @ntb:        NTB device context
 *
 * Notify the driver context that the link status may have changed.  The driver
 * should call ntb_link_is_up() to get the current status.
 */
void
ntb_link_event(struct ntb_softc *ntb)
{

	CTX_LOCK(ntb);
	if (ntb->ctx_ops != NULL && ntb->ctx_ops->link_event != NULL)
		ntb->ctx_ops->link_event(ntb->ntb_ctx);
	CTX_UNLOCK(ntb);
}

/*
 * ntb_db_event() - notify driver context of a doorbell event
 * @ntb:        NTB device context
 * @vector:     Interrupt vector number
 *
 * Notify the driver context of a doorbell event.  If hardware supports
 * multiple interrupt vectors for doorbells, the vector number indicates which
 * vector received the interrupt.  The vector number is relative to the first
 * vector used for doorbells, starting at zero, and must be less than
 * ntb_db_vector_count().  The driver may call ntb_db_read() to check which
 * doorbell bits need service, and ntb_db_vector_mask() to determine which of
 * those bits are associated with the vector number.
 */
static void
ntb_db_event(struct ntb_softc *ntb, uint32_t vec)
{

	CTX_LOCK(ntb);
	if (ntb->ctx_ops != NULL && ntb->ctx_ops->db_event != NULL)
		ntb->ctx_ops->db_event(ntb->ntb_ctx, vec);
	CTX_UNLOCK(ntb);
}

/*
 * ntb_link_enable() - enable the link on the secondary side of the ntb
 * @ntb:        NTB device context
 * @max_speed:  The maximum link speed expressed as PCIe generation number[0]
 * @max_width:  The maximum link width expressed as the number of PCIe lanes[0]
 *
 * Enable the link on the secondary side of the ntb.  This can only be done
 * from the primary side of the ntb in primary or b2b topology.  The ntb device
 * should train the link to its maximum speed and width, or the requested speed
 * and width, whichever is smaller, if supported.
 *
 * Return: Zero on success, otherwise an error number.
 *
 * [0]: Only NTB_SPEED_AUTO and NTB_WIDTH_AUTO are valid inputs; other speed
 *      and width input will be ignored.
 */
int
ntb_link_enable(struct ntb_softc *ntb, enum ntb_speed s __unused,
    enum ntb_width w __unused)
{
	uint32_t cntl;

	if (ntb->type == NTB_ATOM) {
		/* Atom: link training is kicked off via the PPD config reg. */
		pci_write_config(ntb->device, NTB_PPD_OFFSET,
		    ntb->ppd | ATOM_PPD_INIT_LINK, 4);
		return (0);
	}

	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
		ntb_link_event(ntb);
		return (0);
	}

	/* Xeon: unlock config, clear link-disable, enable BAR snooping. */
	cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
	cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
	cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
	cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
	if (HAS_FEATURE(NTB_SPLIT_BAR))
		cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
	ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
	return (0);
}

/*
 * ntb_link_disable() - disable the link on the secondary side of the ntb
 * @ntb:        NTB device context
 *
 * Disable the link on the secondary side of the ntb.  This can only be done
 * from the primary side of the ntb in primary or b2b topology.  The ntb device
 * should disable the link.  Returning from this call must indicate that a
 * barrier has passed, though with no more writes may pass in either direction
 * across the link, except if this call returns an error number.
 *
 * Return: Zero on success, otherwise an error number.
 */
int
ntb_link_disable(struct ntb_softc *ntb)
{
	uint32_t cntl;

	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
		ntb_link_event(ntb);
		return (0);
	}

	/* Reverse of ntb_link_enable(): drop snooping, disable and lock. */
	cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
	cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
	cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
	if (HAS_FEATURE(NTB_SPLIT_BAR))
		cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
	cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
	ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
	return (0);
}

/*
 * Atom link recovery: restart the link, wait a randomized interval, then
 * either resume the heartbeat (link recovered or cleanly down) or retry.
 */
static void
recover_atom_link(void *arg)
{
	struct ntb_softc *ntb = arg;
	unsigned speed, width, oldspeed, oldwidth;
	uint32_t status32;

	atom_perform_link_restart(ntb);

	/*
	 * There is a potential race between the 2 NTB devices recovering at
	 * the same time.  If the times are the same, the link will not recover
	 * and the driver will be stuck in this loop forever.  Add a random
	 * interval to the recovery time to prevent this race.
	 */
	status32 = arc4random() % ATOM_LINK_RECOVERY_TIME;
	pause("Link", (ATOM_LINK_RECOVERY_TIME + status32) * hz / 1000);

	if (atom_link_is_err(ntb))
		goto retry;

	status32 = ntb_reg_read(4, ntb->reg->ntb_ctl);
	if ((status32 & ATOM_CNTL_LINK_DOWN) != 0)
		goto out;

	/* Retry if the retrained speed/width differ from the cached values. */
	status32 = ntb_reg_read(4, ntb->reg->lnk_sta);
	width = NTB_LNK_STA_WIDTH(status32);
	speed = status32 & NTB_LINK_SPEED_MASK;

	oldwidth = NTB_LNK_STA_WIDTH(ntb->lnk_sta);
	oldspeed = ntb->lnk_sta & NTB_LINK_SPEED_MASK;
	if (oldwidth != width || oldspeed != speed)
		goto retry;

out:
	callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, atom_link_hb,
	    ntb);
	return;

retry:
	callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_atom_link,
	    ntb);
}

/*
 * Polls the HW link status register(s); returns true if something has changed.
 */
static bool
ntb_poll_link(struct ntb_softc *ntb)
{
	uint32_t ntb_cntl;
	uint16_t reg_val;

	if (ntb->type == NTB_ATOM) {
		ntb_cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
		if (ntb_cntl == ntb->ntb_ctl)
			return (false);

		ntb->ntb_ctl = ntb_cntl;
		ntb->lnk_sta = ntb_reg_read(4, ntb->reg->lnk_sta);
	} else {
		/* Xeon: clear the link doorbell bit before re-reading. */
		db_iowrite_raw(ntb, ntb->self_reg->db_bell, ntb->db_link_mask);

		reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2);
		if (reg_val == ntb->lnk_sta)
			return (false);

		ntb->lnk_sta = reg_val;
	}
	return (true);
}

/* Current link speed from the cached status, or NTB_SPEED_NONE if down. */
static inline enum ntb_speed
ntb_link_sta_speed(struct ntb_softc *ntb)
{

	if (!link_is_up(ntb))
		return (NTB_SPEED_NONE);
	return (ntb->lnk_sta & NTB_LINK_SPEED_MASK);
}

/* Current link width from the cached status, or NTB_WIDTH_NONE if down. */
static inline enum ntb_width
ntb_link_sta_width(struct ntb_softc *ntb)
{

	if (!link_is_up(ntb))
		return (NTB_WIDTH_NONE);
	return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
}

1941SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW, 0, 1942 "Driver state, statistics, and HW registers"); 1943 1944#define NTB_REGSZ_MASK (3ul << 30) 1945#define NTB_REG_64 (1ul << 30) 1946#define NTB_REG_32 (2ul << 30) 1947#define NTB_REG_16 (3ul << 30) 1948#define NTB_REG_8 (0ul << 30) 1949 1950#define NTB_DB_READ (1ul << 29) 1951#define NTB_PCI_REG (1ul << 28) 1952#define NTB_REGFLAGS_MASK (NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG) 1953 1954static void 1955ntb_sysctl_init(struct ntb_softc *ntb) 1956{ 1957 struct sysctl_oid_list *tree_par, *regpar, *statpar, *errpar; 1958 struct sysctl_ctx_list *ctx; 1959 struct sysctl_oid *tree, *tmptree; 1960 1961 ctx = device_get_sysctl_ctx(ntb->device); 1962 1963 tree = SYSCTL_ADD_NODE(ctx, 1964 SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device)), OID_AUTO, 1965 "debug_info", CTLFLAG_RD, NULL, 1966 "Driver state, statistics, and HW registers"); 1967 tree_par = SYSCTL_CHILDREN(tree); 1968 1969 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "conn_type", CTLFLAG_RD, 1970 &ntb->conn_type, 0, "0 - Transparent; 1 - B2B; 2 - Root Port"); 1971 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "dev_type", CTLFLAG_RD, 1972 &ntb->dev_type, 0, "0 - USD; 1 - DSD"); 1973 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ppd", CTLFLAG_RD, 1974 &ntb->ppd, 0, "Raw PPD register (cached)"); 1975 1976 if (ntb->b2b_mw_idx != B2B_MW_DISABLED) { 1977 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "b2b_idx", CTLFLAG_RD, 1978 &ntb->b2b_mw_idx, 0, 1979 "Index of the MW used for B2B remote register access"); 1980 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "b2b_off", 1981 CTLFLAG_RD, &ntb->b2b_off, 1982 "If non-zero, offset of B2B register region in shared MW"); 1983 } 1984 1985 SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "features", 1986 CTLFLAG_RD | CTLTYPE_STRING, ntb, 0, sysctl_handle_features, "A", 1987 "Features/errata of this NTB device"); 1988 1989 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ntb_ctl", CTLFLAG_RD, 1990 __DEVOLATILE(uint32_t *, &ntb->ntb_ctl), 0, 1991 "NTB 
CTL register (cached)"); 1992 SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "lnk_sta", CTLFLAG_RD, 1993 __DEVOLATILE(uint32_t *, &ntb->lnk_sta), 0, 1994 "LNK STA register (cached)"); 1995 1996 SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "link_status", 1997 CTLFLAG_RD | CTLTYPE_STRING, ntb, 0, sysctl_handle_link_status, 1998 "A", "Link status"); 1999 2000 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "mw_count", CTLFLAG_RD, 2001 &ntb->mw_count, 0, "MW count (excl. non-shared B2B register BAR)"); 2002 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "spad_count", CTLFLAG_RD, 2003 &ntb->spad_count, 0, "Scratchpad count"); 2004 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_count", CTLFLAG_RD, 2005 &ntb->db_count, 0, "Doorbell count"); 2006 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_count", CTLFLAG_RD, 2007 &ntb->db_vec_count, 0, "Doorbell vector count"); 2008 SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_shift", CTLFLAG_RD, 2009 &ntb->db_vec_shift, 0, "Doorbell vector shift"); 2010 2011 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_valid_mask", CTLFLAG_RD, 2012 &ntb->db_valid_mask, "Doorbell valid mask"); 2013 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_link_mask", CTLFLAG_RD, 2014 &ntb->db_link_mask, "Doorbell link mask"); 2015 SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_mask", CTLFLAG_RD, 2016 &ntb->db_mask, "Doorbell mask (cached)"); 2017 2018 tmptree = SYSCTL_ADD_NODE(ctx, tree_par, OID_AUTO, "registers", 2019 CTLFLAG_RD, NULL, "Raw HW registers (big-endian)"); 2020 regpar = SYSCTL_CHILDREN(tmptree); 2021 2022 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ntbcntl", 2023 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 2024 ntb->reg->ntb_ctl, sysctl_handle_register, "IU", 2025 "NTB Control register"); 2026 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcap", 2027 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 2028 0x19c, sysctl_handle_register, "IU", 2029 "NTB Link Capabilities"); 2030 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcon", 2031 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 2032 0x1a0, 
sysctl_handle_register, "IU", 2033 "NTB Link Control register"); 2034 2035 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_mask", 2036 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2037 NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_mask, 2038 sysctl_handle_register, "QU", "Doorbell mask register"); 2039 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_bell", 2040 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2041 NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_bell, 2042 sysctl_handle_register, "QU", "Doorbell register"); 2043 2044 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat23", 2045 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2046 NTB_REG_64 | ntb->xlat_reg->bar2_xlat, 2047 sysctl_handle_register, "QU", "Incoming XLAT23 register"); 2048 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2049 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat4", 2050 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2051 NTB_REG_32 | ntb->xlat_reg->bar4_xlat, 2052 sysctl_handle_register, "IU", "Incoming XLAT4 register"); 2053 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat5", 2054 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2055 NTB_REG_32 | ntb->xlat_reg->bar5_xlat, 2056 sysctl_handle_register, "IU", "Incoming XLAT5 register"); 2057 } else { 2058 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat45", 2059 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2060 NTB_REG_64 | ntb->xlat_reg->bar4_xlat, 2061 sysctl_handle_register, "QU", "Incoming XLAT45 register"); 2062 } 2063 2064 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt23", 2065 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2066 NTB_REG_64 | ntb->xlat_reg->bar2_limit, 2067 sysctl_handle_register, "QU", "Incoming LMT23 register"); 2068 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2069 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt4", 2070 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2071 NTB_REG_32 | ntb->xlat_reg->bar4_limit, 2072 sysctl_handle_register, "IU", "Incoming LMT4 register"); 2073 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt5", 2074 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2075 NTB_REG_32 | ntb->xlat_reg->bar5_limit, 2076 
sysctl_handle_register, "IU", "Incoming LMT5 register"); 2077 } else { 2078 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt45", 2079 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2080 NTB_REG_64 | ntb->xlat_reg->bar4_limit, 2081 sysctl_handle_register, "QU", "Incoming LMT45 register"); 2082 } 2083 2084 if (ntb->type == NTB_ATOM) 2085 return; 2086 2087 tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_stats", 2088 CTLFLAG_RD, NULL, "Xeon HW statistics"); 2089 statpar = SYSCTL_CHILDREN(tmptree); 2090 SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "upstream_mem_miss", 2091 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2092 NTB_REG_16 | XEON_USMEMMISS_OFFSET, 2093 sysctl_handle_register, "SU", "Upstream Memory Miss"); 2094 2095 tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_hw_err", 2096 CTLFLAG_RD, NULL, "Xeon HW errors"); 2097 errpar = SYSCTL_CHILDREN(tmptree); 2098 2099 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ppd", 2100 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2101 NTB_REG_8 | NTB_PCI_REG | NTB_PPD_OFFSET, 2102 sysctl_handle_register, "CU", "PPD"); 2103 2104 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar23_sz", 2105 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2106 NTB_REG_8 | NTB_PCI_REG | XEON_PBAR23SZ_OFFSET, 2107 sysctl_handle_register, "CU", "PBAR23 SZ (log2)"); 2108 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar4_sz", 2109 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2110 NTB_REG_8 | NTB_PCI_REG | XEON_PBAR4SZ_OFFSET, 2111 sysctl_handle_register, "CU", "PBAR4 SZ (log2)"); 2112 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar5_sz", 2113 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2114 NTB_REG_8 | NTB_PCI_REG | XEON_PBAR5SZ_OFFSET, 2115 sysctl_handle_register, "CU", "PBAR5 SZ (log2)"); 2116 2117 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_sz", 2118 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2119 NTB_REG_8 | NTB_PCI_REG | XEON_SBAR23SZ_OFFSET, 2120 sysctl_handle_register, "CU", "SBAR23 SZ (log2)"); 2121 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_sz", 2122 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2123 NTB_REG_8 | NTB_PCI_REG | 
XEON_SBAR4SZ_OFFSET, 2124 sysctl_handle_register, "CU", "SBAR4 SZ (log2)"); 2125 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_sz", 2126 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2127 NTB_REG_8 | NTB_PCI_REG | XEON_SBAR5SZ_OFFSET, 2128 sysctl_handle_register, "CU", "SBAR5 SZ (log2)"); 2129 2130 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "devsts", 2131 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2132 NTB_REG_16 | NTB_PCI_REG | XEON_DEVSTS_OFFSET, 2133 sysctl_handle_register, "SU", "DEVSTS"); 2134 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnksts", 2135 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2136 NTB_REG_16 | NTB_PCI_REG | XEON_LINK_STATUS_OFFSET, 2137 sysctl_handle_register, "SU", "LNKSTS"); 2138 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "slnksts", 2139 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2140 NTB_REG_16 | NTB_PCI_REG | XEON_SLINK_STATUS_OFFSET, 2141 sysctl_handle_register, "SU", "SLNKSTS"); 2142 2143 SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "uncerrsts", 2144 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2145 NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET, 2146 sysctl_handle_register, "IU", "UNCERRSTS"); 2147 SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "corerrsts", 2148 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2149 NTB_REG_32 | NTB_PCI_REG | XEON_CORERRSTS_OFFSET, 2150 sysctl_handle_register, "IU", "CORERRSTS"); 2151 2152 if (ntb->conn_type != NTB_CONN_B2B) 2153 return; 2154 2155 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat23", 2156 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2157 NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off, 2158 sysctl_handle_register, "QU", "Outgoing XLAT23 register"); 2159 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2160 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat4", 2161 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2162 NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off, 2163 sysctl_handle_register, "IU", "Outgoing XLAT4 register"); 2164 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat5", 2165 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2166 NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off, 2167 
sysctl_handle_register, "IU", "Outgoing XLAT5 register"); 2168 } else { 2169 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat45", 2170 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2171 NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off, 2172 sysctl_handle_register, "QU", "Outgoing XLAT45 register"); 2173 } 2174 2175 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt23", 2176 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2177 NTB_REG_64 | XEON_PBAR2LMT_OFFSET, 2178 sysctl_handle_register, "QU", "Outgoing LMT23 register"); 2179 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2180 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt4", 2181 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2182 NTB_REG_32 | XEON_PBAR4LMT_OFFSET, 2183 sysctl_handle_register, "IU", "Outgoing LMT4 register"); 2184 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt5", 2185 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2186 NTB_REG_32 | XEON_PBAR5LMT_OFFSET, 2187 sysctl_handle_register, "IU", "Outgoing LMT5 register"); 2188 } else { 2189 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt45", 2190 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2191 NTB_REG_64 | XEON_PBAR4LMT_OFFSET, 2192 sysctl_handle_register, "QU", "Outgoing LMT45 register"); 2193 } 2194 2195 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar01_base", 2196 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2197 NTB_REG_64 | ntb->xlat_reg->bar0_base, 2198 sysctl_handle_register, "QU", "Secondary BAR01 base register"); 2199 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_base", 2200 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2201 NTB_REG_64 | ntb->xlat_reg->bar2_base, 2202 sysctl_handle_register, "QU", "Secondary BAR23 base register"); 2203 if (HAS_FEATURE(NTB_SPLIT_BAR)) { 2204 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_base", 2205 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2206 NTB_REG_32 | ntb->xlat_reg->bar4_base, 2207 sysctl_handle_register, "IU", 2208 "Secondary BAR4 base register"); 2209 SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_base", 2210 CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, 2211 NTB_REG_32 | 
		    ntb->xlat_reg->bar5_base,
		    sysctl_handle_register, "IU",
		    "Secondary BAR5 base register");
	} else {
		SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar45_base",
		    CTLFLAG_RD | CTLTYPE_OPAQUE, ntb,
		    NTB_REG_64 | ntb->xlat_reg->bar4_base,
		    sysctl_handle_register, "QU",
		    "Secondary BAR45 base register");
	}
}

/*
 * Sysctl handler: report the NTB feature/errata flags (ntb->features) as a
 * human-readable "%b" flag string.  Read-only: an attempted write is
 * rejected with EINVAL.
 */
static int
sysctl_handle_features(SYSCTL_HANDLER_ARGS)
{
	struct ntb_softc *ntb;
	struct sbuf sb;
	int error;

	error = 0;
	ntb = arg1;

	sbuf_new_for_sysctl(&sb, NULL, 256, req);

	sbuf_printf(&sb, "%b", ntb->features, NTB_FEATURES_STR);
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	/* Read-only node: reject writes. */
	if (error || !req->newptr)
		return (error);
	return (EINVAL);
}

/*
 * Sysctl handler: report the current link state, either
 * "up / PCIe Gen <speed> / Width x<width>" or "down", as queried from
 * ntb_link_is_up().  Read-only.
 */
static int
sysctl_handle_link_status(SYSCTL_HANDLER_ARGS)
{
	struct ntb_softc *ntb;
	struct sbuf sb;
	enum ntb_speed speed;
	enum ntb_width width;
	int error;

	error = 0;
	ntb = arg1;

	sbuf_new_for_sysctl(&sb, NULL, 32, req);

	if (ntb_link_is_up(ntb, &speed, &width))
		sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u",
		    (unsigned)speed, (unsigned)width);
	else
		sbuf_printf(&sb, "down");

	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (error || !req->newptr)
		return (error);
	return (EINVAL);
}

/*
 * Generic read-only register-dump sysctl handler.  arg2 encodes what to
 * read: the low bits (below NTB_REGFLAGS_MASK) are the register offset,
 * while the flag bits select the access width (NTB_REG_8/16/32/64) and the
 * access method (NTB_DB_READ for doorbell registers, NTB_PCI_REG for PCI
 * config space, otherwise a plain MMIO register read).
 */
static int
sysctl_handle_register(SYSCTL_HANDLER_ARGS)
{
	struct ntb_softc *ntb;
	const void *outp;
	uintptr_t sz;
	uint64_t umv;
	char be[sizeof(umv)];
	size_t outsz;
	uint32_t reg;
	bool db, pci;
	int error;

	ntb = arg1;
	reg = arg2 & ~NTB_REGFLAGS_MASK;
	sz = arg2 & NTB_REGSZ_MASK;
	db = (arg2 & NTB_DB_READ) != 0;
	pci = (arg2 & NTB_PCI_REG) != 0;

	/* The doorbell and PCI-config access methods are mutually exclusive. */
	KASSERT(!(db && pci), ("bogus"));

	if (db) {
		/* Doorbell registers are only ever dumped as 64-bit. */
		KASSERT(sz == NTB_REG_64, ("bogus"));
		umv = db_ioread(ntb, reg);
		outsz = sizeof(uint64_t);
	} else {
		switch (sz) {
		case NTB_REG_64:
			if (pci)
				umv = pci_read_config(ntb->device, reg, 8);
			else
				umv = ntb_reg_read(8, reg);
			outsz = sizeof(uint64_t);
			break;
		case NTB_REG_32:
			if (pci)
				umv = pci_read_config(ntb->device, reg, 4);
			else
				umv = ntb_reg_read(4, reg);
			outsz = sizeof(uint32_t);
			break;
		case NTB_REG_16:
			if (pci)
				umv = pci_read_config(ntb->device, reg, 2);
			else
				umv = ntb_reg_read(2, reg);
			outsz = sizeof(uint16_t);
			break;
		case NTB_REG_8:
			if (pci)
				umv = pci_read_config(ntb->device, reg, 1);
			else
				umv = ntb_reg_read(1, reg);
			outsz = sizeof(uint8_t);
			break;
		default:
			panic("bogus");
			break;
		}
	}

	/* Encode bigendian so that sysctl -x is legible. */
	be64enc(be, umv);
	outp = ((char *)be) + sizeof(umv) - outsz;

	error = SYSCTL_OUT(req, outp, outsz);
	if (error || !req->newptr)
		return (error);
	return (EINVAL);
}

/*
 * Public API to the rest of the OS
 */

/**
 * ntb_get_max_spads() - get the total scratch regs usable
 * @ntb: pointer to ntb_softc instance
 *
 * This function returns the max 32bit scratchpad registers usable by the
 * upper layer.
 *
 * RETURNS: total number of scratch pad registers available
 */
uint8_t
ntb_get_max_spads(struct ntb_softc *ntb)
{

	return (ntb->spad_count);
}

/* Number of memory windows this NTB exposes to consumers. */
uint8_t
ntb_mw_count(struct ntb_softc *ntb)
{

	return (ntb->mw_count);
}

/**
 * ntb_spad_write() - write to the local scratchpad register
 * @ntb: pointer to ntb_softc instance
 * @idx: index to the scratchpad register, 0 based
 * @val: the data value to put into the register
 *
 * This function allows writing of a 32bit value to the indexed scratchpad
 * register. The register resides on the primary (internal) side.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
int
ntb_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val)
{

	if (idx >= ntb->spad_count)
		return (EINVAL);

	/* Scratchpad registers are 32 bits wide, hence the idx * 4 stride. */
	ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val);

	return (0);
}

/**
 * ntb_spad_read() - read from the primary scratchpad register
 * @ntb: pointer to ntb_softc instance
 * @idx: index to scratchpad register, 0 based
 * @val: pointer to 32bit integer for storing the register value
 *
 * This function allows reading of the 32bit scratchpad register on
 * the primary (internal) side.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
int
ntb_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val)
{

	if (idx >= ntb->spad_count)
		return (EINVAL);

	*val = ntb_reg_read(4, ntb->self_reg->spad + idx * 4);

	return (0);
}

/**
 * ntb_peer_spad_write() - write to the secondary scratchpad register
 * @ntb: pointer to ntb_softc instance
 * @idx: index to the scratchpad register, 0 based
 * @val: the data value to put into the register
 *
 * This function allows writing of a 32bit value to the indexed scratchpad
 * register. The register resides on the secondary (external) side.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
int
ntb_peer_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val)
{

	if (idx >= ntb->spad_count)
		return (EINVAL);

	/*
	 * On parts with the SDOORBELL_LOCKUP errata the peer's scratchpads
	 * are reached through a memory window rather than the register BAR.
	 */
	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP))
		ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val);
	else
		ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val);

	return (0);
}

/**
 * ntb_peer_spad_read() - read from the secondary scratchpad register
 * @ntb: pointer to ntb_softc instance
 * @idx: index to scratchpad register, 0 based
 * @val: pointer to 32bit integer for storing the register value
 *
 * This function allows reading of the 32bit scratchpad register on
 * the secondary (external) side.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
int
ntb_peer_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val)
{

	if (idx >= ntb->spad_count)
		return (EINVAL);

	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP))
		*val = ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4);
	else
		*val = ntb_reg_read(4, ntb->peer_reg->spad + idx * 4);

	return (0);
}

/*
 * ntb_mw_get_range() - get the range of a memory window
 * @ntb: NTB device context
 * @idx: Memory window number
 * @base: OUT - the base address for mapping the memory window
 * @size: OUT - the size for mapping the memory window
 * @align: OUT - the base alignment for translating the memory window
 * @align_size: OUT - the size alignment for translating the memory window
 *
 * Get the range of a memory window. NULL may be given for any output
 * parameter if the value is not needed. The base and size may be used for
 * mapping the memory window, to access the peer memory. The alignment and
 * size may be used for translating the memory window, for the peer to access
 * memory on the local system.
 *
 * Return: Zero on success, otherwise an error number.
 */
int
ntb_mw_get_range(struct ntb_softc *ntb, unsigned mw_idx, vm_paddr_t *base,
    caddr_t *vbase, size_t *size, size_t *align, size_t *align_size)
{
	struct ntb_pci_bar_info *bar;
	size_t bar_b2b_off;

	if (mw_idx >= ntb_mw_count(ntb))
		return (EINVAL);

	bar = &ntb->bar_info[ntb_mw_to_bar(ntb, mw_idx)];
	bar_b2b_off = 0;
	/*
	 * If this window's BAR doubles as the B2B register window, the low
	 * b2b_off bytes are reserved and only the remainder is usable.
	 */
	if (mw_idx == ntb->b2b_mw_idx) {
		KASSERT(ntb->b2b_off != 0,
		    ("user shouldn't get non-shared b2b mw"));
		bar_b2b_off = ntb->b2b_off;
	}

	if (base != NULL)
		*base = bar->pbase + bar_b2b_off;
	if (vbase != NULL)
		*vbase = bar->vbase + bar_b2b_off;
	if (size != NULL)
		*size = bar->size - bar_b2b_off;
	if (align != NULL)
		*align = bar->size;
	if (align_size != NULL)
		*align_size = 1;
	return (0);
}

/*
 * ntb_mw_set_trans() - set the translation of a memory window
 * @ntb: NTB device context
 * @idx: Memory window number
 * @addr: The dma address local memory to expose to the peer
 * @size: The size of the local memory to expose to the peer
 *
 * Set the translation of a memory window. The peer may access local memory
 * through the window starting at the address, up to the size. The address
 * must be aligned to the alignment specified by ntb_mw_get_range(). The size
 * must be aligned to the size alignment specified by ntb_mw_get_range().
 *
 * Return: Zero on success, otherwise an error number.
 */
int
ntb_mw_set_trans(struct ntb_softc *ntb, unsigned idx, bus_addr_t addr,
    size_t size)
{
	struct ntb_pci_bar_info *bar;
	uint64_t base, limit, reg_val;
	size_t bar_size, mw_size;
	uint32_t base_reg, xlat_reg, limit_reg;
	enum ntb_bar bar_num;

	if (idx >= ntb_mw_count(ntb))
		return (EINVAL);

	bar_num = ntb_mw_to_bar(ntb, idx);
	bar = &ntb->bar_info[bar_num];

	bar_size = bar->size;
	/* The B2B window shares its BAR; only part of it is usable. */
	if (idx == ntb->b2b_mw_idx)
		mw_size = bar_size - ntb->b2b_off;
	else
		mw_size = bar_size;

	/* Hardware requires that addr is aligned to bar size */
	if ((addr & (bar_size - 1)) != 0)
		return (EINVAL);

	if (size > mw_size)
		return (EINVAL);

	bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg);

	/* limit == 0 means "no limit" (the window spans the whole BAR). */
	limit = 0;
	if (bar_is_64bit(ntb, bar_num)) {
		base = ntb_reg_read(8, base_reg);

		if (limit_reg != 0 && size != mw_size)
			limit = base + size;

		/* Set and verify translation address */
		ntb_reg_write(8, xlat_reg, addr);
		reg_val = ntb_reg_read(8, xlat_reg);
		if (reg_val != addr) {
			/* Read-back mismatch: undo and report I/O error. */
			ntb_reg_write(8, xlat_reg, 0);
			return (EIO);
		}

		/*
		 * Set and verify the limit.
		 * NOTE(review): limit_reg is written even when it is 0;
		 * confirm bar_get_xlat_params() always yields a valid limit
		 * register offset for 64-bit BARs.
		 */
		ntb_reg_write(8, limit_reg, limit);
		reg_val = ntb_reg_read(8, limit_reg);
		if (reg_val != limit) {
			/* Restore old limit and clear the translation. */
			ntb_reg_write(8, limit_reg, base);
			ntb_reg_write(8, xlat_reg, 0);
			return (EIO);
		}
	} else {
		/* Configure 32-bit (split) BAR MW */

		/* Narrow registers cannot hold a >32-bit address or limit. */
		if ((addr & UINT32_MAX) != addr)
			return (EINVAL);
		if (((addr + size) & UINT32_MAX) != (addr + size))
			return (EINVAL);

		base = ntb_reg_read(4, base_reg);

		if (limit_reg != 0 && size != mw_size)
			limit = base + size;

		/* Set and verify translation address */
		ntb_reg_write(4, xlat_reg, addr);
		reg_val = ntb_reg_read(4, xlat_reg);
		if (reg_val != addr) {
			ntb_reg_write(4, xlat_reg, 0);
			return (EIO);
		}

		/* Set and verify the limit */
		ntb_reg_write(4, limit_reg, limit);
		reg_val = ntb_reg_read(4, limit_reg);
		if (reg_val != limit) {
			ntb_reg_write(4, limit_reg, base);
			ntb_reg_write(4, xlat_reg, 0);
			return (EIO);
		}
	}
	return (0);
}

/*
 * ntb_mw_clear_trans() - clear the translation of a memory window
 * @ntb: NTB device context
 * @idx: Memory window number
 *
 * Clear the translation of a memory window. The peer may no longer access
 * local memory through the window.
 *
 * Return: Zero on success, otherwise an error number.
 */
int
ntb_mw_clear_trans(struct ntb_softc *ntb, unsigned mw_idx)
{

	/* Zero address and size disable the translation entirely. */
	return (ntb_mw_set_trans(ntb, mw_idx, 0, 0));
}

/*
 * ntb_mw_get_wc - Get the write-combine status of a memory window
 *
 * Returns: Zero on success, setting *wc; otherwise an error number (e.g. if
 * idx is an invalid memory window).
 */
int
ntb_mw_get_wc(struct ntb_softc *ntb, unsigned idx, bool *wc)
{
	struct ntb_pci_bar_info *bar;

	if (idx >= ntb_mw_count(ntb))
		return (EINVAL);

	bar = &ntb->bar_info[ntb_mw_to_bar(ntb, idx)];
	*wc = bar->mapped_wc;
	return (0);
}

/*
 * ntb_mw_set_wc - Set the write-combine status of a memory window
 *
 * If 'wc' matches the current status, this does nothing and succeeds.
 *
 * Returns: Zero on success, setting the caching attribute on the virtual
 * mapping of the BAR; otherwise an error number (e.g. if idx is an invalid
 * memory window, or if changing the caching attribute fails).
 */
int
ntb_mw_set_wc(struct ntb_softc *ntb, unsigned idx, bool wc)
{
	struct ntb_pci_bar_info *bar;
	vm_memattr_t attr;
	int rc;

	if (idx >= ntb_mw_count(ntb))
		return (EINVAL);

	bar = &ntb->bar_info[ntb_mw_to_bar(ntb, idx)];
	/* Already in the requested state; nothing to do. */
	if (bar->mapped_wc == wc)
		return (0);

	if (wc)
		attr = VM_MEMATTR_WRITE_COMBINING;
	else
		attr = VM_MEMATTR_DEFAULT;

	/* Only record the new state if remapping actually succeeded. */
	rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, attr);
	if (rc == 0)
		bar->mapped_wc = wc;

	return (rc);
}

/**
 * ntb_peer_db_set() - Set the doorbell on the secondary/external side
 * @ntb: pointer to ntb_softc instance
 * @bit: doorbell bits to ring
 *
 * This function allows triggering of a doorbell on the secondary/external
 * side that will initiate an interrupt on the remote host
 */
void
ntb_peer_db_set(struct ntb_softc *ntb, uint64_t bit)
{

	/*
	 * SDOORBELL_LOCKUP errata: ring the peer doorbell through the
	 * memory window rather than the register BAR.
	 */
	if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) {
		ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bit);
		return;
	}

	db_iowrite(ntb, ntb->peer_reg->db_bell, bit);
}

/*
 * ntb_get_peer_db_addr() - Return the address of the remote doorbell register,
 * as well as the size of the register (via *sz_out).
 *
 * This function allows a caller using I/OAT DMA to chain the remote doorbell
 * ring to its memory window write.
 *
 * Note that writing the peer doorbell via a memory window will *not* generate
 * an interrupt on the remote host; that must be done separately.
 */
bus_addr_t
ntb_get_peer_db_addr(struct ntb_softc *ntb, vm_size_t *sz_out)
{
	struct ntb_pci_bar_info *bar;
	uint64_t regoff;

	KASSERT(sz_out != NULL, ("must be non-NULL"));

	if (!HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) {
		/* Normal case: the doorbell lives in the register BAR. */
		bar = &ntb->bar_info[NTB_CONFIG_BAR];
		regoff = ntb->peer_reg->db_bell;
	} else {
		KASSERT((HAS_FEATURE(NTB_SPLIT_BAR) && ntb->mw_count == 2) ||
		    (!HAS_FEATURE(NTB_SPLIT_BAR) && ntb->mw_count == 1),
		    ("mw_count invalid after setup"));
		KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED,
		    ("invalid b2b idx"));

		/* Errata workaround: doorbell is reached through the B2B MW. */
		bar = &ntb->bar_info[ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)];
		regoff = XEON_PDOORBELL_OFFSET;
	}
	KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("uh oh"));

	*sz_out = ntb->reg->db_size;
	/* HACK: Specific to current x86 bus implementation. */
	return ((uint64_t)bar->pci_bus_handle + regoff);
}

/*
 * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
 * @ntb: NTB device context
 *
 * Hardware may support different number or arrangement of doorbell bits.
 *
 * Return: A mask of doorbell bits supported by the ntb.
 */
uint64_t
ntb_db_valid_mask(struct ntb_softc *ntb)
{

	return (ntb->db_valid_mask);
}

/*
 * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector
 * @ntb: NTB device context
 * @vector: Doorbell vector number
 *
 * Each interrupt vector may have a different number or arrangement of bits.
 *
 * Return: A mask of doorbell bits serviced by a vector.
2769 */ 2770uint64_t 2771ntb_db_vector_mask(struct ntb_softc *ntb, uint32_t vector) 2772{ 2773 2774 if (vector > ntb->db_vec_count) 2775 return (0); 2776 return (ntb->db_valid_mask & ntb_vec_mask(ntb, vector)); 2777} 2778 2779/** 2780 * ntb_link_is_up() - get the current ntb link state 2781 * @ntb: NTB device context 2782 * @speed: OUT - The link speed expressed as PCIe generation number 2783 * @width: OUT - The link width expressed as the number of PCIe lanes 2784 * 2785 * RETURNS: true or false based on the hardware link state 2786 */ 2787bool 2788ntb_link_is_up(struct ntb_softc *ntb, enum ntb_speed *speed, 2789 enum ntb_width *width) 2790{ 2791 2792 if (speed != NULL) 2793 *speed = ntb_link_sta_speed(ntb); 2794 if (width != NULL) 2795 *width = ntb_link_sta_width(ntb); 2796 return (link_is_up(ntb)); 2797} 2798 2799static void 2800save_bar_parameters(struct ntb_pci_bar_info *bar) 2801{ 2802 2803 bar->pci_bus_tag = rman_get_bustag(bar->pci_resource); 2804 bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource); 2805 bar->pbase = rman_get_start(bar->pci_resource); 2806 bar->size = rman_get_size(bar->pci_resource); 2807 bar->vbase = rman_get_virtual(bar->pci_resource); 2808} 2809 2810device_t 2811ntb_get_device(struct ntb_softc *ntb) 2812{ 2813 2814 return (ntb->device); 2815} 2816 2817/* Export HW-specific errata information. */ 2818bool 2819ntb_has_feature(struct ntb_softc *ntb, uint32_t feature) 2820{ 2821 2822 return (HAS_FEATURE(feature)); 2823} 2824