ntb_hw.c revision 289397
1/*- 2 * Copyright (C) 2013 Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/dev/ntb/ntb_hw/ntb_hw.c 289397 2015-10-15 23:46:07Z cem $"); 29 30#include <sys/param.h> 31#include <sys/kernel.h> 32#include <sys/systm.h> 33#include <sys/bus.h> 34#include <sys/malloc.h> 35#include <sys/module.h> 36#include <sys/queue.h> 37#include <sys/rman.h> 38#include <sys/sysctl.h> 39#include <vm/vm.h> 40#include <vm/pmap.h> 41#include <machine/bus.h> 42#include <machine/pmap.h> 43#include <machine/resource.h> 44#include <dev/pci/pcireg.h> 45#include <dev/pci/pcivar.h> 46 47#include "ntb_regs.h" 48#include "ntb_hw.h" 49 50/* 51 * The Non-Transparent Bridge (NTB) is a device on some Intel processors that 52 * allows you to connect two systems using a PCI-e link. 53 * 54 * This module contains the hardware abstraction layer for the NTB. It allows 55 * you to send and recieve interrupts, map the memory windows and send and 56 * receive messages in the scratch-pad registers. 57 * 58 * NOTE: Much of the code in this module is shared with Linux. Any patches may 59 * be picked up and redistributed in Linux with a dual GPL/BSD license. 
60 */ 61 62#define NTB_CONFIG_BAR 0 63#define NTB_B2B_BAR_1 1 64#define NTB_B2B_BAR_2 2 65#define NTB_B2B_BAR_3 3 66#define NTB_MAX_BARS 4 67#define NTB_MW_TO_BAR(mw) ((mw) + 1) 68 69#define MAX_MSIX_INTERRUPTS MAX(XEON_MAX_DB_BITS, SOC_MAX_DB_BITS) 70 71#define NTB_HB_TIMEOUT 1 /* second */ 72#define SOC_LINK_RECOVERY_TIME 500 73 74#define DEVICE2SOFTC(dev) ((struct ntb_softc *) device_get_softc(dev)) 75 76enum ntb_device_type { 77 NTB_XEON, 78 NTB_SOC 79}; 80 81/* Device features and workarounds */ 82#define HAS_FEATURE(feature) \ 83 ((ntb->features & (feature)) != 0) 84 85struct ntb_hw_info { 86 uint32_t device_id; 87 const char *desc; 88 enum ntb_device_type type; 89 uint32_t features; 90}; 91 92struct ntb_pci_bar_info { 93 bus_space_tag_t pci_bus_tag; 94 bus_space_handle_t pci_bus_handle; 95 int pci_resource_id; 96 struct resource *pci_resource; 97 vm_paddr_t pbase; 98 void *vbase; 99 u_long size; 100}; 101 102struct ntb_int_info { 103 struct resource *res; 104 int rid; 105 void *tag; 106}; 107 108struct ntb_db_cb { 109 ntb_db_callback callback; 110 unsigned int db_num; 111 void *data; 112 struct ntb_softc *ntb; 113 struct callout irq_work; 114 bool reserved; 115}; 116 117struct ntb_softc { 118 device_t device; 119 enum ntb_device_type type; 120 uint64_t features; 121 122 struct ntb_pci_bar_info bar_info[NTB_MAX_BARS]; 123 struct ntb_int_info int_info[MAX_MSIX_INTERRUPTS]; 124 uint32_t allocated_interrupts; 125 126 struct callout heartbeat_timer; 127 struct callout lr_timer; 128 129 void *ntb_transport; 130 ntb_event_callback event_cb; 131 struct ntb_db_cb *db_cb; 132 uint8_t max_cbs; 133 134 struct { 135 uint8_t max_mw; 136 uint8_t max_spads; 137 uint8_t max_db_bits; 138 uint8_t msix_cnt; 139 } limits; 140 struct { 141 uint32_t ldb; 142 uint32_t ldb_mask; 143 uint32_t rdb; 144 uint32_t bar2_xlat; 145 uint32_t bar4_xlat; 146 uint32_t bar5_xlat; 147 uint32_t spad_remote; 148 uint32_t spad_local; 149 uint32_t lnk_cntl; 150 uint32_t lnk_stat; 151 uint32_t 
spci_cmd; 152 } reg_ofs; 153 uint32_t ppd; 154 uint8_t conn_type; 155 uint8_t dev_type; 156 uint8_t bits_per_vector; 157 uint8_t link_status; 158 uint8_t link_width; 159 uint8_t link_speed; 160}; 161 162#ifdef __i386__ 163static __inline uint64_t 164bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle, 165 bus_size_t offset) 166{ 167 168 return (bus_space_read_4(tag, handle, offset) | 169 ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32); 170} 171 172static __inline void 173bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle, 174 bus_size_t offset, uint64_t val) 175{ 176 177 bus_space_write_4(tag, handle, offset, val); 178 bus_space_write_4(tag, handle, offset + 4, val >> 32); 179} 180#endif 181 182#define ntb_bar_read(SIZE, bar, offset) \ 183 bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \ 184 ntb->bar_info[(bar)].pci_bus_handle, (offset)) 185#define ntb_bar_write(SIZE, bar, offset, val) \ 186 bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \ 187 ntb->bar_info[(bar)].pci_bus_handle, (offset), (val)) 188#define ntb_reg_read(SIZE, offset) ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset) 189#define ntb_reg_write(SIZE, offset, val) \ 190 ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val) 191#define ntb_mw_read(SIZE, offset) \ 192 ntb_bar_read(SIZE, NTB_MW_TO_BAR(ntb->limits.max_mw), offset) 193#define ntb_mw_write(SIZE, offset, val) \ 194 ntb_bar_write(SIZE, NTB_MW_TO_BAR(ntb->limits.max_mw), \ 195 offset, val) 196 197typedef int (*bar_map_strategy)(struct ntb_softc *ntb, 198 struct ntb_pci_bar_info *bar); 199 200static int ntb_probe(device_t device); 201static int ntb_attach(device_t device); 202static int ntb_detach(device_t device); 203static int ntb_map_pci_bars(struct ntb_softc *ntb); 204static int map_pci_bar(struct ntb_softc *ntb, bar_map_strategy strategy, 205 struct ntb_pci_bar_info *bar); 206static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar); 207static int 
map_memory_window_bar(struct ntb_softc *ntb, 208 struct ntb_pci_bar_info *bar); 209static void ntb_unmap_pci_bar(struct ntb_softc *ntb); 210static int ntb_remap_msix(device_t, uint32_t desired, uint32_t avail); 211static int ntb_setup_interrupts(struct ntb_softc *ntb); 212static int ntb_setup_legacy_interrupt(struct ntb_softc *ntb); 213static int ntb_setup_xeon_msix(struct ntb_softc *ntb, uint32_t num_vectors); 214static int ntb_setup_soc_msix(struct ntb_softc *ntb, uint32_t num_vectors); 215static void ntb_teardown_interrupts(struct ntb_softc *ntb); 216static void handle_soc_irq(void *arg); 217static void handle_xeon_irq(void *arg); 218static void handle_xeon_event_irq(void *arg); 219static void ntb_handle_legacy_interrupt(void *arg); 220static void ntb_irq_work(void *arg); 221static uint64_t db_ioread(struct ntb_softc *, uint32_t regoff); 222static void db_iowrite(struct ntb_softc *, uint32_t regoff, uint64_t val); 223static void mask_ldb_interrupt(struct ntb_softc *ntb, unsigned int idx); 224static void unmask_ldb_interrupt(struct ntb_softc *ntb, unsigned int idx); 225static int ntb_create_callbacks(struct ntb_softc *ntb, uint32_t num_vectors); 226static void ntb_free_callbacks(struct ntb_softc *ntb); 227static struct ntb_hw_info *ntb_get_device_info(uint32_t device_id); 228static void ntb_detect_max_mw(struct ntb_softc *ntb); 229static int ntb_detect_xeon(struct ntb_softc *ntb); 230static int ntb_detect_soc(struct ntb_softc *ntb); 231static int ntb_setup_xeon(struct ntb_softc *ntb); 232static int ntb_setup_soc(struct ntb_softc *ntb); 233static void ntb_teardown_xeon(struct ntb_softc *ntb); 234static void configure_soc_secondary_side_bars(struct ntb_softc *ntb); 235static void configure_xeon_secondary_side_bars(struct ntb_softc *ntb); 236static void ntb_handle_heartbeat(void *arg); 237static void ntb_handle_link_event(struct ntb_softc *ntb, int link_state); 238static void ntb_hw_link_down(struct ntb_softc *ntb); 239static void ntb_hw_link_up(struct ntb_softc 
*ntb); 240static void recover_soc_link(void *arg); 241static int ntb_check_link_status(struct ntb_softc *ntb); 242static void save_bar_parameters(struct ntb_pci_bar_info *bar); 243 244static struct ntb_hw_info pci_ids[] = { 245 { 0x0C4E8086, "Atom Processor S1200 NTB Primary B2B", NTB_SOC, 0 }, 246 247 /* XXX: PS/SS IDs left out until they are supported. */ 248 { 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B", 249 NTB_XEON, NTB_REGS_THRU_MW | NTB_B2BDOORBELL_BIT14 }, 250 { 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B", 251 NTB_XEON, NTB_REGS_THRU_MW | NTB_B2BDOORBELL_BIT14 }, 252 { 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B", NTB_XEON, 253 NTB_REGS_THRU_MW | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP 254 | NTB_BAR_SIZE_4K }, 255 { 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B", NTB_XEON, 256 NTB_REGS_THRU_MW | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP 257 }, 258 { 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B", NTB_XEON, 259 NTB_REGS_THRU_MW | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP 260 }, 261 262 { 0x00000000, NULL, NTB_SOC, 0 } 263}; 264 265/* 266 * OS <-> Driver interface structures 267 */ 268MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations"); 269 270static device_method_t ntb_pci_methods[] = { 271 /* Device interface */ 272 DEVMETHOD(device_probe, ntb_probe), 273 DEVMETHOD(device_attach, ntb_attach), 274 DEVMETHOD(device_detach, ntb_detach), 275 DEVMETHOD_END 276}; 277 278static driver_t ntb_pci_driver = { 279 "ntb_hw", 280 ntb_pci_methods, 281 sizeof(struct ntb_softc), 282}; 283 284static devclass_t ntb_devclass; 285DRIVER_MODULE(ntb_hw, pci, ntb_pci_driver, ntb_devclass, NULL, NULL); 286MODULE_VERSION(ntb_hw, 1); 287 288SYSCTL_NODE(_hw, OID_AUTO, ntb, CTLFLAG_RW, 0, "NTB sysctls"); 289 290/* 291 * OS <-> Driver linkage functions 292 */ 293static int 294ntb_probe(device_t device) 295{ 296 struct ntb_hw_info *p; 297 298 p = ntb_get_device_info(pci_get_devid(device)); 299 if (p == 
NULL) 300 return (ENXIO); 301 302 device_set_desc(device, p->desc); 303 return (0); 304} 305 306static int 307ntb_attach(device_t device) 308{ 309 struct ntb_softc *ntb; 310 struct ntb_hw_info *p; 311 int error; 312 313 ntb = DEVICE2SOFTC(device); 314 p = ntb_get_device_info(pci_get_devid(device)); 315 316 ntb->device = device; 317 ntb->type = p->type; 318 ntb->features = p->features; 319 320 /* Heartbeat timer for NTB_SOC since there is no link interrupt */ 321 callout_init(&ntb->heartbeat_timer, 1); 322 callout_init(&ntb->lr_timer, 1); 323 324 if (ntb->type == NTB_SOC) 325 error = ntb_detect_soc(ntb); 326 else 327 error = ntb_detect_xeon(ntb); 328 if (error) 329 goto out; 330 331 ntb_detect_max_mw(ntb); 332 333 error = ntb_map_pci_bars(ntb); 334 if (error) 335 goto out; 336 if (ntb->type == NTB_SOC) 337 error = ntb_setup_soc(ntb); 338 else 339 error = ntb_setup_xeon(ntb); 340 if (error) 341 goto out; 342 error = ntb_setup_interrupts(ntb); 343 if (error) 344 goto out; 345 346 pci_enable_busmaster(ntb->device); 347 348out: 349 if (error != 0) 350 ntb_detach(device); 351 return (error); 352} 353 354static int 355ntb_detach(device_t device) 356{ 357 struct ntb_softc *ntb; 358 359 ntb = DEVICE2SOFTC(device); 360 callout_drain(&ntb->heartbeat_timer); 361 callout_drain(&ntb->lr_timer); 362 if (ntb->type == NTB_XEON) 363 ntb_teardown_xeon(ntb); 364 ntb_teardown_interrupts(ntb); 365 366 /* 367 * Redetect total MWs so we unmap properly -- in case we lowered the 368 * maximum to work around Xeon errata. 
369 */ 370 ntb_detect_max_mw(ntb); 371 ntb_unmap_pci_bar(ntb); 372 373 return (0); 374} 375 376static int 377ntb_map_pci_bars(struct ntb_softc *ntb) 378{ 379 int rc; 380 381 ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0); 382 rc = map_pci_bar(ntb, map_mmr_bar, &ntb->bar_info[NTB_CONFIG_BAR]); 383 if (rc != 0) 384 return (rc); 385 386 ntb->bar_info[NTB_B2B_BAR_1].pci_resource_id = PCIR_BAR(2); 387 rc = map_pci_bar(ntb, map_memory_window_bar, 388 &ntb->bar_info[NTB_B2B_BAR_1]); 389 if (rc != 0) 390 return (rc); 391 392 ntb->bar_info[NTB_B2B_BAR_2].pci_resource_id = PCIR_BAR(4); 393 if (HAS_FEATURE(NTB_REGS_THRU_MW) && !HAS_FEATURE(NTB_SPLIT_BAR)) 394 rc = map_pci_bar(ntb, map_mmr_bar, 395 &ntb->bar_info[NTB_B2B_BAR_2]); 396 else 397 rc = map_pci_bar(ntb, map_memory_window_bar, 398 &ntb->bar_info[NTB_B2B_BAR_2]); 399 if (!HAS_FEATURE(NTB_SPLIT_BAR)) 400 return (rc); 401 402 ntb->bar_info[NTB_B2B_BAR_3].pci_resource_id = PCIR_BAR(5); 403 if (HAS_FEATURE(NTB_REGS_THRU_MW)) 404 rc = map_pci_bar(ntb, map_mmr_bar, 405 &ntb->bar_info[NTB_B2B_BAR_3]); 406 else 407 rc = map_pci_bar(ntb, map_memory_window_bar, 408 &ntb->bar_info[NTB_B2B_BAR_3]); 409 return (rc); 410} 411 412static int 413map_pci_bar(struct ntb_softc *ntb, bar_map_strategy strategy, 414 struct ntb_pci_bar_info *bar) 415{ 416 int rc; 417 418 rc = strategy(ntb, bar); 419 if (rc != 0) 420 device_printf(ntb->device, 421 "unable to allocate pci resource\n"); 422 else 423 device_printf(ntb->device, 424 "Bar size = %lx, v %p, p %p\n", 425 bar->size, bar->vbase, (void *)(bar->pbase)); 426 return (rc); 427} 428 429static int 430map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar) 431{ 432 433 bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, 434 &bar->pci_resource_id, RF_ACTIVE); 435 if (bar->pci_resource == NULL) 436 return (ENXIO); 437 438 save_bar_parameters(bar); 439 return (0); 440} 441 442static int 443map_memory_window_bar(struct ntb_softc *ntb, struct 
ntb_pci_bar_info *bar) 444{ 445 int rc; 446 uint8_t bar_size_bits = 0; 447 448 bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, 449 &bar->pci_resource_id, RF_ACTIVE); 450 451 if (bar->pci_resource == NULL) 452 return (ENXIO); 453 454 save_bar_parameters(bar); 455 /* 456 * Ivytown NTB BAR sizes are misreported by the hardware due to a 457 * hardware issue. To work around this, query the size it should be 458 * configured to by the device and modify the resource to correspond to 459 * this new size. The BIOS on systems with this problem is required to 460 * provide enough address space to allow the driver to make this change 461 * safely. 462 * 463 * Ideally I could have just specified the size when I allocated the 464 * resource like: 465 * bus_alloc_resource(ntb->device, 466 * SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul, 467 * 1ul << bar_size_bits, RF_ACTIVE); 468 * but the PCI driver does not honor the size in this call, so we have 469 * to modify it after the fact. 470 */ 471 if (HAS_FEATURE(NTB_BAR_SIZE_4K)) { 472 if (bar->pci_resource_id == PCIR_BAR(2)) 473 bar_size_bits = pci_read_config(ntb->device, 474 XEON_PBAR23SZ_OFFSET, 1); 475 else 476 bar_size_bits = pci_read_config(ntb->device, 477 XEON_PBAR45SZ_OFFSET, 1); 478 479 rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY, 480 bar->pci_resource, bar->pbase, 481 bar->pbase + (1ul << bar_size_bits) - 1); 482 if (rc != 0) { 483 device_printf(ntb->device, 484 "unable to resize bar\n"); 485 return (rc); 486 } 487 488 save_bar_parameters(bar); 489 } 490 491 /* Mark bar region as write combining to improve performance. 
*/ 492 rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, 493 VM_MEMATTR_WRITE_COMBINING); 494 if (rc != 0) { 495 device_printf(ntb->device, 496 "unable to mark bar as WRITE_COMBINING\n"); 497 return (rc); 498 } 499 return (0); 500} 501 502static void 503ntb_unmap_pci_bar(struct ntb_softc *ntb) 504{ 505 struct ntb_pci_bar_info *current_bar; 506 int i; 507 508 for (i = 0; i < NTB_MAX_BARS; i++) { 509 current_bar = &ntb->bar_info[i]; 510 if (current_bar->pci_resource != NULL) 511 bus_release_resource(ntb->device, SYS_RES_MEMORY, 512 current_bar->pci_resource_id, 513 current_bar->pci_resource); 514 } 515} 516 517static int 518ntb_setup_xeon_msix(struct ntb_softc *ntb, uint32_t num_vectors) 519{ 520 void (*interrupt_handler)(void *); 521 void *int_arg; 522 uint32_t i; 523 int rc; 524 525 if (num_vectors < 4) 526 return (ENOSPC); 527 528 for (i = 0; i < num_vectors; i++) { 529 ntb->int_info[i].rid = i + 1; 530 ntb->int_info[i].res = bus_alloc_resource_any(ntb->device, 531 SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE); 532 if (ntb->int_info[i].res == NULL) { 533 device_printf(ntb->device, 534 "bus_alloc_resource failed\n"); 535 return (ENOMEM); 536 } 537 ntb->int_info[i].tag = NULL; 538 ntb->allocated_interrupts++; 539 if (i == num_vectors - 1) { 540 interrupt_handler = handle_xeon_event_irq; 541 int_arg = ntb; 542 } else { 543 interrupt_handler = handle_xeon_irq; 544 int_arg = &ntb->db_cb[i]; 545 } 546 rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, 547 INTR_MPSAFE | INTR_TYPE_MISC, NULL, interrupt_handler, 548 int_arg, &ntb->int_info[i].tag); 549 if (rc != 0) { 550 device_printf(ntb->device, 551 "bus_setup_intr failed\n"); 552 return (ENXIO); 553 } 554 } 555 556 /* 557 * Prevent consumers from registering callbacks on the link event irq 558 * slot, from which they will never be called back. 
559 */ 560 ntb->db_cb[num_vectors - 1].reserved = true; 561 ntb->max_cbs--; 562 return (0); 563} 564 565static int 566ntb_setup_soc_msix(struct ntb_softc *ntb, uint32_t num_vectors) 567{ 568 uint32_t i; 569 int rc; 570 571 for (i = 0; i < num_vectors; i++) { 572 ntb->int_info[i].rid = i + 1; 573 ntb->int_info[i].res = bus_alloc_resource_any(ntb->device, 574 SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE); 575 if (ntb->int_info[i].res == NULL) { 576 device_printf(ntb->device, 577 "bus_alloc_resource failed\n"); 578 return (ENOMEM); 579 } 580 ntb->int_info[i].tag = NULL; 581 ntb->allocated_interrupts++; 582 rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, 583 INTR_MPSAFE | INTR_TYPE_MISC, NULL, handle_soc_irq, 584 &ntb->db_cb[i], &ntb->int_info[i].tag); 585 if (rc != 0) { 586 device_printf(ntb->device, "bus_setup_intr failed\n"); 587 return (ENXIO); 588 } 589 } 590 return (0); 591} 592 593/* 594 * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector 595 * cannot be allocated for each MSI-X message. JHB seems to think remapping 596 * should be okay. This tunable should enable us to test that hypothesis 597 * when someone gets their hands on some Xeon hardware. 598 */ 599static int ntb_force_remap_mode; 600SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN, 601 &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped" 602 " to a smaller number of ithreads, even if the desired number are " 603 "available"); 604 605/* 606 * In case it is NOT ok, give consumers an abort button. 607 */ 608static int ntb_prefer_intx; 609SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN, 610 &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather " 611 "than remapping MSI-X messages over available slots (match Linux driver " 612 "behavior)"); 613 614/* 615 * Remap the desired number of MSI-X messages to available ithreads in a simple 616 * round-robin fashion. 
617 */ 618static int 619ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail) 620{ 621 u_int *vectors; 622 uint32_t i; 623 int rc; 624 625 if (ntb_prefer_intx != 0) 626 return (ENXIO); 627 628 vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK); 629 630 for (i = 0; i < desired; i++) 631 vectors[i] = (i % avail) + 1; 632 633 rc = pci_remap_msix(dev, desired, vectors); 634 free(vectors, M_NTB); 635 return (rc); 636} 637 638static int 639ntb_setup_interrupts(struct ntb_softc *ntb) 640{ 641 uint32_t desired_vectors, num_vectors; 642 uint64_t mask; 643 int rc; 644 645 ntb->allocated_interrupts = 0; 646 647 /* 648 * On SOC, disable all interrupts. On XEON, disable all but Link 649 * Interrupt. The rest will be unmasked as callbacks are registered. 650 */ 651 mask = 0; 652 if (ntb->type == NTB_XEON) 653 mask = (1 << XEON_LINK_DB); 654 db_iowrite(ntb, ntb->reg_ofs.ldb_mask, ~mask); 655 656 num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device), 657 ntb->limits.max_db_bits); 658 if (desired_vectors >= 1) { 659 rc = pci_alloc_msix(ntb->device, &num_vectors); 660 661 if (ntb_force_remap_mode != 0 && rc == 0 && 662 num_vectors == desired_vectors) 663 num_vectors--; 664 665 if (rc == 0 && num_vectors < desired_vectors) { 666 rc = ntb_remap_msix(ntb->device, desired_vectors, 667 num_vectors); 668 if (rc == 0) 669 num_vectors = desired_vectors; 670 else 671 pci_release_msi(ntb->device); 672 } 673 if (rc != 0) 674 num_vectors = 1; 675 } else 676 num_vectors = 1; 677 678 /* 679 * If allocating MSI-X interrupts succeeds, limit callbacks to the 680 * number of MSI-X slots available. 
681 */ 682 ntb_create_callbacks(ntb, num_vectors); 683 684 if (ntb->type == NTB_XEON) 685 rc = ntb_setup_xeon_msix(ntb, num_vectors); 686 else 687 rc = ntb_setup_soc_msix(ntb, num_vectors); 688 if (rc != 0) { 689 device_printf(ntb->device, 690 "Error allocating MSI-X interrupts: %d\n", rc); 691 692 /* 693 * If allocating MSI-X interrupts failed and we're forced to 694 * use legacy INTx anyway, the only limit on individual 695 * callbacks is the number of doorbell bits. 696 * 697 * CEM: This seems odd to me but matches the behavior of the 698 * Linux driver ca. September 2013 699 */ 700 ntb_free_callbacks(ntb); 701 ntb_create_callbacks(ntb, ntb->limits.max_db_bits); 702 } 703 704 if (ntb->type == NTB_XEON && rc == ENOSPC) 705 rc = ntb_setup_legacy_interrupt(ntb); 706 707 return (rc); 708} 709 710static int 711ntb_setup_legacy_interrupt(struct ntb_softc *ntb) 712{ 713 int rc; 714 715 ntb->int_info[0].rid = 0; 716 ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, 717 &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE); 718 if (ntb->int_info[0].res == NULL) { 719 device_printf(ntb->device, "bus_alloc_resource failed\n"); 720 return (ENOMEM); 721 } 722 723 ntb->int_info[0].tag = NULL; 724 ntb->allocated_interrupts = 1; 725 726 rc = bus_setup_intr(ntb->device, ntb->int_info[0].res, 727 INTR_MPSAFE | INTR_TYPE_MISC, NULL, ntb_handle_legacy_interrupt, 728 ntb, &ntb->int_info[0].tag); 729 if (rc != 0) { 730 device_printf(ntb->device, "bus_setup_intr failed\n"); 731 return (ENXIO); 732 } 733 734 return (0); 735} 736 737static void 738ntb_teardown_interrupts(struct ntb_softc *ntb) 739{ 740 struct ntb_int_info *current_int; 741 int i; 742 743 for (i = 0; i < ntb->allocated_interrupts; i++) { 744 current_int = &ntb->int_info[i]; 745 if (current_int->tag != NULL) 746 bus_teardown_intr(ntb->device, current_int->res, 747 current_int->tag); 748 749 if (current_int->res != NULL) 750 bus_release_resource(ntb->device, SYS_RES_IRQ, 751 rman_get_rid(current_int->res), 
current_int->res); 752 } 753 754 ntb_free_callbacks(ntb); 755 pci_release_msi(ntb->device); 756} 757 758/* 759 * Doorbell register and mask are 64-bit on SoC, 16-bit on Xeon. Abstract it 760 * out to make code clearer. 761 */ 762static uint64_t 763db_ioread(struct ntb_softc *ntb, uint32_t regoff) 764{ 765 766 if (ntb->type == NTB_SOC) 767 return (ntb_reg_read(8, regoff)); 768 769 KASSERT(ntb->type == NTB_XEON, ("bad ntb type")); 770 771 return (ntb_reg_read(2, regoff)); 772} 773 774static void 775db_iowrite(struct ntb_softc *ntb, uint32_t regoff, uint64_t val) 776{ 777 778 if (ntb->type == NTB_SOC) { 779 ntb_reg_write(8, regoff, val); 780 return; 781 } 782 783 KASSERT(ntb->type == NTB_XEON, ("bad ntb type")); 784 ntb_reg_write(2, regoff, (uint16_t)val); 785} 786 787static void 788mask_ldb_interrupt(struct ntb_softc *ntb, unsigned int idx) 789{ 790 uint64_t mask; 791 792 mask = db_ioread(ntb, ntb->reg_ofs.ldb_mask); 793 mask |= 1 << (idx * ntb->bits_per_vector); 794 db_iowrite(ntb, ntb->reg_ofs.ldb_mask, mask); 795} 796 797static void 798unmask_ldb_interrupt(struct ntb_softc *ntb, unsigned int idx) 799{ 800 uint64_t mask; 801 802 mask = db_ioread(ntb, ntb->reg_ofs.ldb_mask); 803 mask &= ~(1 << (idx * ntb->bits_per_vector)); 804 db_iowrite(ntb, ntb->reg_ofs.ldb_mask, mask); 805} 806 807static void 808handle_soc_irq(void *arg) 809{ 810 struct ntb_db_cb *db_cb = arg; 811 struct ntb_softc *ntb = db_cb->ntb; 812 813 db_iowrite(ntb, ntb->reg_ofs.ldb, (uint64_t) 1 << db_cb->db_num); 814 815 if (db_cb->callback != NULL) { 816 mask_ldb_interrupt(ntb, db_cb->db_num); 817 callout_reset(&db_cb->irq_work, 0, ntb_irq_work, db_cb); 818 } 819} 820 821static void 822handle_xeon_irq(void *arg) 823{ 824 struct ntb_db_cb *db_cb = arg; 825 struct ntb_softc *ntb = db_cb->ntb; 826 827 /* 828 * On Xeon, there are 16 bits in the interrupt register 829 * but only 4 vectors. So, 5 bits are assigned to the first 3 830 * vectors, with the 4th having a single bit for link 831 * interrupts. 
832 */ 833 db_iowrite(ntb, ntb->reg_ofs.ldb, 834 ((1 << ntb->bits_per_vector) - 1) << 835 (db_cb->db_num * ntb->bits_per_vector)); 836 837 if (db_cb->callback != NULL) { 838 mask_ldb_interrupt(ntb, db_cb->db_num); 839 callout_reset(&db_cb->irq_work, 0, ntb_irq_work, db_cb); 840 } 841} 842 843/* Since we do not have a HW doorbell in SOC, this is only used in JF/JT */ 844static void 845handle_xeon_event_irq(void *arg) 846{ 847 struct ntb_softc *ntb = arg; 848 int rc; 849 850 rc = ntb_check_link_status(ntb); 851 if (rc != 0) 852 device_printf(ntb->device, "Error determining link status\n"); 853 854 /* bit 15 is always the link bit */ 855 db_iowrite(ntb, ntb->reg_ofs.ldb, 1 << XEON_LINK_DB); 856} 857 858static void 859ntb_handle_legacy_interrupt(void *arg) 860{ 861 struct ntb_softc *ntb = arg; 862 unsigned int i; 863 uint64_t ldb; 864 865 ldb = db_ioread(ntb, ntb->reg_ofs.ldb); 866 867 if (ntb->type == NTB_XEON && (ldb & XEON_DB_HW_LINK) != 0) { 868 handle_xeon_event_irq(ntb); 869 ldb &= ~XEON_DB_HW_LINK; 870 } 871 872 while (ldb != 0) { 873 i = ffs(ldb); 874 ldb &= ldb - 1; 875 if (ntb->type == NTB_SOC) 876 handle_soc_irq(&ntb->db_cb[i]); 877 else 878 handle_xeon_irq(&ntb->db_cb[i]); 879 } 880} 881 882static int 883ntb_create_callbacks(struct ntb_softc *ntb, uint32_t num_vectors) 884{ 885 uint32_t i; 886 887 ntb->max_cbs = num_vectors; 888 ntb->db_cb = malloc(num_vectors * sizeof(*ntb->db_cb), M_NTB, 889 M_ZERO | M_WAITOK); 890 for (i = 0; i < num_vectors; i++) { 891 ntb->db_cb[i].db_num = i; 892 ntb->db_cb[i].ntb = ntb; 893 } 894 895 return (0); 896} 897 898static void 899ntb_free_callbacks(struct ntb_softc *ntb) 900{ 901 uint8_t i; 902 903 for (i = 0; i < ntb->max_cbs; i++) 904 ntb_unregister_db_callback(ntb, i); 905 906 free(ntb->db_cb, M_NTB); 907 ntb->max_cbs = 0; 908} 909 910static struct ntb_hw_info * 911ntb_get_device_info(uint32_t device_id) 912{ 913 struct ntb_hw_info *ep = pci_ids; 914 915 while (ep->device_id) { 916 if (ep->device_id == device_id) 917 
return (ep); 918 ++ep; 919 } 920 return (NULL); 921} 922 923static void 924ntb_teardown_xeon(struct ntb_softc *ntb) 925{ 926 927 ntb_hw_link_down(ntb); 928} 929 930static void 931ntb_detect_max_mw(struct ntb_softc *ntb) 932{ 933 934 if (ntb->type == NTB_SOC) { 935 ntb->limits.max_mw = SOC_MAX_MW; 936 return; 937 } 938 939 if (HAS_FEATURE(NTB_SPLIT_BAR)) 940 ntb->limits.max_mw = XEON_HSXSPLIT_MAX_MW; 941 else 942 ntb->limits.max_mw = XEON_SNB_MAX_MW; 943} 944 945static int 946ntb_detect_xeon(struct ntb_softc *ntb) 947{ 948 uint8_t ppd, conn_type; 949 950 ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1); 951 ntb->ppd = ppd; 952 953 if ((ppd & XEON_PPD_DEV_TYPE) != 0) 954 ntb->dev_type = NTB_DEV_USD; 955 else 956 ntb->dev_type = NTB_DEV_DSD; 957 958 if ((ppd & XEON_PPD_SPLIT_BAR) != 0) 959 ntb->features |= NTB_SPLIT_BAR; 960 961 conn_type = ppd & XEON_PPD_CONN_TYPE; 962 switch (conn_type) { 963 case NTB_CONN_B2B: 964 ntb->conn_type = conn_type; 965 break; 966 case NTB_CONN_RP: 967 case NTB_CONN_TRANSPARENT: 968 default: 969 device_printf(ntb->device, "Unsupported connection type: %u\n", 970 (unsigned)conn_type); 971 return (ENXIO); 972 } 973 return (0); 974} 975 976static int 977ntb_detect_soc(struct ntb_softc *ntb) 978{ 979 uint32_t ppd, conn_type; 980 981 ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4); 982 ntb->ppd = ppd; 983 984 if ((ppd & SOC_PPD_DEV_TYPE) != 0) 985 ntb->dev_type = NTB_DEV_DSD; 986 else 987 ntb->dev_type = NTB_DEV_USD; 988 989 conn_type = (ppd & SOC_PPD_CONN_TYPE) >> 8; 990 switch (conn_type) { 991 case NTB_CONN_B2B: 992 ntb->conn_type = conn_type; 993 break; 994 default: 995 device_printf(ntb->device, "Unsupported NTB configuration\n"); 996 return (ENXIO); 997 } 998 return (0); 999} 1000 1001static int 1002ntb_setup_xeon(struct ntb_softc *ntb) 1003{ 1004 1005 ntb->reg_ofs.ldb = XEON_PDOORBELL_OFFSET; 1006 ntb->reg_ofs.ldb_mask = XEON_PDBMSK_OFFSET; 1007 ntb->reg_ofs.spad_local = XEON_SPAD_OFFSET; 1008 ntb->reg_ofs.bar2_xlat = 
XEON_SBAR2XLAT_OFFSET; 1009 ntb->reg_ofs.bar4_xlat = XEON_SBAR4XLAT_OFFSET; 1010 if (HAS_FEATURE(NTB_SPLIT_BAR)) 1011 ntb->reg_ofs.bar5_xlat = XEON_SBAR5XLAT_OFFSET; 1012 1013 switch (ntb->conn_type) { 1014 case NTB_CONN_B2B: 1015 /* 1016 * reg_ofs.rdb and reg_ofs.spad_remote are effectively ignored 1017 * with the NTB_REGS_THRU_MW errata mode enabled. (See 1018 * ntb_ring_doorbell() and ntb_read/write_remote_spad().) 1019 */ 1020 ntb->reg_ofs.rdb = XEON_B2B_DOORBELL_OFFSET; 1021 ntb->reg_ofs.spad_remote = XEON_B2B_SPAD_OFFSET; 1022 1023 ntb->limits.max_spads = XEON_MAX_SPADS; 1024 break; 1025 1026 case NTB_CONN_RP: 1027 /* 1028 * Every Xeon today needs NTB_REGS_THRU_MW, so punt on RP for 1029 * now. 1030 */ 1031 KASSERT(HAS_FEATURE(NTB_REGS_THRU_MW), 1032 ("Xeon without MW errata unimplemented")); 1033 device_printf(ntb->device, 1034 "NTB-RP disabled to due hardware errata.\n"); 1035 return (ENXIO); 1036 1037 case NTB_CONN_TRANSPARENT: 1038 default: 1039 device_printf(ntb->device, "Connection type %d not supported\n", 1040 ntb->conn_type); 1041 return (ENXIO); 1042 } 1043 1044 /* 1045 * There is a Xeon hardware errata related to writes to SDOORBELL or 1046 * B2BDOORBELL in conjunction with inbound access to NTB MMIO space, 1047 * which may hang the system. To workaround this use the second memory 1048 * window to access the interrupt and scratch pad registers on the 1049 * remote system. 1050 * 1051 * There is another HW errata on the limit registers -- they can only 1052 * be written when the base register is (?)4GB aligned and < 32-bit. 1053 * This should already be the case based on the driver defaults, but 1054 * write the limit registers first just in case. 1055 */ 1056 if (HAS_FEATURE(NTB_REGS_THRU_MW)) { 1057 /* 1058 * Set the Limit register to 4k, the minimum size, to prevent 1059 * an illegal access. 1060 * 1061 * XXX: Should this be PBAR5LMT / get_mw_size(, max_mw - 1)? 
1062 */ 1063 ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 1064 ntb_get_mw_size(ntb, 1) + 0x1000); 1065 /* Reserve the last MW for mapping remote spad */ 1066 ntb->limits.max_mw--; 1067 } else 1068 /* 1069 * Disable the limit register, just in case it is set to 1070 * something silly. A 64-bit write will also clear PBAR5LMT in 1071 * split-bar mode, and this is desired. 1072 */ 1073 ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0); 1074 1075 ntb->reg_ofs.lnk_cntl = XEON_NTBCNTL_OFFSET; 1076 ntb->reg_ofs.lnk_stat = XEON_LINK_STATUS_OFFSET; 1077 ntb->reg_ofs.spci_cmd = XEON_PCICMD_OFFSET; 1078 1079 ntb->limits.max_db_bits = XEON_MAX_DB_BITS; 1080 ntb->limits.msix_cnt = XEON_MSIX_CNT; 1081 ntb->bits_per_vector = XEON_DB_BITS_PER_VEC; 1082 1083 /* 1084 * HW Errata on bit 14 of b2bdoorbell register. Writes will not be 1085 * mirrored to the remote system. Shrink the number of bits by one, 1086 * since bit 14 is the last bit. 1087 * 1088 * On REGS_THRU_MW errata mode, we don't use the b2bdoorbell register 1089 * anyway. Nor for non-B2B connection types. 
 */
	if (HAS_FEATURE(NTB_B2BDOORBELL_BIT14) &&
	    !HAS_FEATURE(NTB_REGS_THRU_MW) &&
	    ntb->conn_type == NTB_CONN_B2B)
		ntb->limits.max_db_bits = XEON_MAX_DB_BITS - 1;

	configure_xeon_secondary_side_bars(ntb);

	/* Enable Bus Master and Memory Space on the secondary side */
	if (ntb->conn_type == NTB_CONN_B2B)
		ntb_reg_write(2, ntb->reg_ofs.spci_cmd,
		    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);

	/* Enable link training */
	ntb_hw_link_up(ntb);

	return (0);
}

/*
 * Set up register offsets, limits and the secondary-side BARs for the SOC
 * (Atom) flavor of the NTB, then start link training and the link-status
 * polling heartbeat (SOC has no link-status interrupt).  Only the B2B
 * connection type is supported on SOC.
 *
 * Returns 0 (kept int for symmetry with ntb_setup_xeon()).
 */
static int
ntb_setup_soc(struct ntb_softc *ntb)
{

	KASSERT(ntb->conn_type == NTB_CONN_B2B,
	    ("Unsupported NTB configuration (%d)\n", ntb->conn_type));

	/* Initiate PCI-E link training */
	pci_write_config(ntb->device, NTB_PPD_OFFSET,
	    ntb->ppd | SOC_PPD_INIT_LINK, 4);

	ntb->reg_ofs.ldb = SOC_PDOORBELL_OFFSET;
	ntb->reg_ofs.ldb_mask = SOC_PDBMSK_OFFSET;
	ntb->reg_ofs.rdb = SOC_B2B_DOORBELL_OFFSET;
	ntb->reg_ofs.bar2_xlat = SOC_SBAR2XLAT_OFFSET;
	ntb->reg_ofs.bar4_xlat = SOC_SBAR4XLAT_OFFSET;
	ntb->reg_ofs.lnk_cntl = SOC_NTBCNTL_OFFSET;
	ntb->reg_ofs.lnk_stat = SOC_LINK_STATUS_OFFSET;
	ntb->reg_ofs.spad_local = SOC_SPAD_OFFSET;
	ntb->reg_ofs.spad_remote = SOC_B2B_SPAD_OFFSET;
	ntb->reg_ofs.spci_cmd = SOC_PCICMD_OFFSET;

	ntb->limits.max_spads = SOC_MAX_SPADS;
	ntb->limits.max_db_bits = SOC_MAX_DB_BITS;
	ntb->limits.msix_cnt = SOC_MSIX_CNT;
	ntb->bits_per_vector = SOC_DB_BITS_PER_VEC;

	/*
	 * FIXME - MSI-X bug on early SOC HW, remove once internal issue is
	 * resolved.  Mask transaction layer internal parity errors.
	 */
	pci_write_config(ntb->device, 0xFC, 0x4, 4);

	configure_soc_secondary_side_bars(ntb);

	/* Enable Bus Master and Memory Space on the secondary side */
	ntb_reg_write(2, ntb->reg_ofs.spci_cmd,
	    PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);

	/* SOC link state must be polled; see ntb_handle_heartbeat(). */
	callout_reset(&ntb->heartbeat_timer, 0, ntb_handle_heartbeat, ntb);

	return (0);
}

/*
 * Program the SOC primary-side translation registers and secondary MBARs
 * with fixed addresses for this topology.  The USD and DSD sides use
 * mirrored assignments so that each side's windows translate to the peer.
 */
static void
configure_soc_secondary_side_bars(struct ntb_softc *ntb)
{

	if (ntb->dev_type == NTB_DEV_USD) {
		ntb_reg_write(8, SOC_PBAR2XLAT_OFFSET, MBAR23_DSD_ADDR);
		ntb_reg_write(8, SOC_PBAR4XLAT_OFFSET, MBAR4_DSD_ADDR);
		ntb_reg_write(8, SOC_MBAR23_OFFSET, MBAR23_USD_ADDR);
		ntb_reg_write(8, SOC_MBAR45_OFFSET, MBAR4_USD_ADDR);
	} else {
		ntb_reg_write(8, SOC_PBAR2XLAT_OFFSET, MBAR23_USD_ADDR);
		ntb_reg_write(8, SOC_PBAR4XLAT_OFFSET, MBAR4_USD_ADDR);
		ntb_reg_write(8, SOC_MBAR23_OFFSET, MBAR23_DSD_ADDR);
		ntb_reg_write(8, SOC_MBAR45_OFFSET, MBAR4_DSD_ADDR);
	}
}

/*
 * Xeon counterpart of the above: program the primary-side translation
 * registers and secondary BAR base addresses, honoring the REGS_THRU_MW
 * errata mode and the SPLIT_BAR (two 32-bit windows instead of one 64-bit
 * window) feature.
 */
static void
configure_xeon_secondary_side_bars(struct ntb_softc *ntb)
{

	if (ntb->dev_type == NTB_DEV_USD) {
		ntb_reg_write(8, XEON_PBAR2XLAT_OFFSET, MBAR23_DSD_ADDR);
		if (HAS_FEATURE(NTB_REGS_THRU_MW))
			ntb_reg_write(8, XEON_PBAR4XLAT_OFFSET,
			    MBAR01_DSD_ADDR);
		else {
			if (HAS_FEATURE(NTB_SPLIT_BAR)) {
				ntb_reg_write(4, XEON_PBAR4XLAT_OFFSET,
				    MBAR4_DSD_ADDR);
				ntb_reg_write(4, XEON_PBAR5XLAT_OFFSET,
				    MBAR5_DSD_ADDR);
			} else
				ntb_reg_write(8, XEON_PBAR4XLAT_OFFSET,
				    MBAR4_DSD_ADDR);
			/*
			 * B2B_XLAT_OFFSET is a 64-bit register but can only be
			 * written 32 bits at a time.
			 */
			ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL,
			    MBAR01_DSD_ADDR & 0xffffffff);
			ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU,
			    MBAR01_DSD_ADDR >> 32);
		}
		ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, MBAR01_USD_ADDR);
		ntb_reg_write(8, XEON_SBAR2BASE_OFFSET, MBAR23_USD_ADDR);
		if (HAS_FEATURE(NTB_SPLIT_BAR)) {
			ntb_reg_write(4, XEON_SBAR4BASE_OFFSET, MBAR4_USD_ADDR);
			ntb_reg_write(4, XEON_SBAR5BASE_OFFSET, MBAR5_USD_ADDR);
		} else
			ntb_reg_write(8, XEON_SBAR4BASE_OFFSET, MBAR4_USD_ADDR);
	} else {
		/* DSD side: mirror image of the USD assignments above. */
		ntb_reg_write(8, XEON_PBAR2XLAT_OFFSET, MBAR23_USD_ADDR);
		if (HAS_FEATURE(NTB_REGS_THRU_MW))
			ntb_reg_write(8, XEON_PBAR4XLAT_OFFSET,
			    MBAR01_USD_ADDR);
		else {
			if (HAS_FEATURE(NTB_SPLIT_BAR)) {
				ntb_reg_write(4, XEON_PBAR4XLAT_OFFSET,
				    MBAR4_USD_ADDR);
				ntb_reg_write(4, XEON_PBAR5XLAT_OFFSET,
				    MBAR5_USD_ADDR);
			} else
				ntb_reg_write(8, XEON_PBAR4XLAT_OFFSET,
				    MBAR4_USD_ADDR);
			/*
			 * B2B_XLAT_OFFSET is a 64-bit register but can only be
			 * written 32 bits at a time.
			 */
			ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL,
			    MBAR01_USD_ADDR & 0xffffffff);
			ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU,
			    MBAR01_USD_ADDR >> 32);
		}
		ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, MBAR01_DSD_ADDR);
		ntb_reg_write(8, XEON_SBAR2BASE_OFFSET, MBAR23_DSD_ADDR);
		if (HAS_FEATURE(NTB_SPLIT_BAR)) {
			ntb_reg_write(4, XEON_SBAR4BASE_OFFSET,
			    MBAR4_DSD_ADDR);
			ntb_reg_write(4, XEON_SBAR5BASE_OFFSET,
			    MBAR5_DSD_ADDR);
		} else
			ntb_reg_write(8, XEON_SBAR4BASE_OFFSET,
			    MBAR4_DSD_ADDR);
	}
}

/* SOC does not have link status interrupt, poll on that platform */
static void
ntb_handle_heartbeat(void *arg)
{
	struct ntb_softc *ntb = arg;
	uint32_t status32;
	int rc;

	rc = ntb_check_link_status(ntb);
	if (rc != 0)
		device_printf(ntb->device,
		    "Error determining link status\n");

	/* Check to see if a link error is the cause of the link down */
	if (ntb->link_status == NTB_LINK_DOWN) {
		status32 = ntb_reg_read(4, SOC_LTSSMSTATEJMP_OFFSET);
		if ((status32 & SOC_LTSSMSTATEJMP_FORCEDETECT) != 0) {
			/*
			 * Link training is stuck in forced-detect; hand off
			 * to the recovery callout instead of polling again.
			 */
			callout_reset(&ntb->lr_timer, 0, recover_soc_link,
			    ntb);
			return;
		}
	}

	/* Link looks sane; poll again after the heartbeat interval. */
	callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz,
	    ntb_handle_heartbeat, ntb);
}

/*
 * Reset the NTB ModPhy lanes and clear the latched link error status so
 * the LTSSM state machine is released and the link can retrain.
 */
static void
soc_perform_link_restart(struct ntb_softc *ntb)
{
	uint32_t status;

	/* Driver resets the NTB ModPhy lanes - magic!
 */
	ntb_reg_write(1, SOC_MODPHY_PCSREG6, 0xe0);
	ntb_reg_write(1, SOC_MODPHY_PCSREG4, 0x40);
	ntb_reg_write(1, SOC_MODPHY_PCSREG4, 0x60);
	ntb_reg_write(1, SOC_MODPHY_PCSREG6, 0x60);

	/* Driver waits 100ms to allow the NTB ModPhy to settle */
	pause("ModPhy", hz / 10);

	/* Clear AER Errors, write to clear */
	status = ntb_reg_read(4, SOC_ERRCORSTS_OFFSET);
	status &= PCIM_AER_COR_REPLAY_ROLLOVER;
	ntb_reg_write(4, SOC_ERRCORSTS_OFFSET, status);

	/* Clear unexpected electrical idle event in LTSSM, write to clear */
	status = ntb_reg_read(4, SOC_LTSSMERRSTS0_OFFSET);
	status |= SOC_LTSSMERRSTS0_UNEXPECTEDEI;
	ntb_reg_write(4, SOC_LTSSMERRSTS0_OFFSET, status);

	/* Clear DeSkew Buffer error, write to clear */
	status = ntb_reg_read(4, SOC_DESKEWSTS_OFFSET);
	status |= SOC_DESKEWSTS_DBERR;
	ntb_reg_write(4, SOC_DESKEWSTS_OFFSET, status);

	/* Clear IBIST receive-overflow error, write to clear */
	status = ntb_reg_read(4, SOC_IBSTERRRCRVSTS0_OFFSET);
	status &= SOC_IBIST_ERR_OFLOW;
	ntb_reg_write(4, SOC_IBSTERRRCRVSTS0_OFFSET, status);

	/* Releases the NTB state machine to allow the link to retrain */
	status = ntb_reg_read(4, SOC_LTSSMSTATEJMP_OFFSET);
	status &= ~SOC_LTSSMSTATEJMP_FORCEDETECT;
	ntb_reg_write(4, SOC_LTSSMSTATEJMP_OFFSET, status);
}

/*
 * Record a link state transition, read the new width/speed on link-up,
 * (re)arm the heartbeat, and notify the registered event callback.  A call
 * with an unchanged state is a no-op.
 */
static void
ntb_handle_link_event(struct ntb_softc *ntb, int link_state)
{
	enum ntb_hw_event event;
	uint16_t status;

	if (ntb->link_status == link_state)
		return;

	if (link_state == NTB_LINK_UP) {
		device_printf(ntb->device, "Link Up\n");
		ntb->link_status = NTB_LINK_UP;
		event = NTB_EVENT_HW_LINK_UP;

		/*
		 * SOC and transparent mode expose link status in MMIO space;
		 * other Xeon modes expose it in PCI config space.
		 */
		if (ntb->type == NTB_SOC ||
		    ntb->conn_type == NTB_CONN_TRANSPARENT)
			status = ntb_reg_read(2, ntb->reg_ofs.lnk_stat);
		else
			status = pci_read_config(ntb->device,
			    XEON_LINK_STATUS_OFFSET, 2);
		ntb->link_width = (status & NTB_LINK_WIDTH_MASK) >> 4;
		ntb->link_speed = (status & NTB_LINK_SPEED_MASK);
		device_printf(ntb->device, "Link Width %d, Link Speed %d\n",
		    ntb->link_width, ntb->link_speed);
		callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz,
		    ntb_handle_heartbeat, ntb);
	} else {
		device_printf(ntb->device, "Link Down\n");
		ntb->link_status = NTB_LINK_DOWN;
		event = NTB_EVENT_HW_LINK_DOWN;
		/* Do not modify link width/speed, we need it in link recovery */
	}

	/* notify the upper layer if we have an event change */
	if (ntb->event_cb != NULL)
		ntb->event_cb(ntb->ntb_transport, event);
}

/*
 * Bring the link up.  In transparent mode there is no link control register
 * to program, so just report link-up; otherwise unlock the configuration,
 * enable snooping on the memory-window BARs and clear the link-disable bit.
 */
static void
ntb_hw_link_up(struct ntb_softc *ntb)
{
	uint32_t cntl;

	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
		ntb_handle_link_event(ntb, NTB_LINK_UP);
		return;
	}

	cntl = ntb_reg_read(4, ntb->reg_ofs.lnk_cntl);
	cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
	cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
	cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
	if (HAS_FEATURE(NTB_SPLIT_BAR))
		cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
	ntb_reg_write(4, ntb->reg_ofs.lnk_cntl, cntl);
}

/* Inverse of ntb_hw_link_up(): disable snooping and force the link down. */
static void
ntb_hw_link_down(struct ntb_softc *ntb)
{
	uint32_t cntl;

	if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
		ntb_handle_link_event(ntb, NTB_LINK_DOWN);
		return;
	}

	cntl = ntb_reg_read(4, ntb->reg_ofs.lnk_cntl);
	cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
	cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
	if (HAS_FEATURE(NTB_SPLIT_BAR))
		cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
	cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
	ntb_reg_write(4, ntb->reg_ofs.lnk_cntl, cntl);
}

/*
 * Callout handler that restarts SOC link training and retries until the
 * link comes back with the previously recorded width and speed.
 */
static void
recover_soc_link(void *arg)
{
	struct ntb_softc *ntb = arg;
	uint8_t speed, width;
	uint32_t status32;
	uint16_t status16;

	soc_perform_link_restart(ntb);

	/*
	 * There is a potential race between the 2 NTB devices recovering at
	 * the same time.  If the times are the same, the link will not recover
	 * and the driver will be stuck in this loop forever.  Add a random
	 * interval to the recovery time to prevent this race.
	 */
	status32 = arc4random() % SOC_LINK_RECOVERY_TIME;
	pause("Link", (SOC_LINK_RECOVERY_TIME + status32) * hz / 1000);

	status32 = ntb_reg_read(4, SOC_LTSSMSTATEJMP_OFFSET);
	if ((status32 & SOC_LTSSMSTATEJMP_FORCEDETECT) != 0)
		goto retry;

	status32 = ntb_reg_read(4, SOC_IBSTERRRCRVSTS0_OFFSET);
	if ((status32 & SOC_IBIST_ERR_OFLOW) != 0)
		goto retry;

	status32 = ntb_reg_read(4, ntb->reg_ofs.lnk_cntl);
	if ((status32 & SOC_CNTL_LINK_DOWN) != 0)
		goto out;

	/* Require the pre-failure width and speed before declaring success. */
	status16 = ntb_reg_read(2, ntb->reg_ofs.lnk_stat);
	width = (status16 & NTB_LINK_WIDTH_MASK) >> 4;
	speed = (status16 & NTB_LINK_SPEED_MASK);
	if (ntb->link_width != width || ntb->link_speed != speed)
		goto retry;

out:
	/* Resume normal link polling. */
	callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz,
	    ntb_handle_heartbeat, ntb);
	return;

retry:
	callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_soc_link,
	    ntb);
}

/*
 * Sample the hardware link state (control register on SOC, PCI config
 * space on Xeon) and feed it to ntb_handle_link_event().  Always returns 0.
 */
static int
ntb_check_link_status(struct ntb_softc *ntb)
{
	int link_state;
	uint32_t ntb_cntl;
	uint16_t status;

	if (ntb->type == NTB_SOC) {
		ntb_cntl = ntb_reg_read(4, ntb->reg_ofs.lnk_cntl);
		if ((ntb_cntl & SOC_CNTL_LINK_DOWN) != 0)
			link_state = NTB_LINK_DOWN;
		else
			link_state = NTB_LINK_UP;
	} else {
		status = pci_read_config(ntb->device, XEON_LINK_STATUS_OFFSET,
		    2);

		if ((status & NTB_LINK_STATUS_ACTIVE) != 0)
			link_state = NTB_LINK_UP;
		else
			link_state = NTB_LINK_DOWN;
	}

	ntb_handle_link_event(ntb, link_state);

	return (0);
}

/**
 * ntb_register_event_callback() - register event callback
 * @ntb: pointer to ntb_softc instance
 * @func: callback function to register
 *
 * This function registers a callback for any HW driver events such as link
 * up/down, power management notices and etc.
 *
 * RETURNS: An appropriate ERRNO error value on error (EINVAL if a callback
 * is already registered), or zero for success.
 */
int
ntb_register_event_callback(struct ntb_softc *ntb, ntb_event_callback func)
{

	if (ntb->event_cb != NULL)
		return (EINVAL);

	ntb->event_cb = func;

	return (0);
}

/**
 * ntb_unregister_event_callback() - unregisters the event callback
 * @ntb: pointer to ntb_softc instance
 *
 * This function unregisters the existing callback from transport
 */
void
ntb_unregister_event_callback(struct ntb_softc *ntb)
{

	ntb->event_cb = NULL;
}

/*
 * Callout worker: re-run a doorbell callback as long as it reports forward
 * progress (non-zero return), then unmask the doorbell interrupt again.
 */
static void
ntb_irq_work(void *arg)
{
	struct ntb_db_cb *db_cb = arg;
	struct ntb_softc *ntb;
	int rc;

	rc = db_cb->callback(db_cb->data, db_cb->db_num);
	/* Poll if forward progress was made. */
	if (rc != 0) {
		callout_reset(&db_cb->irq_work, 0, ntb_irq_work, db_cb);
		return;
	}

	/* Unmask interrupt if no progress was made. */
	ntb = db_cb->ntb;
	unmask_ldb_interrupt(ntb, db_cb->db_num);
}

/**
 * ntb_register_db_callback() - register a callback for doorbell interrupt
 * @ntb: pointer to ntb_softc instance
 * @idx: doorbell index to register callback, zero based
 * @data: pointer to be returned to caller with every callback
 * @func: callback function to register
 *
 * This function registers a callback function for the doorbell interrupt
 * on the primary side. The function will unmask the doorbell as well to
 * allow interrupt.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
int
ntb_register_db_callback(struct ntb_softc *ntb, unsigned int idx, void *data,
    ntb_db_callback func)
{
	struct ntb_db_cb *db_cb = &ntb->db_cb[idx];

	/*
	 * Reject out-of-range indices and slots already in use or reserved.
	 * The short-circuit ordering matters: db_cb is only dereferenced
	 * after idx has passed the range check.
	 */
	if (idx >= ntb->max_cbs || db_cb->callback != NULL || db_cb->reserved) {
		device_printf(ntb->device, "Invalid Index.\n");
		return (EINVAL);
	}

	db_cb->callback = func;
	db_cb->data = data;
	callout_init(&db_cb->irq_work, 1);

	unmask_ldb_interrupt(ntb, idx);

	return (0);
}

/**
 * ntb_unregister_db_callback() - unregister a callback for doorbell interrupt
 * @ntb: pointer to ntb_softc instance
 * @idx: doorbell index to register callback, zero based
 *
 * This function unregisters a callback function for the doorbell interrupt
 * on the primary side. The function will also mask the said doorbell.
 */
void
ntb_unregister_db_callback(struct ntb_softc *ntb, unsigned int idx)
{

	if (idx >= ntb->max_cbs || ntb->db_cb[idx].callback == NULL)
		return;

	mask_ldb_interrupt(ntb, idx);

	/* Wait out any in-flight irq_work callout before clearing the slot. */
	callout_drain(&ntb->db_cb[idx].irq_work);
	ntb->db_cb[idx].callback = NULL;
}

/**
 * ntb_find_transport() - find the transport pointer
 * @transport: pointer to pci device
 *
 * Given the pci device pointer, return the transport pointer passed in when
 * the transport attached when it was inited.
 *
 * RETURNS: pointer to transport.
 */
void *
ntb_find_transport(struct ntb_softc *ntb)
{

	return (ntb->ntb_transport);
}

/**
 * ntb_register_transport() - Register NTB transport with NTB HW driver
 * @transport: transport identifier
 *
 * This function allows a transport to reserve the hardware driver for
 * NTB usage.
 *
 * RETURNS: pointer to ntb_softc, NULL on error (a transport is already
 * registered).
 */
struct ntb_softc *
ntb_register_transport(struct ntb_softc *ntb, void *transport)
{

	/*
	 * TODO: when we have more than one transport, we will need to rewrite
	 * this to prevent race conditions
	 */
	if (ntb->ntb_transport != NULL)
		return (NULL);

	ntb->ntb_transport = transport;
	return (ntb);
}

/**
 * ntb_unregister_transport() - Unregister the transport with the NTB HW driver
 * @ntb - ntb_softc of the transport to be freed
 *
 * This function unregisters the transport from the HW driver and performs any
 * necessary cleanups.
 */
void
ntb_unregister_transport(struct ntb_softc *ntb)
{
	uint8_t i;

	if (ntb->ntb_transport == NULL)
		return;

	/* Tear down every doorbell callback the transport registered. */
	for (i = 0; i < ntb->max_cbs; i++)
		ntb_unregister_db_callback(ntb, i);

	ntb_unregister_event_callback(ntb);
	ntb->ntb_transport = NULL;
}

/**
 * ntb_get_max_spads() - get the total scratch regs usable
 * @ntb: pointer to ntb_softc instance
 *
 * This function returns the max 32bit scratchpad registers usable by the
 * upper layer.
 *
 * RETURNS: total number of scratch pad registers available
 */
uint8_t
ntb_get_max_spads(struct ntb_softc *ntb)
{

	return (ntb->limits.max_spads);
}

/* Return the number of doorbell callbacks this hardware supports. */
uint8_t
ntb_get_max_cbs(struct ntb_softc *ntb)
{

	return (ntb->max_cbs);
}

/* Return the number of memory windows usable by the upper layer. */
uint8_t
ntb_get_max_mw(struct ntb_softc *ntb)
{

	return (ntb->limits.max_mw);
}

/**
 * ntb_write_local_spad() - write to the local scratchpad register
 * @ntb: pointer to ntb_softc instance
 * @idx: index to the scratchpad register, 0 based
 * @val: the data value to put into the register
 *
 * This function allows writing of a 32bit value to the indexed scratchpad
 * register.  The register resides on the primary (internal) side; the
 * original header claimed "secondary", but the body writes spad_local.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
int
ntb_write_local_spad(struct ntb_softc *ntb, unsigned int idx, uint32_t val)
{

	if (idx >= ntb->limits.max_spads)
		return (EINVAL);

	ntb_reg_write(4, ntb->reg_ofs.spad_local + idx * 4, val);

	return (0);
}

/**
 * ntb_read_local_spad() - read from the primary scratchpad register
 * @ntb: pointer to ntb_softc instance
 * @idx: index to scratchpad register, 0 based
 * @val: pointer to 32bit integer for storing the register value
 *
 * This function allows reading of the 32bit scratchpad register on
 * the primary (internal) side.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
int
ntb_read_local_spad(struct ntb_softc *ntb, unsigned int idx, uint32_t *val)
{

	if (idx >= ntb->limits.max_spads)
		return (EINVAL);

	*val = ntb_reg_read(4, ntb->reg_ofs.spad_local + idx * 4);

	return (0);
}

/**
 * ntb_write_remote_spad() - write to the secondary scratchpad register
 * @ntb: pointer to ntb_softc instance
 * @idx: index to the scratchpad register, 0 based
 * @val: the data value to put into the register
 *
 * This function allows writing of a 32bit value to the indexed scratchpad
 * register. The register resides on the secondary (external) side.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
int
ntb_write_remote_spad(struct ntb_softc *ntb, unsigned int idx, uint32_t val)
{

	if (idx >= ntb->limits.max_spads)
		return (EINVAL);

	/*
	 * On REGS_THRU_MW errata hardware, remote registers are reached
	 * through a memory window rather than the register BAR.
	 */
	if (HAS_FEATURE(NTB_REGS_THRU_MW))
		ntb_mw_write(4, XEON_SHADOW_SPAD_OFFSET + idx * 4, val);
	else
		ntb_reg_write(4, ntb->reg_ofs.spad_remote + idx * 4, val);

	return (0);
}

/**
 * ntb_read_remote_spad() - read from the secondary scratchpad register
 * @ntb: pointer to ntb_softc instance
 * @idx: index to scratchpad register, 0 based
 * @val: pointer to 32bit integer for storing the register value
 *
 * This function allows reading of the 32bit scratchpad register on the
 * secondary (external) side; the original header claimed "primary", but
 * the body reads spad_remote.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
int
ntb_read_remote_spad(struct ntb_softc *ntb, unsigned int idx, uint32_t *val)
{

	if (idx >= ntb->limits.max_spads)
		return (EINVAL);

	if (HAS_FEATURE(NTB_REGS_THRU_MW))
		*val = ntb_mw_read(4, XEON_SHADOW_SPAD_OFFSET + idx * 4);
	else
		*val = ntb_reg_read(4, ntb->reg_ofs.spad_remote + idx * 4);

	return (0);
}

/**
 * ntb_get_mw_vbase() - get virtual addr for the NTB memory window
 * @ntb: pointer to ntb_softc instance
 * @mw: memory window number
 *
 * This function provides the base virtual address of the memory window
 * specified.
 *
 * RETURNS: pointer to virtual address, or NULL on error.
1764 */ 1765void * 1766ntb_get_mw_vbase(struct ntb_softc *ntb, unsigned int mw) 1767{ 1768 1769 if (mw >= ntb_get_max_mw(ntb)) 1770 return (NULL); 1771 1772 return (ntb->bar_info[NTB_MW_TO_BAR(mw)].vbase); 1773} 1774 1775vm_paddr_t 1776ntb_get_mw_pbase(struct ntb_softc *ntb, unsigned int mw) 1777{ 1778 1779 if (mw >= ntb_get_max_mw(ntb)) 1780 return (0); 1781 1782 return (ntb->bar_info[NTB_MW_TO_BAR(mw)].pbase); 1783} 1784 1785/** 1786 * ntb_get_mw_size() - return size of NTB memory window 1787 * @ntb: pointer to ntb_softc instance 1788 * @mw: memory window number 1789 * 1790 * This function provides the physical size of the memory window specified 1791 * 1792 * RETURNS: the size of the memory window or zero on error 1793 */ 1794u_long 1795ntb_get_mw_size(struct ntb_softc *ntb, unsigned int mw) 1796{ 1797 1798 if (mw >= ntb_get_max_mw(ntb)) 1799 return (0); 1800 1801 return (ntb->bar_info[NTB_MW_TO_BAR(mw)].size); 1802} 1803 1804/** 1805 * ntb_set_mw_addr - set the memory window address 1806 * @ntb: pointer to ntb_softc instance 1807 * @mw: memory window number 1808 * @addr: base address for data 1809 * 1810 * This function sets the base physical address of the memory window. This 1811 * memory address is where data from the remote system will be transfered into 1812 * or out of depending on how the transport is configured. 
1813 */ 1814void 1815ntb_set_mw_addr(struct ntb_softc *ntb, unsigned int mw, uint64_t addr) 1816{ 1817 1818 if (mw >= ntb_get_max_mw(ntb)) 1819 return; 1820 1821 switch (NTB_MW_TO_BAR(mw)) { 1822 case NTB_B2B_BAR_1: 1823 ntb_reg_write(8, ntb->reg_ofs.bar2_xlat, addr); 1824 break; 1825 case NTB_B2B_BAR_2: 1826 if (HAS_FEATURE(NTB_SPLIT_BAR)) 1827 ntb_reg_write(4, ntb->reg_ofs.bar4_xlat, addr); 1828 else 1829 ntb_reg_write(8, ntb->reg_ofs.bar4_xlat, addr); 1830 break; 1831 case NTB_B2B_BAR_3: 1832 ntb_reg_write(4, ntb->reg_ofs.bar5_xlat, addr); 1833 break; 1834 } 1835} 1836 1837/** 1838 * ntb_ring_doorbell() - Set the doorbell on the secondary/external side 1839 * @ntb: pointer to ntb_softc instance 1840 * @db: doorbell to ring 1841 * 1842 * This function allows triggering of a doorbell on the secondary/external 1843 * side that will initiate an interrupt on the remote host 1844 */ 1845void 1846ntb_ring_doorbell(struct ntb_softc *ntb, unsigned int db) 1847{ 1848 uint64_t bit; 1849 1850 if (ntb->type == NTB_SOC) 1851 bit = 1 << db; 1852 else 1853 bit = ((1 << ntb->bits_per_vector) - 1) << 1854 (db * ntb->bits_per_vector); 1855 1856 if (HAS_FEATURE(NTB_REGS_THRU_MW)) { 1857 ntb_mw_write(2, XEON_SHADOW_PDOORBELL_OFFSET, bit); 1858 return; 1859 } 1860 1861 db_iowrite(ntb, ntb->reg_ofs.rdb, bit); 1862} 1863 1864/** 1865 * ntb_query_link_status() - return the hardware link status 1866 * @ndev: pointer to ntb_device instance 1867 * 1868 * Returns true if the hardware is connected to the remote system 1869 * 1870 * RETURNS: true or false based on the hardware link state 1871 */ 1872bool 1873ntb_query_link_status(struct ntb_softc *ntb) 1874{ 1875 1876 return (ntb->link_status == NTB_LINK_UP); 1877} 1878 1879static void 1880save_bar_parameters(struct ntb_pci_bar_info *bar) 1881{ 1882 1883 bar->pci_bus_tag = rman_get_bustag(bar->pci_resource); 1884 bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource); 1885 bar->pbase = rman_get_start(bar->pci_resource); 1886 bar->size = 
rman_get_size(bar->pci_resource); 1887 bar->vbase = rman_get_virtual(bar->pci_resource); 1888} 1889 1890device_t 1891ntb_get_device(struct ntb_softc *ntb) 1892{ 1893 1894 return (ntb->device); 1895} 1896 1897/* Export HW-specific errata information. */ 1898bool 1899ntb_has_feature(struct ntb_softc *ntb, uint64_t feature) 1900{ 1901 1902 return (HAS_FEATURE(feature)); 1903} 1904