/* ntb_transport.c, revision 289273 */
1/*- 2 * Copyright (C) 2013 Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/dev/ntb/if_ntb/if_ntb.c 289273 2015-10-13 23:42:13Z cem $"); 29 30#include <sys/param.h> 31#include <sys/kernel.h> 32#include <sys/systm.h> 33#include <sys/bus.h> 34#include <sys/ktr.h> 35#include <sys/lock.h> 36#include <sys/malloc.h> 37#include <sys/module.h> 38#include <sys/mutex.h> 39#include <sys/queue.h> 40#include <sys/socket.h> 41#include <sys/sockio.h> 42#include <sys/sysctl.h> 43#include <sys/taskqueue.h> 44#include <net/if.h> 45#include <net/if_media.h> 46#include <net/if_types.h> 47#include <net/if_var.h> 48#include <net/bpf.h> 49#include <net/ethernet.h> 50#include <vm/vm.h> 51#include <vm/pmap.h> 52#include <machine/bus.h> 53#include <machine/cpufunc.h> 54#include <machine/pmap.h> 55 56#include "../ntb_hw/ntb_hw.h" 57 58/* 59 * The Non-Transparent Bridge (NTB) is a device on some Intel processors that 60 * allows you to connect two systems using a PCI-e link. 61 * 62 * This module contains a protocol for sending and receiving messages, and 63 * exposes that protocol through a simulated ethernet device called ntb. 64 * 65 * NOTE: Much of the code in this module is shared with Linux. Any patches may 66 * be picked up and redistributed in Linux with a dual GPL/BSD license. 67 */ 68 69/* TODO: These functions should really be part of the kernel */ 70#define test_bit(pos, bitmap_addr) (*(bitmap_addr) & 1UL << (pos)) 71#define set_bit(pos, bitmap_addr) *(bitmap_addr) |= 1UL << (pos) 72#define clear_bit(pos, bitmap_addr) *(bitmap_addr) &= ~(1UL << (pos)) 73 74#define KTR_NTB KTR_SPARE3 75 76#define NTB_TRANSPORT_VERSION 3 77#define NTB_RX_MAX_PKTS 64 78#define NTB_RXQ_SIZE 300 79 80static unsigned int transport_mtu = 0x4000 + ETHER_HDR_LEN + ETHER_CRC_LEN; 81 82/* 83 * This is an oversimplification to work around Xeon Errata. The second client 84 * may be usable for unidirectional traffic. 
85 */ 86static unsigned int max_num_clients = 1; 87 88STAILQ_HEAD(ntb_queue_list, ntb_queue_entry); 89 90struct ntb_queue_entry { 91 /* ntb_queue list reference */ 92 STAILQ_ENTRY(ntb_queue_entry) entry; 93 94 /* info on data to be transfered */ 95 void *cb_data; 96 void *buf; 97 uint64_t len; 98 uint64_t flags; 99}; 100 101struct ntb_rx_info { 102 unsigned int entry; 103}; 104 105struct ntb_transport_qp { 106 struct ntb_netdev *transport; 107 struct ntb_softc *ntb; 108 109 void *cb_data; 110 111 bool client_ready; 112 bool qp_link; 113 uint8_t qp_num; /* Only 64 QPs are allowed. 0-63 */ 114 115 struct ntb_rx_info *rx_info; 116 struct ntb_rx_info *remote_rx_info; 117 118 void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data, 119 void *data, int len); 120 struct ntb_queue_list tx_free_q; 121 struct mtx ntb_tx_free_q_lock; 122 void *tx_mw; 123 uint64_t tx_index; 124 uint64_t tx_max_entry; 125 uint64_t tx_max_frame; 126 127 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, 128 void *data, int len); 129 struct ntb_queue_list rx_pend_q; 130 struct ntb_queue_list rx_free_q; 131 struct mtx ntb_rx_pend_q_lock; 132 struct mtx ntb_rx_free_q_lock; 133 struct task rx_completion_task; 134 void *rx_buff; 135 uint64_t rx_index; 136 uint64_t rx_max_entry; 137 uint64_t rx_max_frame; 138 139 void (*event_handler) (void *data, int status); 140 struct callout link_work; 141 struct callout queue_full; 142 struct callout rx_full; 143 144 uint64_t last_rx_no_buf; 145 146 /* Stats */ 147 uint64_t rx_bytes; 148 uint64_t rx_pkts; 149 uint64_t rx_ring_empty; 150 uint64_t rx_err_no_buf; 151 uint64_t rx_err_oflow; 152 uint64_t rx_err_ver; 153 uint64_t tx_bytes; 154 uint64_t tx_pkts; 155 uint64_t tx_ring_full; 156}; 157 158struct ntb_queue_handlers { 159 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, 160 void *data, int len); 161 void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data, 162 void *data, int len); 163 void (*event_handler) (void 
*data, int status); 164}; 165 166 167struct ntb_transport_mw { 168 size_t size; 169 void *virt_addr; 170 vm_paddr_t dma_addr; 171}; 172 173struct ntb_netdev { 174 struct ntb_softc *ntb; 175 struct ifnet *ifp; 176 struct ntb_transport_mw mw[NTB_NUM_MW]; 177 struct ntb_transport_qp *qps; 178 uint64_t max_qps; 179 uint64_t qp_bitmap; 180 bool transport_link; 181 struct callout link_work; 182 struct ntb_transport_qp *qp; 183 uint64_t bufsize; 184 u_char eaddr[ETHER_ADDR_LEN]; 185 struct mtx tx_lock; 186 struct mtx rx_lock; 187}; 188 189static struct ntb_netdev net_softc; 190 191enum { 192 IF_NTB_DESC_DONE_FLAG = 1 << 0, 193 IF_NTB_LINK_DOWN_FLAG = 1 << 1, 194}; 195 196struct ntb_payload_header { 197 uint64_t ver; 198 uint64_t len; 199 uint64_t flags; 200}; 201 202enum { 203 /* 204 * The order of this enum is part of the if_ntb remote protocol. Do 205 * not reorder without bumping protocol version (and it's probably best 206 * to keep the protocol in lock-step with the Linux NTB driver. 207 */ 208 IF_NTB_VERSION = 0, 209 IF_NTB_QP_LINKS, 210 IF_NTB_NUM_QPS, 211 IF_NTB_NUM_MWS, 212 /* 213 * N.B.: transport_link_work assumes MW1 enums = MW0 + 2. 
214 */ 215 IF_NTB_MW0_SZ_HIGH, 216 IF_NTB_MW0_SZ_LOW, 217 IF_NTB_MW1_SZ_HIGH, 218 IF_NTB_MW1_SZ_LOW, 219 IF_NTB_MAX_SPAD, 220}; 221 222#define QP_TO_MW(qp) ((qp) % NTB_NUM_MW) 223#define NTB_QP_DEF_NUM_ENTRIES 100 224#define NTB_LINK_DOWN_TIMEOUT 10 225 226static int ntb_handle_module_events(struct module *m, int what, void *arg); 227static int ntb_setup_interface(void); 228static int ntb_teardown_interface(void); 229static void ntb_net_init(void *arg); 230static int ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data); 231static void ntb_start(struct ifnet *ifp); 232static void ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, 233 void *data, int len); 234static void ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, 235 void *data, int len); 236static void ntb_net_event_handler(void *data, int status); 237static int ntb_transport_init(struct ntb_softc *ntb); 238static void ntb_transport_free(void *transport); 239static void ntb_transport_init_queue(struct ntb_netdev *nt, 240 unsigned int qp_num); 241static void ntb_transport_free_queue(struct ntb_transport_qp *qp); 242static struct ntb_transport_qp * ntb_transport_create_queue(void *data, 243 struct ntb_softc *pdev, const struct ntb_queue_handlers *handlers); 244static void ntb_transport_link_up(struct ntb_transport_qp *qp); 245static int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, 246 void *data, unsigned int len); 247static int ntb_process_tx(struct ntb_transport_qp *qp, 248 struct ntb_queue_entry *entry); 249static void ntb_tx_copy_task(struct ntb_transport_qp *qp, 250 struct ntb_queue_entry *entry, void *offset); 251static void ntb_qp_full(void *arg); 252static void ntb_transport_rxc_db(void *data, int db_num); 253static void ntb_rx_pendq_full(void *arg); 254static void ntb_transport_rx(struct ntb_transport_qp *qp); 255static int ntb_process_rxc(struct ntb_transport_qp *qp); 256static void ntb_rx_copy_task(struct ntb_transport_qp *qp, 257 struct 
ntb_queue_entry *entry, void *offset); 258static void ntb_rx_completion_task(void *arg, int pending); 259static void ntb_transport_event_callback(void *data, enum ntb_hw_event event); 260static void ntb_transport_link_work(void *arg); 261static int ntb_set_mw(struct ntb_netdev *nt, int num_mw, unsigned int size); 262static void ntb_free_mw(struct ntb_netdev *nt, int num_mw); 263static void ntb_transport_setup_qp_mw(struct ntb_netdev *nt, 264 unsigned int qp_num); 265static void ntb_qp_link_work(void *arg); 266static void ntb_transport_link_cleanup(struct ntb_netdev *nt); 267static void ntb_qp_link_down(struct ntb_transport_qp *qp); 268static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp); 269static void ntb_transport_link_down(struct ntb_transport_qp *qp); 270static void ntb_send_link_down(struct ntb_transport_qp *qp); 271static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, 272 struct ntb_queue_list *list); 273static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock, 274 struct ntb_queue_list *list); 275static void create_random_local_eui48(u_char *eaddr); 276static unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp); 277 278MALLOC_DEFINE(M_NTB_IF, "if_ntb", "ntb network driver"); 279 280/* Module setup and teardown */ 281static int 282ntb_handle_module_events(struct module *m, int what, void *arg) 283{ 284 int err = 0; 285 286 switch (what) { 287 case MOD_LOAD: 288 err = ntb_setup_interface(); 289 break; 290 case MOD_UNLOAD: 291 err = ntb_teardown_interface(); 292 break; 293 default: 294 err = EOPNOTSUPP; 295 break; 296 } 297 return (err); 298} 299 300static moduledata_t if_ntb_mod = { 301 "if_ntb", 302 ntb_handle_module_events, 303 NULL 304}; 305 306DECLARE_MODULE(if_ntb, if_ntb_mod, SI_SUB_KLD, SI_ORDER_ANY); 307MODULE_DEPEND(if_ntb, ntb_hw, 1, 1, 1); 308 309static int 310ntb_setup_interface(void) 311{ 312 struct ifnet *ifp; 313 struct ntb_queue_handlers handlers = { ntb_net_rx_handler, 314 ntb_net_tx_handler, 
ntb_net_event_handler }; 315 316 net_softc.ntb = devclass_get_softc(devclass_find("ntb_hw"), 0); 317 if (net_softc.ntb == NULL) { 318 printf("ntb: Cannot find devclass\n"); 319 return (ENXIO); 320 } 321 322 ntb_transport_init(net_softc.ntb); 323 324 ifp = net_softc.ifp = if_alloc(IFT_ETHER); 325 if (ifp == NULL) { 326 printf("ntb: cannot allocate ifnet structure\n"); 327 return (ENOMEM); 328 } 329 330 net_softc.qp = ntb_transport_create_queue(ifp, net_softc.ntb, 331 &handlers); 332 if_initname(ifp, "ntb", 0); 333 ifp->if_init = ntb_net_init; 334 ifp->if_softc = &net_softc; 335 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX; 336 ifp->if_ioctl = ntb_ioctl; 337 ifp->if_start = ntb_start; 338 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 339 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; 340 IFQ_SET_READY(&ifp->if_snd); 341 create_random_local_eui48(net_softc.eaddr); 342 ether_ifattach(ifp, net_softc.eaddr); 343 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU; 344 ifp->if_capenable = ifp->if_capabilities; 345 346 ntb_transport_link_up(net_softc.qp); 347 net_softc.bufsize = ntb_transport_max_size(net_softc.qp) + 348 sizeof(struct ether_header); 349 return (0); 350} 351 352static int 353ntb_teardown_interface(void) 354{ 355 356 if (net_softc.qp != NULL) 357 ntb_transport_link_down(net_softc.qp); 358 359 if (net_softc.ifp != NULL) { 360 ether_ifdetach(net_softc.ifp); 361 if_free(net_softc.ifp); 362 } 363 364 if (net_softc.qp != NULL) { 365 ntb_transport_free_queue(net_softc.qp); 366 ntb_transport_free(&net_softc); 367 } 368 369 return (0); 370} 371 372/* Network device interface */ 373 374static void 375ntb_net_init(void *arg) 376{ 377 struct ntb_netdev *ntb_softc = arg; 378 struct ifnet *ifp = ntb_softc->ifp; 379 380 ifp->if_drv_flags |= IFF_DRV_RUNNING; 381 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 382 ifp->if_flags |= IFF_UP; 383 if_link_state_change(ifp, LINK_STATE_UP); 384} 385 386static int 387ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 388{ 389 struct 
ntb_netdev *nt = ifp->if_softc; 390 struct ifreq *ifr = (struct ifreq *)data; 391 int error = 0; 392 393 switch (command) { 394 case SIOCSIFMTU: 395 { 396 if (ifr->ifr_mtu > ntb_transport_max_size(nt->qp) - 397 ETHER_HDR_LEN - ETHER_CRC_LEN) { 398 error = EINVAL; 399 break; 400 } 401 402 ifp->if_mtu = ifr->ifr_mtu; 403 break; 404 } 405 default: 406 error = ether_ioctl(ifp, command, data); 407 break; 408 } 409 410 return (error); 411} 412 413 414static void 415ntb_start(struct ifnet *ifp) 416{ 417 struct mbuf *m_head; 418 struct ntb_netdev *nt = ifp->if_softc; 419 int rc; 420 421 mtx_lock(&nt->tx_lock); 422 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 423 CTR0(KTR_NTB, "TX: ntb_start"); 424 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 425 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 426 CTR1(KTR_NTB, "TX: start mbuf %p", m_head); 427 rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head, 428 m_length(m_head, NULL)); 429 if (rc != 0) { 430 CTR1(KTR_NTB, 431 "TX: could not tx mbuf %p. Returning to snd q", 432 m_head); 433 if (rc == EAGAIN) { 434 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 435 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 436 callout_reset(&nt->qp->queue_full, hz / 1000, 437 ntb_qp_full, ifp); 438 } 439 break; 440 } 441 442 } 443 mtx_unlock(&nt->tx_lock); 444} 445 446/* Network Device Callbacks */ 447static void 448ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, 449 int len) 450{ 451 452 m_freem(data); 453 CTR1(KTR_NTB, "TX: tx_handler freeing mbuf %p", data); 454} 455 456static void 457ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, 458 int len) 459{ 460 struct mbuf *m = data; 461 struct ifnet *ifp = qp_data; 462 463 CTR0(KTR_NTB, "RX: rx handler"); 464 (*ifp->if_input)(ifp, m); 465} 466 467static void 468ntb_net_event_handler(void *data, int status) 469{ 470 471} 472 473/* Transport Init and teardown */ 474 475static int 476ntb_transport_init(struct ntb_softc *ntb) 477{ 478 struct ntb_netdev *nt = &net_softc; 479 int rc, i; 
480 481 nt->max_qps = max_num_clients; 482 ntb_register_transport(ntb, nt); 483 mtx_init(&nt->tx_lock, "ntb transport tx", NULL, MTX_DEF); 484 mtx_init(&nt->rx_lock, "ntb transport rx", NULL, MTX_DEF); 485 486 nt->qps = malloc(nt->max_qps * sizeof(struct ntb_transport_qp), 487 M_NTB_IF, M_WAITOK|M_ZERO); 488 489 nt->qp_bitmap = ((uint64_t) 1 << nt->max_qps) - 1; 490 491 for (i = 0; i < nt->max_qps; i++) 492 ntb_transport_init_queue(nt, i); 493 494 callout_init(&nt->link_work, 0); 495 496 rc = ntb_register_event_callback(ntb, 497 ntb_transport_event_callback); 498 if (rc != 0) 499 goto err; 500 501 if (ntb_query_link_status(ntb)) { 502 if (bootverbose) 503 device_printf(ntb_get_device(ntb), "link up\n"); 504 callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); 505 } 506 507 return (0); 508 509err: 510 free(nt->qps, M_NTB_IF); 511 ntb_unregister_transport(ntb); 512 return (rc); 513} 514 515static void 516ntb_transport_free(void *transport) 517{ 518 struct ntb_netdev *nt = transport; 519 struct ntb_softc *ntb = nt->ntb; 520 int i; 521 522 ntb_transport_link_cleanup(nt); 523 524 callout_drain(&nt->link_work); 525 526 /* verify that all the qps are freed */ 527 for (i = 0; i < nt->max_qps; i++) 528 if (!test_bit(i, &nt->qp_bitmap)) 529 ntb_transport_free_queue(&nt->qps[i]); 530 531 ntb_unregister_event_callback(ntb); 532 533 for (i = 0; i < NTB_NUM_MW; i++) 534 ntb_free_mw(nt, i); 535 536 free(nt->qps, M_NTB_IF); 537 ntb_unregister_transport(ntb); 538} 539 540static void 541ntb_transport_init_queue(struct ntb_netdev *nt, unsigned int qp_num) 542{ 543 struct ntb_transport_qp *qp; 544 unsigned int num_qps_mw, tx_size; 545 uint8_t mw_num = QP_TO_MW(qp_num); 546 547 qp = &nt->qps[qp_num]; 548 qp->qp_num = qp_num; 549 qp->transport = nt; 550 qp->ntb = nt->ntb; 551 qp->qp_link = NTB_LINK_DOWN; 552 qp->client_ready = NTB_LINK_DOWN; 553 qp->event_handler = NULL; 554 555 if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW) 556 num_qps_mw = nt->max_qps / 
NTB_NUM_MW + 1; 557 else 558 num_qps_mw = nt->max_qps / NTB_NUM_MW; 559 560 tx_size = (unsigned int) ntb_get_mw_size(qp->ntb, mw_num) / num_qps_mw; 561 qp->rx_info = (struct ntb_rx_info *) 562 ((char *)ntb_get_mw_vbase(qp->ntb, mw_num) + 563 (qp_num / NTB_NUM_MW * tx_size)); 564 tx_size -= sizeof(struct ntb_rx_info); 565 566 qp->tx_mw = qp->rx_info + 1; 567 /* Due to house-keeping, there must be at least 2 buffs */ 568 qp->tx_max_frame = min(transport_mtu + sizeof(struct ntb_payload_header), 569 tx_size / 2); 570 qp->tx_max_entry = tx_size / qp->tx_max_frame; 571 572 callout_init(&qp->link_work, 0); 573 callout_init(&qp->queue_full, 1); 574 callout_init(&qp->rx_full, 1); 575 576 mtx_init(&qp->ntb_rx_pend_q_lock, "ntb rx pend q", NULL, MTX_SPIN); 577 mtx_init(&qp->ntb_rx_free_q_lock, "ntb rx free q", NULL, MTX_SPIN); 578 mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN); 579 TASK_INIT(&qp->rx_completion_task, 0, ntb_rx_completion_task, qp); 580 581 STAILQ_INIT(&qp->rx_pend_q); 582 STAILQ_INIT(&qp->rx_free_q); 583 STAILQ_INIT(&qp->tx_free_q); 584} 585 586static void 587ntb_transport_free_queue(struct ntb_transport_qp *qp) 588{ 589 struct ntb_queue_entry *entry; 590 591 if (qp == NULL) 592 return; 593 594 callout_drain(&qp->link_work); 595 596 ntb_unregister_db_callback(qp->ntb, qp->qp_num); 597 598 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 599 free(entry, M_NTB_IF); 600 601 while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) 602 free(entry, M_NTB_IF); 603 604 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 605 free(entry, M_NTB_IF); 606 607 set_bit(qp->qp_num, &qp->transport->qp_bitmap); 608} 609 610/** 611 * ntb_transport_create_queue - Create a new NTB transport layer queue 612 * @rx_handler: receive callback function 613 * @tx_handler: transmit callback function 614 * @event_handler: event callback function 615 * 616 * Create a new NTB transport layer queue and provide the 
queue with a callback 617 * routine for both transmit and receive. The receive callback routine will be 618 * used to pass up data when the transport has received it on the queue. The 619 * transmit callback routine will be called when the transport has completed the 620 * transmission of the data on the queue and the data is ready to be freed. 621 * 622 * RETURNS: pointer to newly created ntb_queue, NULL on error. 623 */ 624static struct ntb_transport_qp * 625ntb_transport_create_queue(void *data, struct ntb_softc *pdev, 626 const struct ntb_queue_handlers *handlers) 627{ 628 struct ntb_queue_entry *entry; 629 struct ntb_transport_qp *qp; 630 struct ntb_netdev *nt; 631 unsigned int free_queue; 632 int rc, i; 633 634 nt = ntb_find_transport(pdev); 635 if (nt == NULL) 636 goto err; 637 638 free_queue = ffs(nt->qp_bitmap); 639 if (free_queue == 0) 640 goto err; 641 642 /* decrement free_queue to make it zero based */ 643 free_queue--; 644 645 clear_bit(free_queue, &nt->qp_bitmap); 646 647 qp = &nt->qps[free_queue]; 648 qp->cb_data = data; 649 qp->rx_handler = handlers->rx_handler; 650 qp->tx_handler = handlers->tx_handler; 651 qp->event_handler = handlers->event_handler; 652 653 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 654 entry = malloc(sizeof(struct ntb_queue_entry), M_NTB_IF, 655 M_WAITOK|M_ZERO); 656 entry->cb_data = nt->ifp; 657 entry->buf = NULL; 658 entry->len = transport_mtu; 659 ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); 660 } 661 662 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 663 entry = malloc(sizeof(struct ntb_queue_entry), M_NTB_IF, 664 M_WAITOK|M_ZERO); 665 ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); 666 } 667 668 rc = ntb_register_db_callback(qp->ntb, free_queue, qp, 669 ntb_transport_rxc_db); 670 if (rc != 0) 671 goto err1; 672 673 return (qp); 674 675err1: 676 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 677 free(entry, M_NTB_IF); 678 while ((entry = 
ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 679 free(entry, M_NTB_IF); 680 set_bit(free_queue, &nt->qp_bitmap); 681err: 682 return (NULL); 683} 684 685/** 686 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue 687 * @qp: NTB transport layer queue to be enabled 688 * 689 * Notify NTB transport layer of client readiness to use queue 690 */ 691static void 692ntb_transport_link_up(struct ntb_transport_qp *qp) 693{ 694 695 if (qp == NULL) 696 return; 697 698 qp->client_ready = NTB_LINK_UP; 699 if (bootverbose) 700 device_printf(ntb_get_device(qp->ntb), "qp client ready\n"); 701 702 if (qp->transport->transport_link == NTB_LINK_UP) 703 callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); 704} 705 706 707 708/* Transport Tx */ 709 710/** 711 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry 712 * @qp: NTB transport layer queue the entry is to be enqueued on 713 * @cb: per buffer pointer for callback function to use 714 * @data: pointer to data buffer that will be sent 715 * @len: length of the data buffer 716 * 717 * Enqueue a new transmit buffer onto the transport queue from which a NTB 718 * payload will be transmitted. This assumes that a lock is being held to 719 * serialize access to the qp. 720 * 721 * RETURNS: An appropriate ERRNO error value on error, or zero for success. 
722 */ 723static int 724ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, 725 unsigned int len) 726{ 727 struct ntb_queue_entry *entry; 728 int rc; 729 730 if (qp == NULL || qp->qp_link != NTB_LINK_UP || len == 0) { 731 CTR0(KTR_NTB, "TX: link not up"); 732 return (EINVAL); 733 } 734 735 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); 736 if (entry == NULL) { 737 CTR0(KTR_NTB, "TX: could not get entry from tx_free_q"); 738 return (ENOMEM); 739 } 740 CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry); 741 742 entry->cb_data = cb; 743 entry->buf = data; 744 entry->len = len; 745 entry->flags = 0; 746 747 rc = ntb_process_tx(qp, entry); 748 if (rc != 0) { 749 ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); 750 CTR1(KTR_NTB, 751 "TX: process_tx failed. Returning entry %p to tx_free_q", 752 entry); 753 } 754 return (rc); 755} 756 757static int 758ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) 759{ 760 void *offset; 761 762 offset = (char *)qp->tx_mw + qp->tx_max_frame * qp->tx_index; 763 CTR3(KTR_NTB, 764 "TX: process_tx: tx_pkts=%u, tx_index=%u, remote entry=%u", 765 qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry); 766 if (qp->tx_index == qp->remote_rx_info->entry) { 767 CTR0(KTR_NTB, "TX: ring full"); 768 qp->tx_ring_full++; 769 return (EAGAIN); 770 } 771 772 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { 773 if (qp->tx_handler != NULL) 774 qp->tx_handler(qp, qp->cb_data, entry->buf, 775 EIO); 776 777 ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); 778 CTR1(KTR_NTB, 779 "TX: frame too big. 
returning entry %p to tx_free_q", 780 entry); 781 return (0); 782 } 783 CTR2(KTR_NTB, "TX: copying entry %p to offset %p", entry, offset); 784 ntb_tx_copy_task(qp, entry, offset); 785 786 qp->tx_index++; 787 qp->tx_index %= qp->tx_max_entry; 788 789 qp->tx_pkts++; 790 791 return (0); 792} 793 794static void 795ntb_tx_copy_task(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, 796 void *offset) 797{ 798 struct ntb_payload_header *hdr; 799 800 CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset); 801 if (entry->buf != NULL) 802 m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset); 803 804 hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame - 805 sizeof(struct ntb_payload_header)); 806 hdr->len = entry->len; /* TODO: replace with bus_space_write */ 807 hdr->ver = qp->tx_pkts; /* TODO: replace with bus_space_write */ 808 wmb(); 809 /* TODO: replace with bus_space_write */ 810 hdr->flags = entry->flags | IF_NTB_DESC_DONE_FLAG; 811 812 ntb_ring_doorbell(qp->ntb, qp->qp_num); 813 814 /* 815 * The entry length can only be zero if the packet is intended to be a 816 * "link down" or similar. Since no payload is being sent in these 817 * cases, there is nothing to add to the completion queue. 818 */ 819 if (entry->len > 0) { 820 qp->tx_bytes += entry->len; 821 822 if (qp->tx_handler) 823 qp->tx_handler(qp, qp->cb_data, entry->cb_data, 824 entry->len); 825 } 826 827 CTR2(KTR_NTB, 828 "TX: entry %p sent. 
hdr->ver = %d, Returning to tx_free_q", entry, 829 hdr->ver); 830 ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); 831} 832 833static void 834ntb_qp_full(void *arg) 835{ 836 837 CTR0(KTR_NTB, "TX: qp_full callout"); 838 ntb_start(arg); 839} 840 841/* Transport Rx */ 842static void 843ntb_transport_rxc_db(void *data, int db_num) 844{ 845 struct ntb_transport_qp *qp = data; 846 847 ntb_transport_rx(qp); 848} 849 850static void 851ntb_rx_pendq_full(void *arg) 852{ 853 854 CTR0(KTR_NTB, "RX: ntb_rx_pendq_full callout"); 855 ntb_transport_rx(arg); 856} 857 858static void 859ntb_transport_rx(struct ntb_transport_qp *qp) 860{ 861 uint64_t i; 862 int rc; 863 864 /* 865 * Limit the number of packets processed in a single interrupt to 866 * provide fairness to others 867 */ 868 mtx_lock(&qp->transport->rx_lock); 869 CTR0(KTR_NTB, "RX: transport_rx"); 870 for (i = 0; i < qp->rx_max_entry; i++) { 871 rc = ntb_process_rxc(qp); 872 if (rc != 0) { 873 CTR0(KTR_NTB, "RX: process_rxc failed"); 874 break; 875 } 876 } 877 mtx_unlock(&qp->transport->rx_lock); 878} 879 880static int 881ntb_process_rxc(struct ntb_transport_qp *qp) 882{ 883 struct ntb_payload_header *hdr; 884 struct ntb_queue_entry *entry; 885 void *offset; 886 887 offset = (void *) 888 ((char *)qp->rx_buff + qp->rx_max_frame * qp->rx_index); 889 hdr = (void *) 890 ((char *)offset + qp->rx_max_frame - 891 sizeof(struct ntb_payload_header)); 892 893 CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index); 894 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); 895 if (entry == NULL) { 896 qp->rx_err_no_buf++; 897 CTR0(KTR_NTB, "RX: No entries in rx_pend_q"); 898 return (ENOMEM); 899 } 900 callout_stop(&qp->rx_full); 901 CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry); 902 903 if ((hdr->flags & IF_NTB_DESC_DONE_FLAG) == 0) { 904 CTR1(KTR_NTB, 905 "RX: hdr not done. 
Returning entry %p to rx_pend_q", entry); 906 ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); 907 qp->rx_ring_empty++; 908 return (EAGAIN); 909 } 910 911 if (hdr->ver != (uint32_t) qp->rx_pkts) { 912 CTR3(KTR_NTB,"RX: ver != rx_pkts (%x != %lx). " 913 "Returning entry %p to rx_pend_q", hdr->ver, qp->rx_pkts, 914 entry); 915 ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); 916 qp->rx_err_ver++; 917 return (EIO); 918 } 919 920 if ((hdr->flags & IF_NTB_LINK_DOWN_FLAG) != 0) { 921 ntb_qp_link_down(qp); 922 CTR1(KTR_NTB, 923 "RX: link down. adding entry %p back to rx_pend_q", entry); 924 ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); 925 goto out; 926 } 927 928 if (hdr->len <= entry->len) { 929 entry->len = hdr->len; 930 ntb_rx_copy_task(qp, entry, offset); 931 } else { 932 CTR1(KTR_NTB, 933 "RX: len too long. Returning entry %p to rx_pend_q", entry); 934 ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); 935 936 qp->rx_err_oflow++; 937 } 938 939 qp->rx_bytes += hdr->len; 940 qp->rx_pkts++; 941 CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts); 942 943 944out: 945 /* Ensure that the data is globally visible before clearing the flag */ 946 wmb(); 947 hdr->flags = 0; 948 /* TODO: replace with bus_space_write */ 949 qp->rx_info->entry = qp->rx_index; 950 951 qp->rx_index++; 952 qp->rx_index %= qp->rx_max_entry; 953 954 return (0); 955} 956 957static void 958ntb_rx_copy_task(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, 959 void *offset) 960{ 961 struct ifnet *ifp = entry->cb_data; 962 unsigned int len = entry->len; 963 struct mbuf *m; 964 965 CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset); 966 m = m_devget(offset, len, 0, ifp, NULL); 967 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID; 968 969 entry->buf = (void *)m; 970 971 CTR2(KTR_NTB, 972 "RX: copied entry %p to mbuf %p. 
Adding entry to rx_free_q", entry, 973 m); 974 ntb_list_add(&qp->ntb_rx_free_q_lock, entry, &qp->rx_free_q); 975 976 taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task); 977} 978 979static void 980ntb_rx_completion_task(void *arg, int pending) 981{ 982 struct ntb_transport_qp *qp = arg; 983 struct mbuf *m; 984 struct ntb_queue_entry *entry; 985 986 CTR0(KTR_NTB, "RX: rx_completion_task"); 987 988 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) { 989 m = entry->buf; 990 CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m); 991 if (qp->rx_handler && qp->client_ready == NTB_LINK_UP) 992 qp->rx_handler(qp, qp->cb_data, m, entry->len); 993 994 entry->buf = NULL; 995 entry->len = qp->transport->bufsize; 996 997 CTR1(KTR_NTB,"RX: entry %p removed from rx_free_q " 998 "and added to rx_pend_q", entry); 999 ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); 1000 if (qp->rx_err_no_buf > qp->last_rx_no_buf) { 1001 qp->last_rx_no_buf = qp->rx_err_no_buf; 1002 CTR0(KTR_NTB, "RX: could spawn rx task"); 1003 callout_reset(&qp->rx_full, hz / 1000, ntb_rx_pendq_full, 1004 qp); 1005 } 1006 } 1007} 1008 1009/* Link Event handler */ 1010static void 1011ntb_transport_event_callback(void *data, enum ntb_hw_event event) 1012{ 1013 struct ntb_netdev *nt = data; 1014 1015 switch (event) { 1016 case NTB_EVENT_HW_LINK_UP: 1017 if (bootverbose) 1018 device_printf(ntb_get_device(nt->ntb), "HW link up\n"); 1019 callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); 1020 break; 1021 case NTB_EVENT_HW_LINK_DOWN: 1022 if (bootverbose) 1023 device_printf(ntb_get_device(nt->ntb), "HW link down\n"); 1024 ntb_transport_link_cleanup(nt); 1025 break; 1026 default: 1027 panic("ntb: Unknown NTB event"); 1028 } 1029} 1030 1031/* Link bring up */ 1032static void 1033ntb_transport_link_work(void *arg) 1034{ 1035 struct ntb_netdev *nt = arg; 1036 struct ntb_softc *ntb = nt->ntb; 1037 struct ntb_transport_qp *qp; 1038 uint64_t val64; 1039 uint32_t val, i, 
num_mw; 1040 int rc; 1041 1042 if (ntb_has_feature(ntb, NTB_REGS_THRU_MW)) 1043 num_mw = NTB_NUM_MW - 1; 1044 else 1045 num_mw = NTB_NUM_MW; 1046 1047 /* send the local info, in the opposite order of the way we read it */ 1048 for (i = 0; i < num_mw; i++) { 1049 rc = ntb_write_remote_spad(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), 1050 (uint64_t)ntb_get_mw_size(ntb, i) >> 32); 1051 if (rc != 0) 1052 goto out; 1053 1054 rc = ntb_write_remote_spad(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), 1055 (uint32_t)ntb_get_mw_size(ntb, i)); 1056 if (rc != 0) 1057 goto out; 1058 } 1059 1060 rc = ntb_write_remote_spad(ntb, IF_NTB_NUM_MWS, num_mw); 1061 if (rc != 0) 1062 goto out; 1063 1064 rc = ntb_write_remote_spad(ntb, IF_NTB_NUM_QPS, nt->max_qps); 1065 if (rc != 0) 1066 goto out; 1067 1068 rc = ntb_write_remote_spad(ntb, IF_NTB_VERSION, NTB_TRANSPORT_VERSION); 1069 if (rc != 0) 1070 goto out; 1071 1072 /* Query the remote side for its info */ 1073 rc = ntb_read_local_spad(ntb, IF_NTB_VERSION, &val); 1074 if (rc != 0) 1075 goto out; 1076 1077 if (val != NTB_TRANSPORT_VERSION) 1078 goto out; 1079 1080 rc = ntb_read_local_spad(ntb, IF_NTB_NUM_QPS, &val); 1081 if (rc != 0) 1082 goto out; 1083 1084 if (val != nt->max_qps) 1085 goto out; 1086 1087 rc = ntb_read_local_spad(ntb, IF_NTB_NUM_MWS, &val); 1088 if (rc != 0) 1089 goto out; 1090 1091 if (val != num_mw) 1092 goto out; 1093 1094 for (i = 0; i < num_mw; i++) { 1095 rc = ntb_read_local_spad(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), 1096 &val); 1097 if (rc != 0) 1098 goto free_mws; 1099 1100 val64 = (uint64_t)val << 32; 1101 1102 rc = ntb_read_local_spad(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), 1103 &val); 1104 if (rc != 0) 1105 goto free_mws; 1106 1107 val64 |= val; 1108 1109 rc = ntb_set_mw(nt, i, val64); 1110 if (rc != 0) 1111 goto free_mws; 1112 } 1113 1114 nt->transport_link = NTB_LINK_UP; 1115 if (bootverbose) 1116 device_printf(ntb_get_device(ntb), "transport link up\n"); 1117 1118 for (i = 0; i < nt->max_qps; i++) { 1119 qp = &nt->qps[i]; 1120 1121 
ntb_transport_setup_qp_mw(nt, i); 1122 1123 if (qp->client_ready == NTB_LINK_UP) 1124 callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); 1125 } 1126 1127 return; 1128 1129free_mws: 1130 for (i = 0; i < NTB_NUM_MW; i++) 1131 ntb_free_mw(nt, i); 1132out: 1133 if (ntb_query_link_status(ntb)) 1134 callout_reset(&nt->link_work, 1135 NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt); 1136} 1137 1138static int 1139ntb_set_mw(struct ntb_netdev *nt, int num_mw, unsigned int size) 1140{ 1141 struct ntb_transport_mw *mw = &nt->mw[num_mw]; 1142 1143 /* No need to re-setup */ 1144 if (mw->size == size) 1145 return (0); 1146 1147 if (mw->size != 0) 1148 ntb_free_mw(nt, num_mw); 1149 1150 /* Alloc memory for receiving data. Must be 4k aligned */ 1151 mw->size = size; 1152 1153 mw->virt_addr = contigmalloc(mw->size, M_NTB_IF, M_ZERO, 0, 1154 BUS_SPACE_MAXADDR, mw->size, 0); 1155 if (mw->virt_addr == NULL) { 1156 mw->size = 0; 1157 printf("ntb: Unable to allocate MW buffer of size %d\n", 1158 (int)mw->size); 1159 return (ENOMEM); 1160 } 1161 /* TODO: replace with bus_space_* functions */ 1162 mw->dma_addr = vtophys(mw->virt_addr); 1163 1164 /* Notify HW the memory location of the receive buffer */ 1165 ntb_set_mw_addr(nt->ntb, num_mw, mw->dma_addr); 1166 1167 return (0); 1168} 1169 1170static void 1171ntb_free_mw(struct ntb_netdev *nt, int num_mw) 1172{ 1173 struct ntb_transport_mw *mw = &nt->mw[num_mw]; 1174 1175 if (mw->virt_addr == NULL) 1176 return; 1177 1178 contigfree(mw->virt_addr, mw->size, M_NTB_IF); 1179 mw->virt_addr = NULL; 1180} 1181 1182static void 1183ntb_transport_setup_qp_mw(struct ntb_netdev *nt, unsigned int qp_num) 1184{ 1185 struct ntb_transport_qp *qp = &nt->qps[qp_num]; 1186 void *offset; 1187 unsigned int rx_size, num_qps_mw; 1188 uint8_t mw_num = QP_TO_MW(qp_num); 1189 unsigned int i; 1190 1191 if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW) 1192 num_qps_mw = nt->max_qps / NTB_NUM_MW + 1; 1193 else 1194 num_qps_mw = 
nt->max_qps / NTB_NUM_MW;

	/* Each qp sharing this MW gets an equal slice of its size. */
	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	/*
	 * The slice begins with an ntb_rx_info header; the rx frames follow
	 * it (rx_buff = remote_rx_info + 1 below).
	 */
	qp->remote_rx_info = (void *)((uint8_t *)nt->mw[mw_num].virt_addr +
	    (qp_num / NTB_NUM_MW * rx_size));
	rx_size -= sizeof(struct ntb_rx_info);

	qp->rx_buff = qp->remote_rx_info + 1;
	/* Due to house-keeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu + sizeof(struct ntb_payload_header),
	    rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		/* The payload header sits at the tail end of each frame. */
		offset = (void *)((uint8_t *)qp->rx_buff +
		    qp->rx_max_frame * (i + 1) -
		    sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;
}

/*
 * Callout handler that advertises this qp as ready in the peer's
 * IF_NTB_QP_LINKS scratchpad and checks whether the peer has done the
 * same; re-arms itself until the qp comes up or the transport link drops.
 */
static void
ntb_qp_link_work(void *arg)
{
	struct ntb_transport_qp *qp = arg;
	struct ntb_softc *ntb = qp->ntb;
	struct ntb_netdev *nt = qp->transport;
	int rc, val;

	rc = ntb_read_remote_spad(ntb, IF_NTB_QP_LINKS, &val);
	if (rc != 0)
		return;

	/* NOTE(review): rc is ignored for the next two spad accesses. */
	rc = ntb_write_remote_spad(ntb, IF_NTB_QP_LINKS, val | 1 << qp->qp_num);

	/* query remote spad for qp ready bits */
	rc = ntb_read_local_spad(ntb, IF_NTB_QP_LINKS, &val);

	/* See if the remote side is up */
	if ((1 << qp->qp_num & val) != 0) {
		qp->qp_link = NTB_LINK_UP;
		if (qp->event_handler != NULL)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
		if (bootverbose)
			device_printf(ntb_get_device(ntb), "qp link up\n");
	} else if (nt->transport_link == NTB_LINK_UP) {
		/* Peer not ready yet; poll again after the timeout. */
		callout_reset(&qp->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
	}
}

/* Link down event*/
/*
 * Tear the transport down after a hardware link-down event: push link-down
 * to the active qps and scrub the scratchpad registers.
 */
static void
ntb_transport_link_cleanup(struct ntb_netdev *nt)
{
	int i;

	/* Pass along the info to any clients */
	/*
	 * NOTE(review): only qps whose bit is clear in qp_bitmap are
	 * notified -- set bits presumably mark unclaimed qps; confirm
	 * against the qp allocation code.
	 */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_down(&nt->qps[i]);

	if (nt->transport_link == NTB_LINK_DOWN)
		/* Already down: just cancel any pending handshake callout. */
		callout_drain(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/*
	 * The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < IF_NTB_MAX_SPAD; i++)
		ntb_write_local_spad(nt->ntb, i, 0);
}


/* Bring a single queue pair's link down (thin wrapper). */
static void
ntb_qp_link_down(struct ntb_transport_qp *qp)
{

	ntb_qp_link_cleanup(qp);
}

/*
 * Mark a queue pair link down, notify its client via the event handler,
 * and re-arm the qp handshake callout if the transport link is still up.
 */
static void
ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_netdev *nt = qp->transport;

	if (qp->qp_link == NTB_LINK_DOWN) {
		/* Already down: just cancel any pending qp link callout. */
		callout_drain(&qp->link_work);
		return;
	}

	if (qp->event_handler != NULL)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	qp->qp_link = NTB_LINK_DOWN;

	/* Try to renegotiate the qp link once the peer comes back. */
	if (nt->transport_link == NTB_LINK_UP)
		callout_reset(&qp->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
}

/* Link commanded down */
/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified. It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
static void
ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	int rc, val;

	if (qp == NULL)
		return;

	qp->client_ready = NTB_LINK_DOWN;

	/* Clear our ready bit in the peer's qp-links scratchpad. */
	rc = ntb_read_remote_spad(qp->ntb, IF_NTB_QP_LINKS, &val);
	if (rc != 0)
		return;

	/* NOTE(review): the write's return value is ignored. */
	rc = ntb_write_remote_spad(qp->ntb, IF_NTB_QP_LINKS,
	    val & ~(1 << qp->qp_num));

	if (qp->qp_link == NTB_LINK_UP)
		/* Tell the peer in-band that this qp is going away. */
		ntb_send_link_down(qp);
	else
		callout_drain(&qp->link_work);

}

/*
 * Send an in-band link-down message: wait (bounded) for a free tx entry,
 * then transmit a zero-length packet flagged IF_NTB_LINK_DOWN_FLAG.
 * Gives up silently if no tx entry frees up within the timeout.
 */
static void
ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	int i, rc;

	if (qp->qp_link == NTB_LINK_DOWN)
		return;

	qp->qp_link = NTB_LINK_DOWN;

	/* Wait up to NTB_LINK_DOWN_TIMEOUT * hz/10 ticks for a tx entry. */
	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry != NULL)
			break;
		pause("NTB Wait for link down", hz / 10);
	}

	if (entry == NULL)
		return;

	/* Zero-length "packet" whose only content is the link-down flag. */
	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = IF_NTB_LINK_DOWN_FLAG;

	mtx_lock(&qp->transport->tx_lock);
	rc = ntb_process_tx(qp, entry);
	if (rc != 0)
		printf("ntb: Failed to send link down\n");
	mtx_unlock(&qp->transport->tx_lock);
}


/* List Management */

/* Append an entry to a queue list under its spin lock. */
static void
ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
    struct ntb_queue_list *list)
{

	mtx_lock_spin(lock);
	STAILQ_INSERT_TAIL(list, entry, entry);
	mtx_unlock_spin(lock);
}

/* Pop the head of a queue list under its spin lock; NULL if empty. */
static struct ntb_queue_entry *
ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list)
{
	struct ntb_queue_entry *entry;

	mtx_lock_spin(lock);
	if (STAILQ_EMPTY(list)) {
		entry = NULL;
		goto out;
	}
	entry = STAILQ_FIRST(list);
	STAILQ_REMOVE_HEAD(list, entry);
out:
	mtx_unlock_spin(lock);

	return (entry);
}

/* Helper
functions */ 1405/* TODO: This too should really be part of the kernel */ 1406#define EUI48_MULTICAST 1 << 0 1407#define EUI48_LOCALLY_ADMINISTERED 1 << 1 1408static void 1409create_random_local_eui48(u_char *eaddr) 1410{ 1411 static uint8_t counter = 0; 1412 uint32_t seed = ticks; 1413 1414 eaddr[0] = EUI48_LOCALLY_ADMINISTERED; 1415 memcpy(&eaddr[1], &seed, sizeof(uint32_t)); 1416 eaddr[5] = counter++; 1417} 1418 1419/** 1420 * ntb_transport_max_size - Query the max payload size of a qp 1421 * @qp: NTB transport layer queue to be queried 1422 * 1423 * Query the maximum payload size permissible on the given qp 1424 * 1425 * RETURNS: the max payload size of a qp 1426 */ 1427static unsigned int 1428ntb_transport_max_size(struct ntb_transport_qp *qp) 1429{ 1430 1431 if (qp == NULL) 1432 return (0); 1433 1434 return (qp->tx_max_frame - sizeof(struct ntb_payload_header)); 1435} 1436