/* ntb_transport.c revision 255271 */
1/*- 2 * Copyright (C) 2013 Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/dev/ntb/if_ntb/if_ntb.c 255271 2013-09-05 22:56:52Z carl $"); 29 30#include <sys/param.h> 31#include <sys/kernel.h> 32#include <sys/systm.h> 33#include <sys/bus.h> 34#include <sys/ktr.h> 35#include <sys/lock.h> 36#include <sys/malloc.h> 37#include <sys/module.h> 38#include <sys/mutex.h> 39#include <sys/queue.h> 40#include <sys/socket.h> 41#include <sys/sockio.h> 42#include <sys/taskqueue.h> 43#include <net/if.h> 44#include <net/if_media.h> 45#include <net/if_types.h> 46#include <net/if_var.h> 47#include <net/bpf.h> 48#include <net/ethernet.h> 49#include <vm/vm.h> 50#include <vm/pmap.h> 51#include <machine/bus.h> 52#include <machine/cpufunc.h> 53#include <machine/pmap.h> 54 55#include "../ntb_hw/ntb_hw.h" 56 57/* 58 * The Non-Transparent Bridge (NTB) is a device on some Intel processors that 59 * allows you to connect two systems using a PCI-e link. 60 * 61 * This module contains a protocol for sending and receiving messages, and 62 * exposes that protocol through a simulated ethernet device called ntb. 63 * 64 * NOTE: Much of the code in this module is shared with Linux. Any patches may 65 * be picked up and redistributed in Linux with a dual GPL/BSD license. 
66 */ 67 68/* TODO: These functions should really be part of the kernel */ 69#define test_bit(pos, bitmap_addr) (*(bitmap_addr) & 1UL << (pos)) 70#define set_bit(pos, bitmap_addr) *(bitmap_addr) |= 1UL << (pos) 71#define clear_bit(pos, bitmap_addr) *(bitmap_addr) &= ~(1UL << (pos)) 72 73#define KTR_NTB KTR_SPARE3 74 75#define NTB_TRANSPORT_VERSION 3 76#define NTB_RX_MAX_PKTS 64 77#define NTB_RXQ_SIZE 300 78 79static unsigned int transport_mtu = 0x4000 + ETHER_HDR_LEN + ETHER_CRC_LEN; 80static unsigned int max_num_clients = 1; 81 82STAILQ_HEAD(ntb_queue_list, ntb_queue_entry); 83 84struct ntb_queue_entry { 85 /* ntb_queue list reference */ 86 STAILQ_ENTRY(ntb_queue_entry) entry; 87 88 /* info on data to be transfered */ 89 void *cb_data; 90 void *buf; 91 uint64_t len; 92 uint64_t flags; 93}; 94 95struct ntb_rx_info { 96 unsigned int entry; 97}; 98 99struct ntb_transport_qp { 100 struct ntb_netdev *transport; 101 struct ntb_softc *ntb; 102 103 void *cb_data; 104 105 bool client_ready; 106 bool qp_link; 107 uint8_t qp_num; /* Only 64 QP's are allowed. 
0-63 */ 108 109 struct ntb_rx_info *rx_info; 110 struct ntb_rx_info *remote_rx_info; 111 112 void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data, 113 void *data, int len); 114 struct ntb_queue_list tx_free_q; 115 struct mtx ntb_tx_free_q_lock; 116 void *tx_mw; 117 uint64_t tx_index; 118 uint64_t tx_max_entry; 119 uint64_t tx_max_frame; 120 121 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, 122 void *data, int len); 123 struct ntb_queue_list rx_pend_q; 124 struct ntb_queue_list rx_free_q; 125 struct mtx ntb_rx_pend_q_lock; 126 struct mtx ntb_rx_free_q_lock; 127 struct task rx_completion_task; 128 void *rx_buff; 129 uint64_t rx_index; 130 uint64_t rx_max_entry; 131 uint64_t rx_max_frame; 132 133 void (*event_handler) (void *data, int status); 134 struct callout link_work; 135 struct callout queue_full; 136 struct callout rx_full; 137 138 uint64_t last_rx_no_buf; 139 140 /* Stats */ 141 uint64_t rx_bytes; 142 uint64_t rx_pkts; 143 uint64_t rx_ring_empty; 144 uint64_t rx_err_no_buf; 145 uint64_t rx_err_oflow; 146 uint64_t rx_err_ver; 147 uint64_t tx_bytes; 148 uint64_t tx_pkts; 149 uint64_t tx_ring_full; 150}; 151 152struct ntb_queue_handlers { 153 void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, 154 void *data, int len); 155 void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data, 156 void *data, int len); 157 void (*event_handler) (void *data, int status); 158}; 159 160 161struct ntb_transport_mw { 162 size_t size; 163 void *virt_addr; 164 vm_paddr_t dma_addr; 165}; 166 167struct ntb_netdev { 168 struct ntb_softc *ntb; 169 struct ifnet *ifp; 170 struct ntb_transport_mw mw[NTB_NUM_MW]; 171 struct ntb_transport_qp *qps; 172 uint64_t max_qps; 173 uint64_t qp_bitmap; 174 bool transport_link; 175 struct callout link_work; 176 struct ntb_transport_qp *qp; 177 uint64_t bufsize; 178 u_char eaddr[ETHER_ADDR_LEN]; 179 struct mtx tx_lock; 180 struct mtx rx_lock; 181}; 182 183static struct ntb_netdev net_softc; 184 185enum { 186 
IF_NTB_DESC_DONE_FLAG = 1 << 0, 187 IF_NTB_LINK_DOWN_FLAG = 1 << 1, 188}; 189 190struct ntb_payload_header { 191 uint64_t ver; 192 uint64_t len; 193 uint64_t flags; 194}; 195 196enum { 197 IF_NTB_VERSION = 0, 198 IF_NTB_MW0_SZ, 199 IF_NTB_MW1_SZ, 200 IF_NTB_NUM_QPS, 201 IF_NTB_QP_LINKS, 202 IF_NTB_MAX_SPAD, 203}; 204 205#define QP_TO_MW(qp) ((qp) % NTB_NUM_MW) 206#define NTB_QP_DEF_NUM_ENTRIES 100 207#define NTB_LINK_DOWN_TIMEOUT 10 208 209static int ntb_handle_module_events(struct module *m, int what, void *arg); 210static int ntb_setup_interface(void); 211static int ntb_teardown_interface(void); 212static void ntb_net_init(void *arg); 213static int ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data); 214static void ntb_start(struct ifnet *ifp); 215static void ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, 216 void *data, int len); 217static void ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, 218 void *data, int len); 219static void ntb_net_event_handler(void *data, int status); 220static int ntb_transport_init(struct ntb_softc *ntb); 221static void ntb_transport_free(void *transport); 222static void ntb_transport_init_queue(struct ntb_netdev *nt, 223 unsigned int qp_num); 224static void ntb_transport_free_queue(struct ntb_transport_qp *qp); 225static struct ntb_transport_qp * ntb_transport_create_queue(void *data, 226 struct ntb_softc *pdev, const struct ntb_queue_handlers *handlers); 227static void ntb_transport_link_up(struct ntb_transport_qp *qp); 228static int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, 229 void *data, unsigned int len); 230static int ntb_process_tx(struct ntb_transport_qp *qp, 231 struct ntb_queue_entry *entry); 232static void ntb_tx_copy_task(struct ntb_transport_qp *qp, 233 struct ntb_queue_entry *entry, void *offset); 234static void ntb_qp_full(void *arg); 235static void ntb_transport_rxc_db(void *data, int db_num); 236static void ntb_rx_pendq_full(void *arg); 237static void 
ntb_transport_rx(struct ntb_transport_qp *qp); 238static int ntb_process_rxc(struct ntb_transport_qp *qp); 239static void ntb_rx_copy_task(struct ntb_transport_qp *qp, 240 struct ntb_queue_entry *entry, void *offset); 241static void ntb_rx_completion_task(void *arg, int pending); 242static void ntb_transport_event_callback(void *data, enum ntb_hw_event event); 243static void ntb_transport_link_work(void *arg); 244static int ntb_set_mw(struct ntb_netdev *nt, int num_mw, unsigned int size); 245static void ntb_transport_setup_qp_mw(struct ntb_netdev *nt, 246 unsigned int qp_num); 247static void ntb_qp_link_work(void *arg); 248static void ntb_transport_link_cleanup(struct ntb_netdev *nt); 249static void ntb_qp_link_down(struct ntb_transport_qp *qp); 250static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp); 251static void ntb_transport_link_down(struct ntb_transport_qp *qp); 252static void ntb_send_link_down(struct ntb_transport_qp *qp); 253static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, 254 struct ntb_queue_list *list); 255static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock, 256 struct ntb_queue_list *list); 257static void create_random_local_eui48(u_char *eaddr); 258static unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp); 259 260MALLOC_DEFINE(M_NTB_IF, "if_ntb", "ntb network driver"); 261 262/* Module setup and teardown */ 263static int 264ntb_handle_module_events(struct module *m, int what, void *arg) 265{ 266 int err = 0; 267 268 switch (what) { 269 case MOD_LOAD: 270 err = ntb_setup_interface(); 271 break; 272 case MOD_UNLOAD: 273 err = ntb_teardown_interface(); 274 break; 275 default: 276 err = EOPNOTSUPP; 277 break; 278 } 279 return (err); 280} 281 282static moduledata_t if_ntb_mod = { 283 "if_ntb", 284 ntb_handle_module_events, 285 NULL 286}; 287 288DECLARE_MODULE(if_ntb, if_ntb_mod, SI_SUB_KLD, SI_ORDER_ANY); 289MODULE_DEPEND(if_ntb, ntb_hw, 1, 1, 1); 290 291static int 292ntb_setup_interface() 293{ 294 
struct ifnet *ifp; 295 struct ntb_queue_handlers handlers = { ntb_net_rx_handler, 296 ntb_net_tx_handler, ntb_net_event_handler }; 297 298 net_softc.ntb = devclass_get_softc(devclass_find("ntb_hw"), 0); 299 if (net_softc.ntb == NULL) { 300 printf("ntb: Can't find devclass\n"); 301 return (ENXIO); 302 } 303 304 ntb_transport_init(net_softc.ntb); 305 306 ifp = net_softc.ifp = if_alloc(IFT_ETHER); 307 if (ifp == NULL) { 308 printf("ntb: cannot allocate ifnet structure\n"); 309 return (ENOMEM); 310 } 311 312 net_softc.qp = ntb_transport_create_queue(ifp, net_softc.ntb, 313 &handlers); 314 if_initname(ifp, "ntb", 0); 315 ifp->if_init = ntb_net_init; 316 ifp->if_softc = &net_softc; 317 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX; 318 ifp->if_ioctl = ntb_ioctl; 319 ifp->if_start = ntb_start; 320 IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 321 ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; 322 IFQ_SET_READY(&ifp->if_snd); 323 create_random_local_eui48(net_softc.eaddr); 324 ether_ifattach(ifp, net_softc.eaddr); 325 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU; 326 ifp->if_capenable = ifp->if_capabilities; 327 328 ntb_transport_link_up(net_softc.qp); 329 net_softc.bufsize = ntb_transport_max_size(net_softc.qp) + 330 sizeof(struct ether_header); 331 return (0); 332} 333 334static int 335ntb_teardown_interface() 336{ 337 struct ifnet *ifp = net_softc.ifp; 338 339 ntb_transport_link_down(net_softc.qp); 340 341 ether_ifdetach(ifp); 342 if_free(ifp); 343 ntb_transport_free_queue(net_softc.qp); 344 ntb_transport_free(&net_softc); 345 346 return (0); 347} 348 349/* Network device interface */ 350 351static void 352ntb_net_init(void *arg) 353{ 354 struct ntb_netdev *ntb_softc = arg; 355 struct ifnet *ifp = ntb_softc->ifp; 356 357 ifp->if_drv_flags |= IFF_DRV_RUNNING; 358 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 359 ifp->if_flags |= IFF_UP; 360 if_link_state_change(ifp, LINK_STATE_UP); 361} 362 363static int 364ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 365{ 366 struct 
ntb_netdev *nt = ifp->if_softc; 367 struct ifreq *ifr = (struct ifreq *)data; 368 int error = 0; 369 370 switch (command) { 371 case SIOCSIFMTU: 372 { 373 if (ifr->ifr_mtu > ntb_transport_max_size(nt->qp) - 374 ETHER_HDR_LEN - ETHER_CRC_LEN) { 375 error = EINVAL; 376 break; 377 } 378 379 ifp->if_mtu = ifr->ifr_mtu; 380 break; 381 } 382 default: 383 error = ether_ioctl(ifp, command, data); 384 break; 385 } 386 387 return (error); 388} 389 390 391static void 392ntb_start(struct ifnet *ifp) 393{ 394 struct mbuf *m_head; 395 struct ntb_netdev *nt = ifp->if_softc; 396 int rc; 397 398 mtx_lock(&nt->tx_lock); 399 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 400 CTR0(KTR_NTB, "TX: ntb_start"); 401 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 402 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 403 CTR1(KTR_NTB, "TX: start mbuf %p", m_head); 404 rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head, 405 m_length(m_head, NULL)); 406 if (rc != 0) { 407 CTR1(KTR_NTB, 408 "TX: couldn't tx mbuf %p. Returning to snd q", 409 m_head); 410 if (rc == EAGAIN) { 411 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 412 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 413 callout_reset(&nt->qp->queue_full, hz / 1000, 414 ntb_qp_full, ifp); 415 } 416 break; 417 } 418 419 } 420 mtx_unlock(&nt->tx_lock); 421} 422 423/* Network Device Callbacks */ 424static void 425ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, 426 int len) 427{ 428 429 m_freem(data); 430 CTR1(KTR_NTB, "TX: tx_handler freeing mbuf %p", data); 431} 432 433static void 434ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, 435 int len) 436{ 437 struct mbuf *m = data; 438 struct ifnet *ifp = qp_data; 439 440 CTR0(KTR_NTB, "RX: rx handler"); 441 (*ifp->if_input)(ifp, m); 442} 443 444static void 445ntb_net_event_handler(void *data, int status) 446{ 447 448} 449 450/* Transport Init and teardown */ 451 452static int 453ntb_transport_init(struct ntb_softc *ntb) 454{ 455 struct ntb_netdev *nt = &net_softc; 456 int rc, i; 
457 458 nt->max_qps = max_num_clients; 459 ntb_register_transport(ntb, nt); 460 mtx_init(&nt->tx_lock, "ntb transport tx", NULL, MTX_DEF); 461 mtx_init(&nt->rx_lock, "ntb transport rx", NULL, MTX_DEF); 462 463 nt->qps = malloc(nt->max_qps * sizeof(struct ntb_transport_qp), 464 M_NTB_IF, M_WAITOK|M_ZERO); 465 466 nt->qp_bitmap = ((uint64_t) 1 << nt->max_qps) - 1; 467 468 for (i = 0; i < nt->max_qps; i++) 469 ntb_transport_init_queue(nt, i); 470 471 callout_init(&nt->link_work, 0); 472 473 rc = ntb_register_event_callback(ntb, 474 ntb_transport_event_callback); 475 if (rc != 0) 476 goto err; 477 478 if (ntb_query_link_status(ntb)) { 479 if (bootverbose) 480 device_printf(ntb_get_device(ntb), "link up\n"); 481 callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); 482 } 483 484 return (0); 485 486err: 487 free(nt->qps, M_NTB_IF); 488 ntb_unregister_transport(ntb); 489 return (rc); 490} 491 492static void 493ntb_transport_free(void *transport) 494{ 495 struct ntb_netdev *nt = transport; 496 struct ntb_softc *ntb = nt->ntb; 497 int i; 498 499 nt->transport_link = NTB_LINK_DOWN; 500 501 callout_drain(&nt->link_work); 502 503 /* verify that all the qp's are freed */ 504 for (i = 0; i < nt->max_qps; i++) 505 if (!test_bit(i, &nt->qp_bitmap)) 506 ntb_transport_free_queue(&nt->qps[i]); 507 508 509 ntb_unregister_event_callback(ntb); 510 511 for (i = 0; i < NTB_NUM_MW; i++) 512 if (nt->mw[i].virt_addr != NULL) 513 contigfree(nt->mw[i].virt_addr, nt->mw[i].size, 514 M_NTB_IF); 515 516 free(nt->qps, M_NTB_IF); 517 ntb_unregister_transport(ntb); 518} 519 520static void 521ntb_transport_init_queue(struct ntb_netdev *nt, unsigned int qp_num) 522{ 523 struct ntb_transport_qp *qp; 524 unsigned int num_qps_mw, tx_size; 525 uint8_t mw_num = QP_TO_MW(qp_num); 526 527 qp = &nt->qps[qp_num]; 528 qp->qp_num = qp_num; 529 qp->transport = nt; 530 qp->ntb = nt->ntb; 531 qp->qp_link = NTB_LINK_DOWN; 532 qp->client_ready = NTB_LINK_DOWN; 533 qp->event_handler = NULL; 534 535 if 
(nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW) 536 num_qps_mw = nt->max_qps / NTB_NUM_MW + 1; 537 else 538 num_qps_mw = nt->max_qps / NTB_NUM_MW; 539 540 tx_size = (unsigned int) ntb_get_mw_size(qp->ntb, mw_num) / num_qps_mw; 541 qp->rx_info = (struct ntb_rx_info *) 542 ((char *)ntb_get_mw_vbase(qp->ntb, mw_num) + 543 (qp_num / NTB_NUM_MW * tx_size)); 544 tx_size -= sizeof(struct ntb_rx_info); 545 546 qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info); 547 qp->tx_max_frame = min(transport_mtu + sizeof(struct ntb_payload_header), 548 tx_size); 549 qp->tx_max_entry = tx_size / qp->tx_max_frame; 550 qp->tx_index = 0; 551 552 callout_init(&qp->link_work, 0); 553 callout_init(&qp->queue_full, CALLOUT_MPSAFE); 554 callout_init(&qp->rx_full, CALLOUT_MPSAFE); 555 556 mtx_init(&qp->ntb_rx_pend_q_lock, "ntb rx pend q", NULL, MTX_SPIN); 557 mtx_init(&qp->ntb_rx_free_q_lock, "ntb rx free q", NULL, MTX_SPIN); 558 mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN); 559 TASK_INIT(&qp->rx_completion_task, 0, ntb_rx_completion_task, qp); 560 561 STAILQ_INIT(&qp->rx_pend_q); 562 STAILQ_INIT(&qp->rx_free_q); 563 STAILQ_INIT(&qp->tx_free_q); 564} 565 566static void 567ntb_transport_free_queue(struct ntb_transport_qp *qp) 568{ 569 struct ntb_queue_entry *entry; 570 571 if (qp == NULL) 572 return; 573 574 callout_drain(&qp->link_work); 575 576 ntb_unregister_db_callback(qp->ntb, qp->qp_num); 577 578 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 579 free(entry, M_NTB_IF); 580 581 while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) 582 free(entry, M_NTB_IF); 583 584 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 585 free(entry, M_NTB_IF); 586 587 set_bit(qp->qp_num, &qp->transport->qp_bitmap); 588} 589 590/** 591 * ntb_transport_create_queue - Create a new NTB transport layer queue 592 * @rx_handler: receive callback function 593 * @tx_handler: transmit callback function 594 * 
@event_handler: event callback function 595 * 596 * Create a new NTB transport layer queue and provide the queue with a callback 597 * routine for both transmit and receive. The receive callback routine will be 598 * used to pass up data when the transport has received it on the queue. The 599 * transmit callback routine will be called when the transport has completed the 600 * transmission of the data on the queue and the data is ready to be freed. 601 * 602 * RETURNS: pointer to newly created ntb_queue, NULL on error. 603 */ 604static struct ntb_transport_qp * 605ntb_transport_create_queue(void *data, struct ntb_softc *pdev, 606 const struct ntb_queue_handlers *handlers) 607{ 608 struct ntb_queue_entry *entry; 609 struct ntb_transport_qp *qp; 610 struct ntb_netdev *nt; 611 unsigned int free_queue; 612 int rc, i; 613 614 nt = ntb_find_transport(pdev); 615 if (nt == NULL) 616 goto err; 617 618 free_queue = ffs(nt->qp_bitmap); 619 if (free_queue == 0) 620 goto err; 621 622 /* decrement free_queue to make it zero based */ 623 free_queue--; 624 625 clear_bit(free_queue, &nt->qp_bitmap); 626 627 qp = &nt->qps[free_queue]; 628 qp->cb_data = data; 629 qp->rx_handler = handlers->rx_handler; 630 qp->tx_handler = handlers->tx_handler; 631 qp->event_handler = handlers->event_handler; 632 633 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 634 entry = malloc(sizeof(struct ntb_queue_entry), M_NTB_IF, 635 M_WAITOK|M_ZERO); 636 entry->cb_data = nt->ifp; 637 entry->buf = NULL; 638 entry->len = transport_mtu; 639 ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); 640 } 641 642 for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { 643 entry = malloc(sizeof(struct ntb_queue_entry), M_NTB_IF, 644 M_WAITOK|M_ZERO); 645 ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); 646 } 647 648 rc = ntb_register_db_callback(qp->ntb, free_queue, qp, 649 ntb_transport_rxc_db); 650 if (rc != 0) 651 goto err1; 652 653 return (qp); 654 655err1: 656 while ((entry = 
ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 657 free(entry, M_NTB_IF); 658 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 659 free(entry, M_NTB_IF); 660 set_bit(free_queue, &nt->qp_bitmap); 661err: 662 return (NULL); 663} 664 665/** 666 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue 667 * @qp: NTB transport layer queue to be enabled 668 * 669 * Notify NTB transport layer of client readiness to use queue 670 */ 671static void 672ntb_transport_link_up(struct ntb_transport_qp *qp) 673{ 674 675 if (qp == NULL) 676 return; 677 678 qp->client_ready = NTB_LINK_UP; 679 if (bootverbose) 680 device_printf(ntb_get_device(qp->ntb), "qp client ready\n"); 681 682 if (qp->transport->transport_link == NTB_LINK_UP) 683 callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); 684} 685 686 687 688/* Transport Tx */ 689 690/** 691 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry 692 * @qp: NTB transport layer queue the entry is to be enqueued on 693 * @cb: per buffer pointer for callback function to use 694 * @data: pointer to data buffer that will be sent 695 * @len: length of the data buffer 696 * 697 * Enqueue a new transmit buffer onto the transport queue from which a NTB 698 * payload will be transmitted. This assumes that a lock is behing held to 699 * serialize access to the qp. 700 * 701 * RETURNS: An appropriate ERRNO error value on error, or zero for success. 
702 */ 703static int 704ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, 705 unsigned int len) 706{ 707 struct ntb_queue_entry *entry; 708 int rc; 709 710 if (qp == NULL || qp->qp_link != NTB_LINK_UP || len == 0) { 711 CTR0(KTR_NTB, "TX: link not up"); 712 return (EINVAL); 713 } 714 715 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); 716 if (entry == NULL) { 717 CTR0(KTR_NTB, "TX: couldn't get entry from tx_free_q"); 718 return (ENOMEM); 719 } 720 CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry); 721 722 entry->cb_data = cb; 723 entry->buf = data; 724 entry->len = len; 725 entry->flags = 0; 726 727 rc = ntb_process_tx(qp, entry); 728 if (rc != 0) { 729 ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); 730 CTR1(KTR_NTB, 731 "TX: process_tx failed. Returning entry %p to tx_free_q", 732 entry); 733 } 734 return (rc); 735} 736 737static int 738ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) 739{ 740 void *offset; 741 742 offset = (char *)qp->tx_mw + qp->tx_max_frame * qp->tx_index; 743 CTR3(KTR_NTB, 744 "TX: process_tx: tx_pkts=%u, tx_index=%u, remote entry=%u", 745 qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry); 746 if (qp->tx_index == qp->remote_rx_info->entry) { 747 CTR0(KTR_NTB, "TX: ring full"); 748 qp->tx_ring_full++; 749 return (EAGAIN); 750 } 751 752 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { 753 if (qp->tx_handler != NULL) 754 qp->tx_handler(qp, qp->cb_data, entry->buf, 755 EIO); 756 757 ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); 758 CTR1(KTR_NTB, 759 "TX: frame too big. 
returning entry %p to tx_free_q", 760 entry); 761 return (0); 762 } 763 CTR2(KTR_NTB, "TX: copying entry %p to offset %p", entry, offset); 764 ntb_tx_copy_task(qp, entry, offset); 765 766 qp->tx_index++; 767 qp->tx_index %= qp->tx_max_entry; 768 769 qp->tx_pkts++; 770 771 return (0); 772} 773 774static void 775ntb_tx_copy_task(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, 776 void *offset) 777{ 778 struct ntb_payload_header *hdr; 779 780 CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset); 781 if (entry->buf != NULL) 782 m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset); 783 784 hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame - 785 sizeof(struct ntb_payload_header)); 786 hdr->len = entry->len; /* TODO: replace with bus_space_write */ 787 hdr->ver = qp->tx_pkts; /* TODO: replace with bus_space_write */ 788 wmb(); 789 /* TODO: replace with bus_space_write */ 790 hdr->flags = entry->flags | IF_NTB_DESC_DONE_FLAG; 791 792 ntb_ring_sdb(qp->ntb, qp->qp_num); 793 794 /* 795 * The entry length can only be zero if the packet is intended to be a 796 * "link down" or similar. Since no payload is being sent in these 797 * cases, there is nothing to add to the completion queue. 798 */ 799 if (entry->len > 0) { 800 qp->tx_bytes += entry->len; 801 802 if (qp->tx_handler) 803 qp->tx_handler(qp, qp->cb_data, entry->cb_data, 804 entry->len); 805 } 806 807 CTR2(KTR_NTB, 808 "TX: entry %p sent. 
hdr->ver = %d, Returning to tx_free_q", entry, 809 hdr->ver); 810 ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); 811} 812 813static void 814ntb_qp_full(void *arg) 815{ 816 817 CTR0(KTR_NTB, "TX: qp_full callout"); 818 ntb_start(arg); 819} 820 821/* Transport Rx */ 822static void 823ntb_transport_rxc_db(void *data, int db_num) 824{ 825 struct ntb_transport_qp *qp = data; 826 827 ntb_transport_rx(qp); 828} 829 830static void 831ntb_rx_pendq_full(void *arg) 832{ 833 834 CTR0(KTR_NTB, "RX: ntb_rx_pendq_full callout"); 835 ntb_transport_rx(arg); 836} 837 838static void 839ntb_transport_rx(struct ntb_transport_qp *qp) 840{ 841 int rc, i; 842 843 /* 844 * Limit the number of packets processed in a single interrupt to 845 * provide fairness to others 846 */ 847 mtx_lock(&qp->transport->rx_lock); 848 CTR0(KTR_NTB, "RX: transport_rx"); 849 for (i = 0; i < NTB_RX_MAX_PKTS; i++) { 850 rc = ntb_process_rxc(qp); 851 if (rc != 0) { 852 CTR0(KTR_NTB, "RX: process_rxc failed"); 853 break; 854 } 855 } 856 mtx_unlock(&qp->transport->rx_lock); 857} 858 859static int 860ntb_process_rxc(struct ntb_transport_qp *qp) 861{ 862 struct ntb_payload_header *hdr; 863 struct ntb_queue_entry *entry; 864 void *offset; 865 866 offset = (void *) 867 ((char *)qp->rx_buff + qp->rx_max_frame * qp->rx_index); 868 hdr = (void *) 869 ((char *)offset + qp->rx_max_frame - 870 sizeof(struct ntb_payload_header)); 871 872 CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index); 873 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); 874 if (entry == NULL) { 875 qp->rx_err_no_buf++; 876 CTR0(KTR_NTB, "RX: No entries in rx_pend_q"); 877 return (ENOMEM); 878 } 879 callout_stop(&qp->rx_full); 880 CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry); 881 882 if ((hdr->flags & IF_NTB_DESC_DONE_FLAG) == 0) { 883 CTR1(KTR_NTB, 884 "RX: hdr not done. 
Returning entry %p to rx_pend_q", entry); 885 ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); 886 qp->rx_ring_empty++; 887 return (EAGAIN); 888 } 889 890 if (hdr->ver != (uint32_t) qp->rx_pkts) { 891 CTR3(KTR_NTB,"RX: ver != rx_pkts (%x != %lx). " 892 "Returning entry %p to rx_pend_q", hdr->ver, qp->rx_pkts, 893 entry); 894 ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); 895 qp->rx_err_ver++; 896 return (EIO); 897 } 898 899 if ((hdr->flags & IF_NTB_LINK_DOWN_FLAG) != 0) { 900 ntb_qp_link_down(qp); 901 CTR1(KTR_NTB, 902 "RX: link down. adding entry %p back to rx_pend_q", entry); 903 ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); 904 goto out; 905 } 906 907 if (hdr->len <= entry->len) { 908 entry->len = hdr->len; 909 ntb_rx_copy_task(qp, entry, offset); 910 } else { 911 CTR1(KTR_NTB, 912 "RX: len too long. Returning entry %p to rx_pend_q", entry); 913 ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); 914 915 qp->rx_err_oflow++; 916 } 917 918 qp->rx_bytes += hdr->len; 919 qp->rx_pkts++; 920 CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts); 921 922 923out: 924 /* Ensure that the data is globally visible before clearing the flag */ 925 wmb(); 926 hdr->flags = 0; 927 /* TODO: replace with bus_space_write */ 928 qp->rx_info->entry = qp->rx_index; 929 930 qp->rx_index++; 931 qp->rx_index %= qp->rx_max_entry; 932 933 return (0); 934} 935 936static void 937ntb_rx_copy_task(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, 938 void *offset) 939{ 940 struct ifnet *ifp = entry->cb_data; 941 unsigned int len = entry->len; 942 struct mbuf *m; 943 944 CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset); 945 m = m_devget(offset, len, 0, ifp, NULL); 946 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID; 947 948 entry->buf = (void *)m; 949 950 CTR2(KTR_NTB, 951 "RX: copied entry %p to mbuf %p. 
Adding entry to rx_free_q", entry, 952 m); 953 ntb_list_add(&qp->ntb_rx_free_q_lock, entry, &qp->rx_free_q); 954 955 taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task); 956} 957 958static void 959ntb_rx_completion_task(void *arg, int pending) 960{ 961 struct ntb_transport_qp *qp = arg; 962 struct mbuf *m; 963 struct ntb_queue_entry *entry; 964 965 CTR0(KTR_NTB, "RX: rx_completion_task"); 966 967 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) { 968 m = entry->buf; 969 CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m); 970 if (qp->rx_handler && qp->client_ready == NTB_LINK_UP) 971 qp->rx_handler(qp, qp->cb_data, m, entry->len); 972 973 entry->buf = NULL; 974 entry->len = qp->transport->bufsize; 975 976 CTR1(KTR_NTB,"RX: entry %p removed from rx_free_q " 977 "and added to rx_pend_q", entry); 978 ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q); 979 if (qp->rx_err_no_buf > qp->last_rx_no_buf) { 980 qp->last_rx_no_buf = qp->rx_err_no_buf; 981 CTR0(KTR_NTB, "RX: could spawn rx task"); 982 callout_reset(&qp->rx_full, hz / 1000, ntb_rx_pendq_full, 983 qp); 984 } 985 } 986} 987 988/* Link Event handler */ 989static void 990ntb_transport_event_callback(void *data, enum ntb_hw_event event) 991{ 992 struct ntb_netdev *nt = data; 993 994 switch (event) { 995 case NTB_EVENT_HW_LINK_UP: 996 if (bootverbose) 997 device_printf(ntb_get_device(nt->ntb), "HW link up\n"); 998 callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); 999 break; 1000 case NTB_EVENT_HW_LINK_DOWN: 1001 if (bootverbose) 1002 device_printf(ntb_get_device(nt->ntb), "HW link down\n"); 1003 ntb_transport_link_cleanup(nt); 1004 break; 1005 default: 1006 panic("ntb: Unknown NTB event"); 1007 } 1008} 1009 1010/* Link bring up */ 1011static void 1012ntb_transport_link_work(void *arg) 1013{ 1014 struct ntb_netdev *nt = arg; 1015 struct ntb_softc *ntb = nt->ntb; 1016 struct ntb_transport_qp *qp; 1017 uint32_t val; 1018 int rc, i; 1019 1020 /* send the local 
info */ 1021 rc = ntb_write_remote_spad(ntb, IF_NTB_VERSION, NTB_TRANSPORT_VERSION); 1022 if (rc != 0) 1023 goto out; 1024 1025 rc = ntb_write_remote_spad(ntb, IF_NTB_MW0_SZ, ntb_get_mw_size(ntb, 0)); 1026 if (rc != 0) 1027 goto out; 1028 1029 rc = ntb_write_remote_spad(ntb, IF_NTB_MW1_SZ, ntb_get_mw_size(ntb, 1)); 1030 if (rc != 0) 1031 goto out; 1032 1033 rc = ntb_write_remote_spad(ntb, IF_NTB_NUM_QPS, nt->max_qps); 1034 if (rc != 0) 1035 goto out; 1036 1037 rc = ntb_read_remote_spad(ntb, IF_NTB_QP_LINKS, &val); 1038 if (rc != 0) 1039 goto out; 1040 1041 rc = ntb_write_remote_spad(ntb, IF_NTB_QP_LINKS, val); 1042 if (rc != 0) 1043 goto out; 1044 1045 /* Query the remote side for its info */ 1046 rc = ntb_read_local_spad(ntb, IF_NTB_VERSION, &val); 1047 if (rc != 0) 1048 goto out; 1049 1050 if (val != NTB_TRANSPORT_VERSION) 1051 goto out; 1052 1053 rc = ntb_read_local_spad(ntb, IF_NTB_NUM_QPS, &val); 1054 if (rc != 0) 1055 goto out; 1056 1057 if (val != nt->max_qps) 1058 goto out; 1059 1060 rc = ntb_read_local_spad(ntb, IF_NTB_MW0_SZ, &val); 1061 if (rc != 0) 1062 goto out; 1063 1064 if (val == 0) 1065 goto out; 1066 1067 rc = ntb_set_mw(nt, 0, val); 1068 if (rc != 0) 1069 return; 1070 1071 rc = ntb_read_local_spad(ntb, IF_NTB_MW1_SZ, &val); 1072 if (rc != 0) 1073 goto out; 1074 1075 if (val == 0) 1076 goto out; 1077 1078 rc = ntb_set_mw(nt, 1, val); 1079 if (rc != 0) 1080 return; 1081 1082 nt->transport_link = NTB_LINK_UP; 1083 if (bootverbose) 1084 device_printf(ntb_get_device(ntb), "transport link up\n"); 1085 1086 for (i = 0; i < nt->max_qps; i++) { 1087 qp = &nt->qps[i]; 1088 1089 ntb_transport_setup_qp_mw(nt, i); 1090 1091 if (qp->client_ready == NTB_LINK_UP) 1092 callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); 1093 } 1094 1095 return; 1096 1097out: 1098 if (ntb_query_link_status(ntb)) 1099 callout_reset(&nt->link_work, 1100 NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt); 1101} 1102 1103static int 1104ntb_set_mw(struct ntb_netdev 
*nt, int num_mw, unsigned int size) 1105{ 1106 struct ntb_transport_mw *mw = &nt->mw[num_mw]; 1107 1108 /* Alloc memory for receiving data. Must be 4k aligned */ 1109 mw->size = size; 1110 1111 mw->virt_addr = contigmalloc(mw->size, M_NTB_IF, M_ZERO, 0, 1112 BUS_SPACE_MAXADDR, mw->size, 0); 1113 if (mw->virt_addr == NULL) { 1114 printf("ntb: Unable to allocate MW buffer of size %d\n", 1115 (int)mw->size); 1116 return (ENOMEM); 1117 } 1118 /* TODO: replace with bus_space_* functions */ 1119 mw->dma_addr = vtophys(mw->virt_addr); 1120 1121 /* Notify HW the memory location of the receive buffer */ 1122 ntb_set_mw_addr(nt->ntb, num_mw, mw->dma_addr); 1123 1124 return (0); 1125} 1126 1127static void 1128ntb_transport_setup_qp_mw(struct ntb_netdev *nt, unsigned int qp_num) 1129{ 1130 struct ntb_transport_qp *qp = &nt->qps[qp_num]; 1131 void *offset; 1132 unsigned int rx_size, num_qps_mw; 1133 uint8_t mw_num = QP_TO_MW(qp_num); 1134 unsigned int i; 1135 1136 if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW) 1137 num_qps_mw = nt->max_qps / NTB_NUM_MW + 1; 1138 else 1139 num_qps_mw = nt->max_qps / NTB_NUM_MW; 1140 1141 rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw; 1142 qp->remote_rx_info = (void *)((uint8_t *)nt->mw[mw_num].virt_addr + 1143 (qp_num / NTB_NUM_MW * rx_size)); 1144 rx_size -= sizeof(struct ntb_rx_info); 1145 1146 qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info); 1147 qp->rx_max_frame = min(transport_mtu + sizeof(struct ntb_payload_header), 1148 rx_size); 1149 qp->rx_max_entry = rx_size / qp->rx_max_frame; 1150 qp->rx_index = 0; 1151 qp->tx_index = 0; 1152 1153 qp->remote_rx_info->entry = qp->rx_max_entry; 1154 1155 /* setup the hdr offsets with 0's */ 1156 for (i = 0; i < qp->rx_max_entry; i++) { 1157 offset = (void *)((uint8_t *)qp->rx_buff + 1158 qp->rx_max_frame * (i + 1) - 1159 sizeof(struct ntb_payload_header)); 1160 memset(offset, 0, sizeof(struct ntb_payload_header)); 1161 } 1162 1163 qp->rx_pkts = 0; 1164 
qp->tx_pkts = 0; 1165} 1166 1167static void 1168ntb_qp_link_work(void *arg) 1169{ 1170 struct ntb_transport_qp *qp = arg; 1171 struct ntb_softc *ntb = qp->ntb; 1172 struct ntb_netdev *nt = qp->transport; 1173 int rc, val; 1174 1175 1176 rc = ntb_read_remote_spad(ntb, IF_NTB_QP_LINKS, &val); 1177 if (rc != 0) 1178 return; 1179 1180 rc = ntb_write_remote_spad(ntb, IF_NTB_QP_LINKS, val | 1 << qp->qp_num); 1181 1182 /* query remote spad for qp ready bits */ 1183 rc = ntb_read_local_spad(ntb, IF_NTB_QP_LINKS, &val); 1184 1185 /* See if the remote side is up */ 1186 if ((1 << qp->qp_num & val) != 0) { 1187 qp->qp_link = NTB_LINK_UP; 1188 if (qp->event_handler != NULL) 1189 qp->event_handler(qp->cb_data, NTB_LINK_UP); 1190 if (bootverbose) 1191 device_printf(ntb_get_device(ntb), "qp link up\n"); 1192 } else if (nt->transport_link == NTB_LINK_UP) { 1193 callout_reset(&qp->link_work, 1194 NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp); 1195 } 1196} 1197 1198/* Link down event*/ 1199static void 1200ntb_transport_link_cleanup(struct ntb_netdev *nt) 1201{ 1202 int i; 1203 1204 if (nt->transport_link == NTB_LINK_DOWN) 1205 callout_drain(&nt->link_work); 1206 else 1207 nt->transport_link = NTB_LINK_DOWN; 1208 1209 /* Pass along the info to any clients */ 1210 for (i = 0; i < nt->max_qps; i++) 1211 if (!test_bit(i, &nt->qp_bitmap)) 1212 ntb_qp_link_down(&nt->qps[i]); 1213 1214 /* 1215 * The scratchpad registers keep the values if the remote side 1216 * goes down, blast them now to give them a sane value the next 1217 * time they are accessed 1218 */ 1219 for (i = 0; i < IF_NTB_MAX_SPAD; i++) 1220 ntb_write_local_spad(nt->ntb, i, 0); 1221} 1222 1223 1224static void 1225ntb_qp_link_down(struct ntb_transport_qp *qp) 1226{ 1227 1228 ntb_qp_link_cleanup(qp); 1229} 1230 1231static void 1232ntb_qp_link_cleanup(struct ntb_transport_qp *qp) 1233{ 1234 struct ntb_netdev *nt = qp->transport; 1235 1236 if (qp->qp_link == NTB_LINK_DOWN) { 1237 callout_drain(&qp->link_work); 1238 
return; 1239 } 1240 1241 if (qp->event_handler != NULL) 1242 qp->event_handler(qp->cb_data, NTB_LINK_DOWN); 1243 1244 qp->qp_link = NTB_LINK_DOWN; 1245 1246 if (nt->transport_link == NTB_LINK_UP) 1247 callout_reset(&qp->link_work, 1248 NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp); 1249} 1250 1251/* Link commanded down */ 1252/** 1253 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data 1254 * @qp: NTB transport layer queue to be disabled 1255 * 1256 * Notify NTB transport layer of client's desire to no longer receive data on 1257 * transport queue specified. It is the client's responsibility to ensure all 1258 * entries on queue are purged or otherwise handled appropraitely. 1259 */ 1260static void 1261ntb_transport_link_down(struct ntb_transport_qp *qp) 1262{ 1263 int rc, val; 1264 1265 if (qp == NULL) 1266 return; 1267 1268 qp->client_ready = NTB_LINK_DOWN; 1269 1270 rc = ntb_read_remote_spad(qp->ntb, IF_NTB_QP_LINKS, &val); 1271 if (rc != 0) 1272 return; 1273 1274 rc = ntb_write_remote_spad(qp->ntb, IF_NTB_QP_LINKS, 1275 val & ~(1 << qp->qp_num)); 1276 1277 if (qp->qp_link == NTB_LINK_UP) 1278 ntb_send_link_down(qp); 1279 else 1280 callout_drain(&qp->link_work); 1281 1282} 1283 1284static void 1285ntb_send_link_down(struct ntb_transport_qp *qp) 1286{ 1287 struct ntb_queue_entry *entry; 1288 int i, rc; 1289 1290 if (qp->qp_link == NTB_LINK_DOWN) 1291 return; 1292 1293 qp->qp_link = NTB_LINK_DOWN; 1294 1295 for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) { 1296 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); 1297 if (entry != NULL) 1298 break; 1299 pause("NTB Wait for link down", hz / 10); 1300 } 1301 1302 if (entry == NULL) 1303 return; 1304 1305 entry->cb_data = NULL; 1306 entry->buf = NULL; 1307 entry->len = 0; 1308 entry->flags = IF_NTB_LINK_DOWN_FLAG; 1309 1310 mtx_lock(&qp->transport->tx_lock); 1311 rc = ntb_process_tx(qp, entry); 1312 if (rc != 0) 1313 printf("ntb: Failed to send link down\n"); 1314 
mtx_unlock(&qp->transport->tx_lock); 1315} 1316 1317 1318/* List Management */ 1319 1320static void 1321ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, 1322 struct ntb_queue_list *list) 1323{ 1324 1325 mtx_lock_spin(lock); 1326 STAILQ_INSERT_TAIL(list, entry, entry); 1327 mtx_unlock_spin(lock); 1328} 1329 1330static struct ntb_queue_entry * 1331ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list) 1332{ 1333 struct ntb_queue_entry *entry; 1334 1335 mtx_lock_spin(lock); 1336 if (STAILQ_EMPTY(list)) { 1337 entry = NULL; 1338 goto out; 1339 } 1340 entry = STAILQ_FIRST(list); 1341 STAILQ_REMOVE_HEAD(list, entry); 1342out: 1343 mtx_unlock_spin(lock); 1344 1345 return (entry); 1346} 1347 1348/* Helper functions */ 1349/* TODO: This too should really be part of the kernel */ 1350#define EUI48_MULTICAST 1 << 0 1351#define EUI48_LOCALLY_ADMINISTERED 1 << 1 1352static void 1353create_random_local_eui48(u_char *eaddr) 1354{ 1355 static uint8_t counter = 0; 1356 uint32_t seed = ticks; 1357 1358 eaddr[0] = EUI48_LOCALLY_ADMINISTERED; 1359 memcpy(&eaddr[1], &seed, sizeof(uint32_t)); 1360 eaddr[5] = counter++; 1361} 1362 1363/** 1364 * ntb_transport_max_size - Query the max payload size of a qp 1365 * @qp: NTB transport layer queue to be queried 1366 * 1367 * Query the maximum payload size permissible on the given qp 1368 * 1369 * RETURNS: the max payload size of a qp 1370 */ 1371static unsigned int 1372ntb_transport_max_size(struct ntb_transport_qp *qp) 1373{ 1374 1375 if (qp == NULL) 1376 return (0); 1377 1378 return (qp->tx_max_frame - sizeof(struct ntb_payload_header)); 1379} 1380