ntb_transport.c revision 291084
1250079Scarl/*- 2250079Scarl * Copyright (C) 2013 Intel Corporation 3289545Scem * Copyright (C) 2015 EMC Corporation 4250079Scarl * All rights reserved. 5250079Scarl * 6250079Scarl * Redistribution and use in source and binary forms, with or without 7250079Scarl * modification, are permitted provided that the following conditions 8250079Scarl * are met: 9250079Scarl * 1. Redistributions of source code must retain the above copyright 10250079Scarl * notice, this list of conditions and the following disclaimer. 11250079Scarl * 2. Redistributions in binary form must reproduce the above copyright 12250079Scarl * notice, this list of conditions and the following disclaimer in the 13250079Scarl * documentation and/or other materials provided with the distribution. 14250079Scarl * 15250079Scarl * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16250079Scarl * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17250079Scarl * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18250079Scarl * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19250079Scarl * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20250079Scarl * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21250079Scarl * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22250079Scarl * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23250079Scarl * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24250079Scarl * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25250079Scarl * SUCH DAMAGE. 
26250079Scarl */ 27250079Scarl 28250079Scarl#include <sys/cdefs.h> 29250079Scarl__FBSDID("$FreeBSD: head/sys/dev/ntb/if_ntb/if_ntb.c 291084 2015-11-19 19:53:09Z cem $"); 30250079Scarl 31250079Scarl#include <sys/param.h> 32250079Scarl#include <sys/kernel.h> 33250079Scarl#include <sys/systm.h> 34289544Scem#include <sys/bitset.h> 35250079Scarl#include <sys/bus.h> 36250079Scarl#include <sys/ktr.h> 37289281Scem#include <sys/limits.h> 38250079Scarl#include <sys/lock.h> 39250079Scarl#include <sys/malloc.h> 40250079Scarl#include <sys/module.h> 41250079Scarl#include <sys/mutex.h> 42250079Scarl#include <sys/queue.h> 43250079Scarl#include <sys/socket.h> 44250079Scarl#include <sys/sockio.h> 45289207Scem#include <sys/sysctl.h> 46250079Scarl#include <sys/taskqueue.h> 47289544Scem 48250079Scarl#include <net/if.h> 49250079Scarl#include <net/if_media.h> 50250079Scarl#include <net/if_types.h> 51250079Scarl#include <net/if_var.h> 52250079Scarl#include <net/bpf.h> 53250079Scarl#include <net/ethernet.h> 54289544Scem 55250079Scarl#include <vm/vm.h> 56250079Scarl#include <vm/pmap.h> 57289544Scem 58250079Scarl#include <machine/bus.h> 59250079Scarl#include <machine/cpufunc.h> 60250079Scarl#include <machine/pmap.h> 61250079Scarl 62250079Scarl#include "../ntb_hw/ntb_hw.h" 63250079Scarl 64250079Scarl/* 65250079Scarl * The Non-Transparent Bridge (NTB) is a device on some Intel processors that 66250079Scarl * allows you to connect two systems using a PCI-e link. 67250079Scarl * 68250079Scarl * This module contains a protocol for sending and receiving messages, and 69250079Scarl * exposes that protocol through a simulated ethernet device called ntb. 70250079Scarl * 71250079Scarl * NOTE: Much of the code in this module is shared with Linux. Any patches may 72250079Scarl * be picked up and redistributed in Linux with a dual GPL/BSD license. 
73250079Scarl */ 74250079Scarl 75289544Scem#define QP_SETSIZE 64 76289544ScemBITSET_DEFINE(_qpset, QP_SETSIZE); 77289544Scem#define test_bit(pos, addr) BIT_ISSET(QP_SETSIZE, (pos), (addr)) 78289544Scem#define set_bit(pos, addr) BIT_SET(QP_SETSIZE, (pos), (addr)) 79289544Scem#define clear_bit(pos, addr) BIT_CLR(QP_SETSIZE, (pos), (addr)) 80289544Scem#define ffs_bit(addr) BIT_FFS(QP_SETSIZE, (addr)) 81250079Scarl 82250079Scarl#define KTR_NTB KTR_SPARE3 83250079Scarl 84289546Scem#define NTB_TRANSPORT_VERSION 4 85250079Scarl#define NTB_RX_MAX_PKTS 64 86250079Scarl#define NTB_RXQ_SIZE 300 87250079Scarl 88289546Scemenum ntb_link_event { 89289546Scem NTB_LINK_DOWN = 0, 90289546Scem NTB_LINK_UP, 91289546Scem}; 92289546Scem 93290684Scemstatic SYSCTL_NODE(_hw, OID_AUTO, if_ntb, CTLFLAG_RW, 0, "if_ntb"); 94289208Scem 95290684Scemstatic unsigned g_if_ntb_debug_level; 96290684ScemSYSCTL_UINT(_hw_if_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN, 97290684Scem &g_if_ntb_debug_level, 0, "if_ntb log level -- higher is more verbose"); 98290684Scem#define ntb_printf(lvl, ...) do { \ 99290684Scem if ((lvl) <= g_if_ntb_debug_level) { \ 100290684Scem if_printf(nt->ifp, __VA_ARGS__); \ 101290684Scem } \ 102290684Scem} while (0) 103290684Scem 104290684Scemstatic unsigned transport_mtu = 0x10000 + ETHER_HDR_LEN + ETHER_CRC_LEN; 105290684Scem 106289546Scemstatic uint64_t max_mw_size; 107290684ScemSYSCTL_UQUAD(_hw_if_ntb, OID_AUTO, max_mw_size, CTLFLAG_RDTUN, &max_mw_size, 0, 108289546Scem "If enabled (non-zero), limit the size of large memory windows. " 109289546Scem "Both sides of the NTB MUST set the same value here."); 110289546Scem 111290684Scemstatic unsigned max_num_clients; 112290684ScemSYSCTL_UINT(_hw_if_ntb, OID_AUTO, max_num_clients, CTLFLAG_RDTUN, 113289396Scem &max_num_clients, 0, "Maximum number of NTB transport clients. 
" 114289396Scem "0 (default) - use all available NTB memory windows; " 115289396Scem "positive integer N - Limit to N memory windows."); 116250079Scarl 117291084Scemstatic unsigned enable_xeon_watchdog; 118291084ScemSYSCTL_UINT(_hw_if_ntb, OID_AUTO, enable_xeon_watchdog, CTLFLAG_RDTUN, 119291084Scem &enable_xeon_watchdog, 0, "If non-zero, write a register every second to " 120291084Scem "keep a watchdog from tearing down the NTB link"); 121291084Scem 122250079ScarlSTAILQ_HEAD(ntb_queue_list, ntb_queue_entry); 123250079Scarl 124291028Scemtypedef uint32_t ntb_q_idx_t; 125289653Scem 126250079Scarlstruct ntb_queue_entry { 127250079Scarl /* ntb_queue list reference */ 128250079Scarl STAILQ_ENTRY(ntb_queue_entry) entry; 129250079Scarl 130289546Scem /* info on data to be transferred */ 131250079Scarl void *cb_data; 132250079Scarl void *buf; 133291028Scem uint32_t len; 134291028Scem uint32_t flags; 135289546Scem 136289546Scem struct ntb_transport_qp *qp; 137289546Scem struct ntb_payload_header *x_hdr; 138289653Scem ntb_q_idx_t index; 139250079Scarl}; 140250079Scarl 141250079Scarlstruct ntb_rx_info { 142289653Scem ntb_q_idx_t entry; 143250079Scarl}; 144250079Scarl 145250079Scarlstruct ntb_transport_qp { 146289545Scem struct ntb_transport_ctx *transport; 147250079Scarl struct ntb_softc *ntb; 148250079Scarl 149250079Scarl void *cb_data; 150250079Scarl 151250079Scarl bool client_ready; 152290686Scem volatile bool link_is_up; 153255281Scarl uint8_t qp_num; /* Only 64 QPs are allowed. 
0-63 */ 154250079Scarl 155250079Scarl struct ntb_rx_info *rx_info; 156250079Scarl struct ntb_rx_info *remote_rx_info; 157250079Scarl 158289341Scem void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, 159250079Scarl void *data, int len); 160250079Scarl struct ntb_queue_list tx_free_q; 161250079Scarl struct mtx ntb_tx_free_q_lock; 162290679Scem caddr_t tx_mw; 163289546Scem bus_addr_t tx_mw_phys; 164289653Scem ntb_q_idx_t tx_index; 165289653Scem ntb_q_idx_t tx_max_entry; 166250079Scarl uint64_t tx_max_frame; 167250079Scarl 168289341Scem void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, 169250079Scarl void *data, int len); 170289651Scem struct ntb_queue_list rx_post_q; 171250079Scarl struct ntb_queue_list rx_pend_q; 172289651Scem /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */ 173289651Scem struct mtx ntb_rx_q_lock; 174250079Scarl struct task rx_completion_task; 175289546Scem struct task rxc_db_work; 176290679Scem caddr_t rx_buff; 177289653Scem ntb_q_idx_t rx_index; 178289653Scem ntb_q_idx_t rx_max_entry; 179250079Scarl uint64_t rx_max_frame; 180250079Scarl 181289341Scem void (*event_handler)(void *data, enum ntb_link_event status); 182250079Scarl struct callout link_work; 183250079Scarl struct callout queue_full; 184250079Scarl struct callout rx_full; 185250079Scarl 186250079Scarl uint64_t last_rx_no_buf; 187250079Scarl 188250079Scarl /* Stats */ 189250079Scarl uint64_t rx_bytes; 190250079Scarl uint64_t rx_pkts; 191250079Scarl uint64_t rx_ring_empty; 192250079Scarl uint64_t rx_err_no_buf; 193250079Scarl uint64_t rx_err_oflow; 194250079Scarl uint64_t rx_err_ver; 195250079Scarl uint64_t tx_bytes; 196250079Scarl uint64_t tx_pkts; 197250079Scarl uint64_t tx_ring_full; 198289653Scem uint64_t tx_err_no_buf; 199250079Scarl}; 200250079Scarl 201250079Scarlstruct ntb_queue_handlers { 202289341Scem void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, 203250079Scarl void *data, int len); 204289341Scem void (*tx_handler)(struct 
ntb_transport_qp *qp, void *qp_data, 205250079Scarl void *data, int len); 206289341Scem void (*event_handler)(void *data, enum ntb_link_event status); 207250079Scarl}; 208250079Scarl 209250079Scarlstruct ntb_transport_mw { 210289546Scem vm_paddr_t phys_addr; 211289546Scem size_t phys_size; 212289546Scem size_t xlat_align; 213289546Scem size_t xlat_align_size; 214291033Scem bus_addr_t addr_limit; 215289546Scem /* Tx buff is off vbase / phys_addr */ 216290679Scem caddr_t vbase; 217289545Scem size_t xlat_size; 218289546Scem size_t buff_size; 219289546Scem /* Rx buff is off virt_addr / dma_addr */ 220290679Scem caddr_t virt_addr; 221289546Scem bus_addr_t dma_addr; 222250079Scarl}; 223250079Scarl 224289545Scemstruct ntb_transport_ctx { 225250079Scarl struct ntb_softc *ntb; 226250079Scarl struct ifnet *ifp; 227289545Scem struct ntb_transport_mw mw_vec[NTB_MAX_NUM_MW]; 228289545Scem struct ntb_transport_qp *qp_vec; 229289544Scem struct _qpset qp_bitmap; 230289546Scem struct _qpset qp_bitmap_free; 231289545Scem unsigned mw_count; 232289545Scem unsigned qp_count; 233290686Scem volatile bool link_is_up; 234250079Scarl struct callout link_work; 235291084Scem struct callout link_watchdog; 236290683Scem struct task link_cleanup; 237250079Scarl uint64_t bufsize; 238250079Scarl u_char eaddr[ETHER_ADDR_LEN]; 239250079Scarl struct mtx tx_lock; 240250079Scarl struct mtx rx_lock; 241289546Scem 242289546Scem /* The hardcoded single queuepair in ntb_setup_interface() */ 243289546Scem struct ntb_transport_qp *qp; 244250079Scarl}; 245250079Scarl 246289545Scemstatic struct ntb_transport_ctx net_softc; 247250079Scarl 248250079Scarlenum { 249250079Scarl IF_NTB_DESC_DONE_FLAG = 1 << 0, 250250079Scarl IF_NTB_LINK_DOWN_FLAG = 1 << 1, 251250079Scarl}; 252250079Scarl 253250079Scarlstruct ntb_payload_header { 254291028Scem ntb_q_idx_t ver; 255291028Scem uint32_t len; 256291028Scem uint32_t flags; 257250079Scarl}; 258250079Scarl 259250079Scarlenum { 260289153Scem /* 261289153Scem * The order of 
this enum is part of the if_ntb remote protocol. Do 262289153Scem * not reorder without bumping protocol version (and it's probably best 263289153Scem * to keep the protocol in lock-step with the Linux NTB driver. 264289153Scem */ 265250079Scarl IF_NTB_VERSION = 0, 266289153Scem IF_NTB_QP_LINKS, 267250079Scarl IF_NTB_NUM_QPS, 268289153Scem IF_NTB_NUM_MWS, 269289153Scem /* 270289153Scem * N.B.: transport_link_work assumes MW1 enums = MW0 + 2. 271289153Scem */ 272289153Scem IF_NTB_MW0_SZ_HIGH, 273289153Scem IF_NTB_MW0_SZ_LOW, 274289153Scem IF_NTB_MW1_SZ_HIGH, 275289153Scem IF_NTB_MW1_SZ_LOW, 276250079Scarl IF_NTB_MAX_SPAD, 277291084Scem 278291084Scem /* 279291084Scem * Some NTB-using hardware have a watchdog to work around NTB hangs; if 280291084Scem * a register or doorbell isn't written every few seconds, the link is 281291084Scem * torn down. Write an otherwise unused register every few seconds to 282291084Scem * work around this watchdog. 283291084Scem */ 284291084Scem IF_NTB_WATCHDOG_SPAD = 15 285250079Scarl}; 286291084ScemCTASSERT(IF_NTB_WATCHDOG_SPAD < XEON_SPAD_COUNT && 287291084Scem IF_NTB_WATCHDOG_SPAD < ATOM_SPAD_COUNT); 288250079Scarl 289289545Scem#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) 290250079Scarl#define NTB_QP_DEF_NUM_ENTRIES 100 291250079Scarl#define NTB_LINK_DOWN_TIMEOUT 10 292250079Scarl 293250079Scarlstatic int ntb_handle_module_events(struct module *m, int what, void *arg); 294250079Scarlstatic int ntb_setup_interface(void); 295250079Scarlstatic int ntb_teardown_interface(void); 296250079Scarlstatic void ntb_net_init(void *arg); 297250079Scarlstatic int ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data); 298250079Scarlstatic void ntb_start(struct ifnet *ifp); 299250079Scarlstatic void ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, 300250079Scarl void *data, int len); 301250079Scarlstatic void ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, 302250079Scarl void *data, int len); 303289340Scemstatic void 
ntb_net_event_handler(void *data, enum ntb_link_event status); 304290682Scemstatic int ntb_transport_probe(struct ntb_softc *ntb); 305289546Scemstatic void ntb_transport_free(struct ntb_transport_ctx *); 306289545Scemstatic void ntb_transport_init_queue(struct ntb_transport_ctx *nt, 307250079Scarl unsigned int qp_num); 308250079Scarlstatic void ntb_transport_free_queue(struct ntb_transport_qp *qp); 309289546Scemstatic struct ntb_transport_qp *ntb_transport_create_queue(void *data, 310250079Scarl struct ntb_softc *pdev, const struct ntb_queue_handlers *handlers); 311250079Scarlstatic void ntb_transport_link_up(struct ntb_transport_qp *qp); 312250079Scarlstatic int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, 313250079Scarl void *data, unsigned int len); 314250079Scarlstatic int ntb_process_tx(struct ntb_transport_qp *qp, 315250079Scarl struct ntb_queue_entry *entry); 316289546Scemstatic void ntb_memcpy_tx(struct ntb_transport_qp *qp, 317250079Scarl struct ntb_queue_entry *entry, void *offset); 318250079Scarlstatic void ntb_qp_full(void *arg); 319289546Scemstatic void ntb_transport_rxc_db(void *arg, int pending); 320250079Scarlstatic int ntb_process_rxc(struct ntb_transport_qp *qp); 321289651Scemstatic void ntb_memcpy_rx(struct ntb_transport_qp *qp, 322250079Scarl struct ntb_queue_entry *entry, void *offset); 323289651Scemstatic inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp, 324289651Scem void *data); 325289546Scemstatic void ntb_complete_rxc(void *arg, int pending); 326289598Scemstatic void ntb_transport_doorbell_callback(void *data, uint32_t vector); 327289546Scemstatic void ntb_transport_event_callback(void *data); 328250079Scarlstatic void ntb_transport_link_work(void *arg); 329289652Scemstatic int ntb_set_mw(struct ntb_transport_ctx *, int num_mw, size_t size); 330289545Scemstatic void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw); 331289546Scemstatic int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, 
332250079Scarl unsigned int qp_num); 333250079Scarlstatic void ntb_qp_link_work(void *arg); 334289545Scemstatic void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt); 335290683Scemstatic void ntb_transport_link_cleanup_work(void *, int); 336250079Scarlstatic void ntb_qp_link_down(struct ntb_transport_qp *qp); 337289613Scemstatic void ntb_qp_link_down_reset(struct ntb_transport_qp *qp); 338250079Scarlstatic void ntb_qp_link_cleanup(struct ntb_transport_qp *qp); 339250079Scarlstatic void ntb_transport_link_down(struct ntb_transport_qp *qp); 340250079Scarlstatic void ntb_send_link_down(struct ntb_transport_qp *qp); 341250079Scarlstatic void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, 342250079Scarl struct ntb_queue_list *list); 343250079Scarlstatic struct ntb_queue_entry *ntb_list_rm(struct mtx *lock, 344250079Scarl struct ntb_queue_list *list); 345289651Scemstatic struct ntb_queue_entry *ntb_list_mv(struct mtx *lock, 346289651Scem struct ntb_queue_list *from, struct ntb_queue_list *to); 347250079Scarlstatic void create_random_local_eui48(u_char *eaddr); 348250079Scarlstatic unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp); 349291084Scemstatic void xeon_link_watchdog_hb(void *); 350250079Scarl 351289546Scemstatic const struct ntb_ctx_ops ntb_transport_ops = { 352289546Scem .link_event = ntb_transport_event_callback, 353289546Scem .db_event = ntb_transport_doorbell_callback, 354289546Scem}; 355289546Scem 356250079ScarlMALLOC_DEFINE(M_NTB_IF, "if_ntb", "ntb network driver"); 357250079Scarl 358291028Scemstatic inline void 359291028Scemiowrite32(uint32_t val, void *addr) 360291028Scem{ 361291028Scem 362291028Scem bus_space_write_4(X86_BUS_SPACE_MEM, 0/* HACK */, (uintptr_t)addr, 363291028Scem val); 364291028Scem} 365291028Scem 366250079Scarl/* Module setup and teardown */ 367250079Scarlstatic int 368250079Scarlntb_handle_module_events(struct module *m, int what, void *arg) 369250079Scarl{ 370250079Scarl int err = 0; 371250079Scarl 
372250079Scarl switch (what) { 373250079Scarl case MOD_LOAD: 374250079Scarl err = ntb_setup_interface(); 375250079Scarl break; 376250079Scarl case MOD_UNLOAD: 377250079Scarl err = ntb_teardown_interface(); 378250079Scarl break; 379250079Scarl default: 380250079Scarl err = EOPNOTSUPP; 381250079Scarl break; 382250079Scarl } 383250079Scarl return (err); 384250079Scarl} 385250079Scarl 386255271Scarlstatic moduledata_t if_ntb_mod = { 387255271Scarl "if_ntb", 388250079Scarl ntb_handle_module_events, 389250079Scarl NULL 390250079Scarl}; 391250079Scarl 392255271ScarlDECLARE_MODULE(if_ntb, if_ntb_mod, SI_SUB_KLD, SI_ORDER_ANY); 393255271ScarlMODULE_DEPEND(if_ntb, ntb_hw, 1, 1, 1); 394250079Scarl 395250079Scarlstatic int 396289209Scemntb_setup_interface(void) 397250079Scarl{ 398250079Scarl struct ifnet *ifp; 399250079Scarl struct ntb_queue_handlers handlers = { ntb_net_rx_handler, 400250079Scarl ntb_net_tx_handler, ntb_net_event_handler }; 401289546Scem int rc; 402250079Scarl 403250079Scarl net_softc.ntb = devclass_get_softc(devclass_find("ntb_hw"), 0); 404250079Scarl if (net_softc.ntb == NULL) { 405255281Scarl printf("ntb: Cannot find devclass\n"); 406250079Scarl return (ENXIO); 407250079Scarl } 408250079Scarl 409250079Scarl ifp = net_softc.ifp = if_alloc(IFT_ETHER); 410250079Scarl if (ifp == NULL) { 411289546Scem ntb_transport_free(&net_softc); 412289546Scem printf("ntb: Cannot allocate ifnet structure\n"); 413250079Scarl return (ENOMEM); 414250079Scarl } 415290684Scem if_initname(ifp, "ntb", 0); 416250079Scarl 417290684Scem rc = ntb_transport_probe(net_softc.ntb); 418290684Scem if (rc != 0) { 419290684Scem printf("ntb: Cannot init transport: %d\n", rc); 420290684Scem if_free(net_softc.ifp); 421290684Scem return (rc); 422290684Scem } 423290684Scem 424250079Scarl net_softc.qp = ntb_transport_create_queue(ifp, net_softc.ntb, 425250079Scarl &handlers); 426250079Scarl ifp->if_init = ntb_net_init; 427250079Scarl ifp->if_softc = &net_softc; 428250079Scarl ifp->if_flags = 
IFF_BROADCAST | IFF_SIMPLEX; 429250079Scarl ifp->if_ioctl = ntb_ioctl; 430250079Scarl ifp->if_start = ntb_start; 431250079Scarl IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); 432250079Scarl ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; 433250079Scarl IFQ_SET_READY(&ifp->if_snd); 434250079Scarl create_random_local_eui48(net_softc.eaddr); 435250079Scarl ether_ifattach(ifp, net_softc.eaddr); 436250079Scarl ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU; 437250079Scarl ifp->if_capenable = ifp->if_capabilities; 438250079Scarl 439250079Scarl ntb_transport_link_up(net_softc.qp); 440250079Scarl net_softc.bufsize = ntb_transport_max_size(net_softc.qp) + 441250079Scarl sizeof(struct ether_header); 442250079Scarl return (0); 443250079Scarl} 444250079Scarl 445250079Scarlstatic int 446289209Scemntb_teardown_interface(void) 447250079Scarl{ 448250079Scarl 449290684Scem if (net_softc.qp != NULL) { 450255280Scarl ntb_transport_link_down(net_softc.qp); 451250079Scarl 452290684Scem ntb_transport_free_queue(net_softc.qp); 453290684Scem ntb_transport_free(&net_softc); 454290684Scem } 455290684Scem 456255280Scarl if (net_softc.ifp != NULL) { 457255280Scarl ether_ifdetach(net_softc.ifp); 458255280Scarl if_free(net_softc.ifp); 459290684Scem net_softc.ifp = NULL; 460255280Scarl } 461250079Scarl 462250079Scarl return (0); 463250079Scarl} 464250079Scarl 465250079Scarl/* Network device interface */ 466250079Scarl 467250079Scarlstatic void 468250079Scarlntb_net_init(void *arg) 469250079Scarl{ 470289545Scem struct ntb_transport_ctx *ntb_softc = arg; 471250079Scarl struct ifnet *ifp = ntb_softc->ifp; 472250079Scarl 473250079Scarl ifp->if_drv_flags |= IFF_DRV_RUNNING; 474250079Scarl ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 475250079Scarl ifp->if_flags |= IFF_UP; 476250079Scarl if_link_state_change(ifp, LINK_STATE_UP); 477250079Scarl} 478250079Scarl 479250079Scarlstatic int 480250079Scarlntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 481250079Scarl{ 482289545Scem struct ntb_transport_ctx 
*nt = ifp->if_softc; 483250079Scarl struct ifreq *ifr = (struct ifreq *)data; 484250079Scarl int error = 0; 485250079Scarl 486250079Scarl switch (command) { 487250079Scarl case SIOCSIFMTU: 488250079Scarl { 489250079Scarl if (ifr->ifr_mtu > ntb_transport_max_size(nt->qp) - 490250079Scarl ETHER_HDR_LEN - ETHER_CRC_LEN) { 491250079Scarl error = EINVAL; 492250079Scarl break; 493250079Scarl } 494250079Scarl 495250079Scarl ifp->if_mtu = ifr->ifr_mtu; 496250079Scarl break; 497250079Scarl } 498250079Scarl default: 499250079Scarl error = ether_ioctl(ifp, command, data); 500250079Scarl break; 501250079Scarl } 502250079Scarl 503250079Scarl return (error); 504250079Scarl} 505250079Scarl 506250079Scarl 507250079Scarlstatic void 508250079Scarlntb_start(struct ifnet *ifp) 509250079Scarl{ 510250079Scarl struct mbuf *m_head; 511289545Scem struct ntb_transport_ctx *nt = ifp->if_softc; 512250079Scarl int rc; 513250079Scarl 514250079Scarl mtx_lock(&nt->tx_lock); 515250079Scarl ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 516250079Scarl CTR0(KTR_NTB, "TX: ntb_start"); 517250079Scarl while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { 518250079Scarl IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 519250079Scarl CTR1(KTR_NTB, "TX: start mbuf %p", m_head); 520250079Scarl rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head, 521250079Scarl m_length(m_head, NULL)); 522250079Scarl if (rc != 0) { 523250079Scarl CTR1(KTR_NTB, 524255281Scarl "TX: could not tx mbuf %p. 
Returning to snd q", 525250079Scarl m_head); 526250079Scarl if (rc == EAGAIN) { 527250079Scarl ifp->if_drv_flags |= IFF_DRV_OACTIVE; 528250079Scarl IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 529250079Scarl callout_reset(&nt->qp->queue_full, hz / 1000, 530250079Scarl ntb_qp_full, ifp); 531250079Scarl } 532250079Scarl break; 533250079Scarl } 534250079Scarl 535250079Scarl } 536250079Scarl mtx_unlock(&nt->tx_lock); 537250079Scarl} 538250079Scarl 539250079Scarl/* Network Device Callbacks */ 540250079Scarlstatic void 541250079Scarlntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, 542250079Scarl int len) 543250079Scarl{ 544250079Scarl 545250079Scarl m_freem(data); 546250079Scarl CTR1(KTR_NTB, "TX: tx_handler freeing mbuf %p", data); 547250079Scarl} 548250079Scarl 549250079Scarlstatic void 550250079Scarlntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, 551250079Scarl int len) 552250079Scarl{ 553250079Scarl struct mbuf *m = data; 554250079Scarl struct ifnet *ifp = qp_data; 555250079Scarl 556250079Scarl CTR0(KTR_NTB, "RX: rx handler"); 557250079Scarl (*ifp->if_input)(ifp, m); 558250079Scarl} 559250079Scarl 560250079Scarlstatic void 561289340Scemntb_net_event_handler(void *data, enum ntb_link_event status) 562250079Scarl{ 563289340Scem struct ifnet *ifp; 564250079Scarl 565289340Scem ifp = data; 566289340Scem (void)ifp; 567289340Scem 568289340Scem /* XXX The Linux driver munges with the carrier status here. 
*/ 569289340Scem 570289340Scem switch (status) { 571289340Scem case NTB_LINK_DOWN: 572289340Scem break; 573289340Scem case NTB_LINK_UP: 574289340Scem break; 575289340Scem default: 576289340Scem panic("Bogus ntb_link_event %u\n", status); 577289340Scem } 578250079Scarl} 579250079Scarl 580250079Scarl/* Transport Init and teardown */ 581250079Scarl 582291084Scemstatic void 583291084Scemxeon_link_watchdog_hb(void *arg) 584291084Scem{ 585291084Scem struct ntb_transport_ctx *nt; 586291084Scem 587291084Scem nt = arg; 588291084Scem ntb_spad_write(nt->ntb, IF_NTB_WATCHDOG_SPAD, 0); 589291084Scem callout_reset(&nt->link_watchdog, 1 * hz, xeon_link_watchdog_hb, nt); 590291084Scem} 591291084Scem 592250079Scarlstatic int 593290682Scemntb_transport_probe(struct ntb_softc *ntb) 594250079Scarl{ 595289545Scem struct ntb_transport_ctx *nt = &net_softc; 596289546Scem struct ntb_transport_mw *mw; 597289546Scem uint64_t qp_bitmap; 598289544Scem int rc; 599289546Scem unsigned i; 600250079Scarl 601289545Scem nt->mw_count = ntb_mw_count(ntb); 602289546Scem for (i = 0; i < nt->mw_count; i++) { 603289546Scem mw = &nt->mw_vec[i]; 604289396Scem 605289546Scem rc = ntb_mw_get_range(ntb, i, &mw->phys_addr, &mw->vbase, 606291033Scem &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size, 607291033Scem &mw->addr_limit); 608289546Scem if (rc != 0) 609289546Scem goto err; 610289546Scem 611289546Scem mw->buff_size = 0; 612289546Scem mw->xlat_size = 0; 613290679Scem mw->virt_addr = NULL; 614289546Scem mw->dma_addr = 0; 615289546Scem } 616289546Scem 617289546Scem qp_bitmap = ntb_db_valid_mask(ntb); 618289546Scem nt->qp_count = flsll(qp_bitmap); 619289546Scem KASSERT(nt->qp_count != 0, ("bogus db bitmap")); 620289546Scem nt->qp_count -= 1; 621289546Scem 622289546Scem if (max_num_clients != 0 && max_num_clients < nt->qp_count) 623289546Scem nt->qp_count = max_num_clients; 624289546Scem else if (nt->mw_count < nt->qp_count) 625289546Scem nt->qp_count = nt->mw_count; 626289546Scem KASSERT(nt->qp_count <= 
QP_SETSIZE, ("invalid qp_count")); 627289546Scem 628250079Scarl mtx_init(&nt->tx_lock, "ntb transport tx", NULL, MTX_DEF); 629250079Scarl mtx_init(&nt->rx_lock, "ntb transport rx", NULL, MTX_DEF); 630250079Scarl 631289545Scem nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_IF, 632289545Scem M_WAITOK | M_ZERO); 633250079Scarl 634289545Scem for (i = 0; i < nt->qp_count; i++) { 635289544Scem set_bit(i, &nt->qp_bitmap); 636289546Scem set_bit(i, &nt->qp_bitmap_free); 637250079Scarl ntb_transport_init_queue(nt, i); 638289544Scem } 639250079Scarl 640250079Scarl callout_init(&nt->link_work, 0); 641291084Scem callout_init(&nt->link_watchdog, 0); 642290683Scem TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt); 643250079Scarl 644289546Scem rc = ntb_set_ctx(ntb, nt, &ntb_transport_ops); 645250079Scarl if (rc != 0) 646250079Scarl goto err; 647250079Scarl 648289546Scem nt->link_is_up = false; 649289546Scem ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); 650289546Scem ntb_link_event(ntb); 651290682Scem 652290682Scem callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); 653291084Scem if (enable_xeon_watchdog != 0) 654291084Scem callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt); 655250079Scarl return (0); 656250079Scarl 657250079Scarlerr: 658289545Scem free(nt->qp_vec, M_NTB_IF); 659289546Scem nt->qp_vec = NULL; 660250079Scarl return (rc); 661250079Scarl} 662250079Scarl 663250079Scarlstatic void 664289546Scemntb_transport_free(struct ntb_transport_ctx *nt) 665250079Scarl{ 666250079Scarl struct ntb_softc *ntb = nt->ntb; 667289546Scem struct _qpset qp_bitmap_alloc; 668289544Scem uint8_t i; 669250079Scarl 670289273Scem ntb_transport_link_cleanup(nt); 671290683Scem taskqueue_drain(taskqueue_swi, &nt->link_cleanup); 672250079Scarl callout_drain(&nt->link_work); 673291084Scem callout_drain(&nt->link_watchdog); 674250079Scarl 675289546Scem BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &qp_bitmap_alloc); 676289546Scem 
BIT_NAND(QP_SETSIZE, &qp_bitmap_alloc, &nt->qp_bitmap_free); 677289546Scem 678289546Scem /* Verify that all the QPs are freed */ 679289545Scem for (i = 0; i < nt->qp_count; i++) 680289546Scem if (test_bit(i, &qp_bitmap_alloc)) 681289545Scem ntb_transport_free_queue(&nt->qp_vec[i]); 682250079Scarl 683289546Scem ntb_link_disable(ntb); 684289546Scem ntb_clear_ctx(ntb); 685250079Scarl 686289546Scem for (i = 0; i < nt->mw_count; i++) 687289153Scem ntb_free_mw(nt, i); 688250079Scarl 689289545Scem free(nt->qp_vec, M_NTB_IF); 690250079Scarl} 691250079Scarl 692250079Scarlstatic void 693289545Scemntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num) 694250079Scarl{ 695289546Scem struct ntb_transport_mw *mw; 696250079Scarl struct ntb_transport_qp *qp; 697289546Scem vm_paddr_t mw_base; 698289546Scem uint64_t mw_size, qp_offset; 699289546Scem size_t tx_size; 700289546Scem unsigned num_qps_mw, mw_num, mw_count; 701250079Scarl 702289546Scem mw_count = nt->mw_count; 703289545Scem mw_num = QP_TO_MW(nt, qp_num); 704289546Scem mw = &nt->mw_vec[mw_num]; 705289396Scem 706289545Scem qp = &nt->qp_vec[qp_num]; 707250079Scarl qp->qp_num = qp_num; 708250079Scarl qp->transport = nt; 709250079Scarl qp->ntb = nt->ntb; 710289545Scem qp->client_ready = false; 711250079Scarl qp->event_handler = NULL; 712289613Scem ntb_qp_link_down_reset(qp); 713250079Scarl 714289545Scem if (nt->qp_count % mw_count && mw_num + 1 < nt->qp_count / mw_count) 715289545Scem num_qps_mw = nt->qp_count / mw_count + 1; 716250079Scarl else 717289545Scem num_qps_mw = nt->qp_count / mw_count; 718250079Scarl 719289546Scem mw_base = mw->phys_addr; 720289546Scem mw_size = mw->phys_size; 721289546Scem 722289546Scem tx_size = mw_size / num_qps_mw; 723290688Scem qp_offset = tx_size * (qp_num / mw_count); 724289546Scem 725290679Scem qp->tx_mw = mw->vbase + qp_offset; 726289546Scem KASSERT(qp->tx_mw != NULL, ("uh oh?")); 727289546Scem 728289546Scem /* XXX Assumes that a vm_paddr_t is equivalent to bus_addr_t */ 
729289546Scem qp->tx_mw_phys = mw_base + qp_offset; 730289546Scem KASSERT(qp->tx_mw_phys != 0, ("uh oh?")); 731289546Scem 732250079Scarl tx_size -= sizeof(struct ntb_rx_info); 733290679Scem qp->rx_info = (void *)(qp->tx_mw + tx_size); 734250079Scarl 735289156Scem /* Due to house-keeping, there must be at least 2 buffs */ 736289650Scem qp->tx_max_frame = qmin(tx_size / 2, 737289650Scem transport_mtu + sizeof(struct ntb_payload_header)); 738250079Scarl qp->tx_max_entry = tx_size / qp->tx_max_frame; 739250079Scarl 740250079Scarl callout_init(&qp->link_work, 0); 741283291Sjkim callout_init(&qp->queue_full, 1); 742283291Sjkim callout_init(&qp->rx_full, 1); 743250079Scarl 744289651Scem mtx_init(&qp->ntb_rx_q_lock, "ntb rx q", NULL, MTX_SPIN); 745250079Scarl mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN); 746289546Scem TASK_INIT(&qp->rx_completion_task, 0, ntb_complete_rxc, qp); 747289546Scem TASK_INIT(&qp->rxc_db_work, 0, ntb_transport_rxc_db, qp); 748250079Scarl 749289651Scem STAILQ_INIT(&qp->rx_post_q); 750250079Scarl STAILQ_INIT(&qp->rx_pend_q); 751250079Scarl STAILQ_INIT(&qp->tx_free_q); 752290682Scem 753290682Scem callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); 754250079Scarl} 755250079Scarl 756250079Scarlstatic void 757250079Scarlntb_transport_free_queue(struct ntb_transport_qp *qp) 758250079Scarl{ 759250079Scarl struct ntb_queue_entry *entry; 760250079Scarl 761250079Scarl if (qp == NULL) 762250079Scarl return; 763250079Scarl 764250079Scarl callout_drain(&qp->link_work); 765250079Scarl 766289546Scem ntb_db_set_mask(qp->ntb, 1ull << qp->qp_num); 767289546Scem taskqueue_drain(taskqueue_swi, &qp->rxc_db_work); 768289546Scem taskqueue_drain(taskqueue_swi, &qp->rx_completion_task); 769250079Scarl 770289546Scem qp->cb_data = NULL; 771289546Scem qp->rx_handler = NULL; 772289546Scem qp->tx_handler = NULL; 773289546Scem qp->event_handler = NULL; 774289546Scem 775289651Scem while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) 
		free(entry, M_NTB_IF);

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q)))
		free(entry, M_NTB_IF);

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		free(entry, M_NTB_IF);

	/* Mark the qp number as available for reuse. */
	set_bit(qp->qp_num, &qp->transport->qp_bitmap_free);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @rx_handler: receive callback function
 * @tx_handler: transmit callback function
 * @event_handler: event callback function
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
static struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct ntb_softc *ntb,
    const struct ntb_queue_handlers *handlers)
{
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	struct ntb_transport_ctx *nt;
	unsigned int free_queue;
	int i;

	nt = ntb_get_ctx(ntb, NULL);
	KASSERT(nt != NULL, ("bogus"));

	/* ffs_bit() returns a 1-based index; 0 means no free qp. */
	free_queue = ffs_bit(&nt->qp_bitmap);
	if (free_queue == 0)
		return (NULL);

	/* decrement free_queue to make it zero based */
	free_queue--;

	qp = &nt->qp_vec[free_queue];
	clear_bit(qp->qp_num, &nt->qp_bitmap_free);
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	/* Pre-allocate a fixed pool of receive buffers. */
	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = malloc(sizeof(*entry), M_NTB_IF, M_WAITOK | M_ZERO);
		entry->cb_data = nt->ifp;
		entry->buf = NULL;
		entry->len = transport_mtu;
		ntb_list_add(&qp->ntb_rx_q_lock, entry, &qp->rx_pend_q);
	}

	/* ... and a matching pool of transmit entries. */
	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = malloc(sizeof(*entry), M_NTB_IF, M_WAITOK | M_ZERO);
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
	}

	/* Ack any stale doorbell and unmask this qp's doorbell bit. */
	ntb_db_clear(ntb, 1ull << qp->qp_num);
	ntb_db_clear_mask(ntb, 1ull << qp->qp_num);
	return (qp);
}

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
static void
ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt;

	if (qp == NULL)
		return;

	qp->client_ready = true;

	nt = qp->transport;
	ntb_printf(2, "qp client ready\n");

	/* Only start qp link negotiation once the transport link is up. */
	if (qp->transport->link_is_up)
		callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
}



/* Transport Tx */

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which a NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
static int
ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
    unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	/* No transmissions unless the qp link is up and there is a payload. */
	if (qp == NULL || !qp->link_is_up || len == 0) {
		CTR0(KTR_NTB, "TX: link not up");
		return (EINVAL);
	}

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (entry == NULL) {
		CTR0(KTR_NTB, "TX: could not get entry from tx_free_q");
		qp->tx_err_no_buf++;
		return (EBUSY);
	}
	CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry);

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc != 0) {
		/* On failure the caller keeps ownership of 'data'. */
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
		CTR1(KTR_NTB,
		    "TX: process_tx failed. Returning entry %p to tx_free_q",
		    entry);
	}
	return (rc);
}

/*
 * Place one entry into the local TX memory-window ring and advance the
 * producer index.  Returns EAGAIN when the ring is full; oversized frames
 * are consumed (reported to the client as EIO) and return 0.
 */
static int
ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)
{
	void *offset;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	CTR3(KTR_NTB,
	    "TX: process_tx: tx_pkts=%lu, tx_index=%u, remote entry=%u",
	    qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry);
	/* Ring full: producer caught up with the remote consumer index. */
	if (qp->tx_index == qp->remote_rx_info->entry) {
		CTR0(KTR_NTB, "TX: ring full");
		qp->tx_ring_full++;
		return (EAGAIN);
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		/* Frame cannot fit in a ring slot: hand back EIO or drop. */
		if (qp->tx_handler != NULL)
			qp->tx_handler(qp, qp->cb_data, entry->buf,
			    EIO);
		else
			m_freem(entry->buf);

		entry->buf = NULL;
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
		CTR1(KTR_NTB,
		    "TX: frame too big. returning entry %p to tx_free_q",
		    entry);
		return (0);
	}
	CTR2(KTR_NTB, "TX: copying entry %p to offset %p", entry, offset);
	ntb_memcpy_tx(qp, entry, offset);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return (0);
}

/*
 * Write the payload header and data into the peer-visible memory window,
 * ring the peer doorbell, then complete the entry back to the client.
 */
static void
ntb_memcpy_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
    void *offset)
{
	struct ntb_payload_header *hdr;

	/* This piece is from Linux' ntb_async_tx() */
	hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame -
	    sizeof(struct ntb_payload_header));
	entry->x_hdr = hdr;
	iowrite32(entry->len, &hdr->len);
	iowrite32(qp->tx_pkts, &hdr->ver);

	/* This piece is ntb_memcpy_tx() */
	CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset);
	if (entry->buf != NULL) {
		m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset);

		/*
		 * Ensure that the data is fully copied before setting the
		 * flags
		 */
		wmb();
	}

	/* The rest is ntb_tx_copy_callback() */
	iowrite32(entry->flags | IF_NTB_DESC_DONE_FLAG, &hdr->flags);
	CTR1(KTR_NTB, "TX: hdr %p set DESC_DONE", hdr);

	ntb_peer_db_set(qp->ntb, 1ull << qp->qp_num);

	/*
	 * The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		/* The tx completion callback takes ownership of the mbuf. */
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->buf,
			    entry->len);
		else
			m_freem(entry->buf);
		entry->buf = NULL;
	}

	CTR3(KTR_NTB,
	    "TX: entry %p sent. hdr->ver = %u, hdr->flags = 0x%x, Returning "
	    "to tx_free_q", entry, hdr->ver, hdr->flags);
	ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
}

/* Callout handler: retry transmission once the full ring has drained. */
static void
ntb_qp_full(void *arg)
{

	CTR0(KTR_NTB, "TX: qp_full callout");
	ntb_start(arg);
}

/* Transport Rx */
static void
ntb_transport_rxc_db(void *arg, int pending __unused)
{
	struct ntb_transport_qp *qp = arg;
	ntb_q_idx_t i;
	int rc;

	/*
	 * Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	CTR0(KTR_NTB, "RX: transport_rx");
	mtx_lock(&qp->transport->rx_lock);
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc != 0) {
			CTR0(KTR_NTB, "RX: process_rxc failed");
			break;
		}
	}
	mtx_unlock(&qp->transport->rx_lock);

	/* Budget exhausted: reschedule ourselves for the remainder. */
	if (i == qp->rx_max_entry)
		taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work);
	else if ((ntb_db_read(qp->ntb) & (1ull << qp->qp_num)) != 0) {
		/* If db is set, clear it and read it back to commit clear. */
		ntb_db_clear(qp->ntb, 1ull << qp->qp_num);
		(void)ntb_db_read(qp->ntb);

		/*
		 * An interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit: there might
		 * be some more work to do.
		 */
		taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work);
	}
}

/*
 * Consume one completed descriptor from the local RX ring.  Returns 0 when
 * a frame was consumed, EAGAIN when there is nothing (more) to do, or EIO
 * on a sequence-number mismatch with the peer.
 */
static int
ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	caddr_t offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = (void *)(offset + qp->rx_max_frame -
	    sizeof(struct ntb_payload_header));

	CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index);
	if ((hdr->flags & IF_NTB_DESC_DONE_FLAG) == 0) {
		CTR0(KTR_NTB, "RX: hdr not done");
		qp->rx_ring_empty++;
		return (EAGAIN);
	}

	if ((hdr->flags & IF_NTB_LINK_DOWN_FLAG) != 0) {
		/* In-band link-down frame from the peer. */
		CTR0(KTR_NTB, "RX: link down");
		ntb_qp_link_down(qp);
		hdr->flags = 0;
		return (EAGAIN);
	}

	/* The header version must match the low 32 bits of rx_pkts. */
	if (hdr->ver != (uint32_t)qp->rx_pkts) {
		CTR2(KTR_NTB,"RX: ver != rx_pkts (%x != %lx). "
		    "Returning entry to rx_pend_q", hdr->ver, qp->rx_pkts);
		qp->rx_err_ver++;
		return (EIO);
	}

	/* Claim a receive buffer: move it from the pending to posted list. */
	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
	if (entry == NULL) {
		qp->rx_err_no_buf++;
		CTR0(KTR_NTB, "RX: No entries in rx_pend_q");
		return (EAGAIN);
	}
	callout_stop(&qp->rx_full);
	CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry);

	entry->x_hdr = hdr;
	entry->index = qp->rx_index;

	if (hdr->len > entry->len) {
		/* Frame larger than the buffer: flag EIO to the completer. */
		CTR2(KTR_NTB, "RX: len too long. Wanted %ju got %ju",
		    (uintmax_t)hdr->len, (uintmax_t)entry->len);
		qp->rx_err_oflow++;

		entry->len = -EIO;
		entry->flags |= IF_NTB_DESC_DONE_FLAG;

		taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task);
	} else {
		qp->rx_bytes += hdr->len;
		qp->rx_pkts++;

		CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts);

		entry->len = hdr->len;

		ntb_memcpy_rx(qp, entry, offset);
	}

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;
	return (0);
}

/*
 * Copy a received frame out of the memory window into a freshly allocated
 * mbuf chain, then hand it to the completion path.
 */
static void
ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
    void *offset)
{
	struct ifnet *ifp = entry->cb_data;
	unsigned int len = entry->len;
	struct mbuf *m;

	CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset);
	m = m_devget(offset, len, 0, ifp, NULL);
	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;

	entry->buf = (void *)m;

	/* Ensure that the data is globally visible before clearing the flag */
	wmb();

	CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p.", entry, m);
	ntb_rx_copy_callback(qp, entry);
}

/* Mark an rx entry complete and kick the completion taskqueue. */
static inline void
ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data)
{
	struct ntb_queue_entry *entry;

	entry = data;
	entry->flags |= IF_NTB_DESC_DONE_FLAG;
	taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task);
}

/*
 * Completion task: deliver finished rx_post_q entries to the client in
 * order, acknowledge each slot to the peer via rx_info->entry, and recycle
 * the queue entries onto rx_pend_q.  The spin lock is dropped around the
 * client callback.
 */
static void
ntb_complete_rxc(void *arg, int pending)
{
	struct ntb_transport_qp *qp = arg;
	struct ntb_queue_entry *entry;
	struct mbuf *m;
	unsigned len;

	CTR0(KTR_NTB, "RX: rx_completion_task");

	mtx_lock_spin(&qp->ntb_rx_q_lock);

	while (!STAILQ_EMPTY(&qp->rx_post_q)) {
		entry = STAILQ_FIRST(&qp->rx_post_q);
		/* Stop at the first entry the rx path has not finished. */
		if ((entry->flags & IF_NTB_DESC_DONE_FLAG) == 0)
			break;

		/* Release the ring slot back to the peer. */
		entry->x_hdr->flags = 0;
		iowrite32(entry->index, &qp->rx_info->entry);

		STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry);

		len = entry->len;
		m = entry->buf;

		/*
		 * Re-initialize queue_entry for reuse; rx_handler takes
		 * ownership of the mbuf.
		 */
		entry->buf = NULL;
		entry->len = transport_mtu;
		entry->cb_data = qp->transport->ifp;

		STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry);

		/* Drop the spin lock across the client callback. */
		mtx_unlock_spin(&qp->ntb_rx_q_lock);

		CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
		if (qp->rx_handler != NULL && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, m, len);
		else
			m_freem(m);

		mtx_lock_spin(&qp->ntb_rx_q_lock);
	}

	mtx_unlock_spin(&qp->ntb_rx_q_lock);
}

/*
 * Doorbell interrupt handler: schedule rx processing for every allocated
 * qp whose doorbell bit belongs to this interrupt vector.
 */
static void
ntb_transport_doorbell_callback(void *data, uint32_t vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	struct _qpset db_bits;
	uint64_t vec_mask;
	unsigned qp_num;

	/* db_bits = allocated qps (all qps minus the free ones). */
	BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &db_bits);
	BIT_NAND(QP_SETSIZE, &db_bits, &nt->qp_bitmap_free);

	vec_mask = ntb_db_vector_mask(nt->ntb, vector);
	while (vec_mask != 0) {
		qp_num = ffsll(vec_mask) - 1;

		if (test_bit(qp_num, &db_bits)) {
			qp = &nt->qp_vec[qp_num];
			taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work);
		}

		vec_mask &= ~(1ull << qp_num);
	}
}

/* Link Event handler */
static void
ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ntb, NULL, NULL)) {
		ntb_printf(1, "HW link up\n");
callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); 1244289546Scem } else { 1245290684Scem ntb_printf(1, "HW link down\n"); 1246290683Scem taskqueue_enqueue(taskqueue_swi, &nt->link_cleanup); 1247250079Scarl } 1248250079Scarl} 1249250079Scarl 1250250079Scarl/* Link bring up */ 1251250079Scarlstatic void 1252250079Scarlntb_transport_link_work(void *arg) 1253250079Scarl{ 1254289545Scem struct ntb_transport_ctx *nt = arg; 1255250079Scarl struct ntb_softc *ntb = nt->ntb; 1256250079Scarl struct ntb_transport_qp *qp; 1257289546Scem uint64_t val64, size; 1258289546Scem uint32_t val; 1259289546Scem unsigned i; 1260289208Scem int rc; 1261250079Scarl 1262289153Scem /* send the local info, in the opposite order of the way we read it */ 1263289546Scem for (i = 0; i < nt->mw_count; i++) { 1264289546Scem size = nt->mw_vec[i].phys_size; 1265250079Scarl 1266289546Scem if (max_mw_size != 0 && size > max_mw_size) 1267289546Scem size = max_mw_size; 1268289546Scem 1269289546Scem ntb_peer_spad_write(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), 1270289546Scem size >> 32); 1271289546Scem ntb_peer_spad_write(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), size); 1272289153Scem } 1273250079Scarl 1274289546Scem ntb_peer_spad_write(ntb, IF_NTB_NUM_MWS, nt->mw_count); 1275250079Scarl 1276289546Scem ntb_peer_spad_write(ntb, IF_NTB_NUM_QPS, nt->qp_count); 1277250079Scarl 1278289546Scem ntb_peer_spad_write(ntb, IF_NTB_VERSION, NTB_TRANSPORT_VERSION); 1279250079Scarl 1280250079Scarl /* Query the remote side for its info */ 1281289546Scem val = 0; 1282289546Scem ntb_spad_read(ntb, IF_NTB_VERSION, &val); 1283250079Scarl if (val != NTB_TRANSPORT_VERSION) 1284250079Scarl goto out; 1285250079Scarl 1286289546Scem ntb_spad_read(ntb, IF_NTB_NUM_QPS, &val); 1287289545Scem if (val != nt->qp_count) 1288250079Scarl goto out; 1289250079Scarl 1290289546Scem ntb_spad_read(ntb, IF_NTB_NUM_MWS, &val); 1291289546Scem if (val != nt->mw_count) 1292250079Scarl goto out; 1293250079Scarl 1294289546Scem for (i = 0; i < nt->mw_count; 
i++) { 1295289546Scem ntb_spad_read(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), &val); 1296289153Scem val64 = (uint64_t)val << 32; 1297250079Scarl 1298289546Scem ntb_spad_read(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), &val); 1299289153Scem val64 |= val; 1300250079Scarl 1301289153Scem rc = ntb_set_mw(nt, i, val64); 1302289153Scem if (rc != 0) 1303289153Scem goto free_mws; 1304289153Scem } 1305289153Scem 1306289545Scem nt->link_is_up = true; 1307290684Scem ntb_printf(1, "transport link up\n"); 1308250079Scarl 1309289545Scem for (i = 0; i < nt->qp_count; i++) { 1310289545Scem qp = &nt->qp_vec[i]; 1311250079Scarl 1312250079Scarl ntb_transport_setup_qp_mw(nt, i); 1313250079Scarl 1314289545Scem if (qp->client_ready) 1315250079Scarl callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); 1316250079Scarl } 1317250079Scarl 1318250079Scarl return; 1319250079Scarl 1320289153Scemfree_mws: 1321289546Scem for (i = 0; i < nt->mw_count; i++) 1322289153Scem ntb_free_mw(nt, i); 1323250079Scarlout: 1324289546Scem if (ntb_link_is_up(ntb, NULL, NULL)) 1325250079Scarl callout_reset(&nt->link_work, 1326289153Scem NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt); 1327250079Scarl} 1328250079Scarl 1329250079Scarlstatic int 1330289652Scemntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, size_t size) 1331250079Scarl{ 1332289545Scem struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; 1333289652Scem size_t xlat_size, buff_size; 1334289546Scem int rc; 1335250079Scarl 1336289652Scem if (size == 0) 1337289652Scem return (EINVAL); 1338289652Scem 1339289546Scem xlat_size = roundup(size, mw->xlat_align_size); 1340289546Scem buff_size = roundup(size, mw->xlat_align); 1341289546Scem 1342289154Scem /* No need to re-setup */ 1343289546Scem if (mw->xlat_size == xlat_size) 1344289154Scem return (0); 1345289154Scem 1346289546Scem if (mw->buff_size != 0) 1347289154Scem ntb_free_mw(nt, num_mw); 1348289154Scem 1349289546Scem /* Alloc memory for receiving data. 
Must be aligned */ 1350289546Scem mw->xlat_size = xlat_size; 1351289546Scem mw->buff_size = buff_size; 1352250079Scarl 1353289546Scem mw->virt_addr = contigmalloc(mw->buff_size, M_NTB_IF, M_ZERO, 0, 1354291033Scem mw->addr_limit, mw->xlat_align, 0); 1355250079Scarl if (mw->virt_addr == NULL) { 1356289545Scem mw->xlat_size = 0; 1357289546Scem mw->buff_size = 0; 1358289346Scem printf("ntb: Unable to allocate MW buffer of size %zu\n", 1359289545Scem mw->xlat_size); 1360250079Scarl return (ENOMEM); 1361250079Scarl } 1362250079Scarl /* TODO: replace with bus_space_* functions */ 1363250079Scarl mw->dma_addr = vtophys(mw->virt_addr); 1364250079Scarl 1365289346Scem /* 1366289346Scem * Ensure that the allocation from contigmalloc is aligned as 1367289346Scem * requested. XXX: This may not be needed -- brought in for parity 1368289346Scem * with the Linux driver. 1369289346Scem */ 1370289546Scem if (mw->dma_addr % mw->xlat_align != 0) { 1371290684Scem ntb_printf(0, 1372289652Scem "DMA memory 0x%jx not aligned to BAR size 0x%zx\n", 1373289346Scem (uintmax_t)mw->dma_addr, size); 1374289346Scem ntb_free_mw(nt, num_mw); 1375289346Scem return (ENOMEM); 1376289346Scem } 1377289346Scem 1378250079Scarl /* Notify HW the memory location of the receive buffer */ 1379289546Scem rc = ntb_mw_set_trans(nt->ntb, num_mw, mw->dma_addr, mw->xlat_size); 1380289546Scem if (rc) { 1381290684Scem ntb_printf(0, "Unable to set mw%d translation\n", num_mw); 1382289546Scem ntb_free_mw(nt, num_mw); 1383289546Scem return (rc); 1384289546Scem } 1385250079Scarl 1386250079Scarl return (0); 1387250079Scarl} 1388250079Scarl 1389250079Scarlstatic void 1390289545Scemntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) 1391289153Scem{ 1392289545Scem struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; 1393289153Scem 1394289153Scem if (mw->virt_addr == NULL) 1395289153Scem return; 1396289153Scem 1397289546Scem ntb_mw_clear_trans(nt->ntb, num_mw); 1398289545Scem contigfree(mw->virt_addr, mw->xlat_size, M_NTB_IF); 
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}

/*
 * Carve this qp's slice out of its memory window and initialize the RX
 * ring bookkeeping.  Returns ENOMEM if the MW buffer is not allocated.
 */
static int
ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	void *offset;
	ntb_q_idx_t i;
	size_t rx_size;
	unsigned num_qps_mw, mw_num, mw_count;

	mw_count = nt->mw_count;
	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (mw->virt_addr == NULL)
		return (ENOMEM);

	/* Spread the qps as evenly as possible over the memory windows. */
	if (nt->qp_count % mw_count && mw_num + 1 < nt->qp_count / mw_count)
		num_qps_mw = nt->qp_count / mw_count + 1;
	else
		num_qps_mw = nt->qp_count / mw_count;

	rx_size = mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
	/* The peer's struct ntb_rx_info sits at the tail of the slice. */
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = (void*)(qp->rx_buff + rx_size);

	/* Due to house-keeping, there must be at least 2 buffs */
	qp->rx_max_frame = qmin(rx_size / 2,
	    transport_mtu + sizeof(struct ntb_payload_header));
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* Set up the hdr offsets with 0s */
	for (i = 0; i < qp->rx_max_entry; i++) {
		offset = (void *)(qp->rx_buff + qp->rx_max_frame * (i + 1) -
		    sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;

	return (0);
}

/*
 * Callout handler: advertise this qp's readiness to the peer via the
 * QP_LINKS scratchpad and check whether the peer has done the same;
 * reschedule itself while the transport link remains up.
 */
static void
ntb_qp_link_work(void *arg)
{
	struct ntb_transport_qp *qp = arg;
	struct ntb_softc *ntb = qp->ntb;
	struct ntb_transport_ctx *nt = qp->transport;
	uint32_t val, dummy;

	ntb_spad_read(ntb, IF_NTB_QP_LINKS, &val);

	ntb_peer_spad_write(ntb, IF_NTB_QP_LINKS, val | (1ull << qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(ntb, IF_NTB_QP_LINKS, &dummy);

	/* See if the remote side is up */
	if ((val & (1ull << qp->qp_num)) != 0) {
		ntb_printf(2, "qp link up\n");
		qp->link_is_up = true;

		if (qp->event_handler != NULL)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);

		/* Poll for anything the peer sent before we linked up. */
		taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work);
	} else if (nt->link_is_up)
		callout_reset(&qp->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
}

/* Link down event*/
static void
ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	struct _qpset qp_bitmap_alloc;
	unsigned i;

	/* Allocated qps = all qps minus the free ones. */
	BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &qp_bitmap_alloc);
	BIT_NAND(QP_SETSIZE, &qp_bitmap_alloc, &nt->qp_bitmap_free);

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (test_bit(i, &qp_bitmap_alloc)) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			callout_drain(&qp->link_work);
		}

	if (!nt->link_is_up)
		callout_drain(&nt->link_work);

	/*
	 * The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < IF_NTB_MAX_SPAD; i++)
		ntb_spad_write(nt->ntb, i, 0);
}

/* Taskqueue wrapper around ntb_transport_link_cleanup(). */
static void
ntb_transport_link_cleanup_work(void *arg, int pending __unused)
{

	ntb_transport_link_cleanup(arg);
}

/* Bring one qp's link down (e.g. on an in-band link-down frame). */
static void
ntb_qp_link_down(struct ntb_transport_qp *qp)
{

	ntb_qp_link_cleanup(qp);
}

/* Reset a qp's ring indices and statistics for a fresh link negotiation. */
static void
ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{

	qp->link_is_up = false;

	qp->tx_index = qp->rx_index = 0;
	qp->tx_bytes = qp->rx_bytes = 0;
	qp->tx_pkts = qp->rx_pkts = 0;

	qp->rx_ring_empty = 0;
	qp->tx_ring_full = 0;

	qp->rx_err_no_buf = qp->tx_err_no_buf = 0;
	qp->rx_err_oflow = qp->rx_err_ver = 0;
}

/*
 * Tear down one qp's link state, notify the client, and re-arm link
 * negotiation if the transport link is still up.
 */
static void
ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;

	callout_drain(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler != NULL)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	if (nt->link_is_up)
		callout_reset(&qp->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
}

/* Link commanded down */
/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
static void
ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	uint32_t val;

	if (qp == NULL)
		return;

	qp->client_ready = false;

	/* Clear our ready bit in the peer-visible QP_LINKS scratchpad. */
	ntb_spad_read(qp->ntb, IF_NTB_QP_LINKS, &val);

	ntb_peer_spad_write(qp->ntb, IF_NTB_QP_LINKS,
	    val & ~(1 << qp->qp_num));

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		callout_drain(&qp->link_work);
}

/*
 * Send an in-band "link down" frame to the peer, waiting briefly for a
 * free tx entry if necessary, then reset local qp state.
 */
static void
ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry != NULL)
			break;
		pause("NTB Wait for link down", hz / 10);
	}

	if (entry == NULL)
		return;

	/* Zero-length frame carrying only the LINK_DOWN flag. */
	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = IF_NTB_LINK_DOWN_FLAG;

	mtx_lock(&qp->transport->tx_lock);
	rc = ntb_process_tx(qp, entry);
	if (rc != 0)
		printf("ntb: Failed to send link down\n");
	mtx_unlock(&qp->transport->tx_lock);

	ntb_qp_link_down_reset(qp);
}


/* List Management */

/* Append an entry to a queue list under its spin lock. */
static void
ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
    struct ntb_queue_list *list)
{

	mtx_lock_spin(lock);
	STAILQ_INSERT_TAIL(list, entry, entry);
	mtx_unlock_spin(lock);
}

/* Pop the head of a queue list under its spin lock; NULL when empty. */
static struct ntb_queue_entry *
ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list)
{
	struct ntb_queue_entry *entry;

	mtx_lock_spin(lock);
	if (STAILQ_EMPTY(list)) {
		entry = NULL;
		goto out;
	}
	entry = STAILQ_FIRST(list);
	STAILQ_REMOVE_HEAD(list, entry);
out:
	mtx_unlock_spin(lock);

	return (entry);
}

/*
 * Atomically move the head of 'from' to the tail of 'to' under the shared
 * spin lock; returns the moved entry, or NULL when 'from' is empty.
 */
static struct ntb_queue_entry *
ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from,
    struct ntb_queue_list *to)
{
	struct ntb_queue_entry *entry;

	mtx_lock_spin(lock);
	if (STAILQ_EMPTY(from)) {
		entry = NULL;
		goto out;
	}
	entry = STAILQ_FIRST(from);
	STAILQ_REMOVE_HEAD(from, entry);
	STAILQ_INSERT_TAIL(to, entry, entry);

out:
	mtx_unlock_spin(lock);
	return (entry);
}

/* Helper functions */
/* TODO: This too should really be part of the kernel */
/* NOTE(review): expansions are unparenthesized; fine for the current uses. */
#define EUI48_MULTICAST 1 << 0
#define EUI48_LOCALLY_ADMINISTERED 1 << 1
/*
 * Generate a locally-administered EUI-48 address: 'ticks' seeds bytes 1-4
 * and a static counter distinguishes devices created in the same tick.
 * Not cryptographically random.
 */
static void
create_random_local_eui48(u_char *eaddr)
{
	static uint8_t counter = 0;
	uint32_t seed = ticks;

	eaddr[0] = EUI48_LOCALLY_ADMINISTERED;
	memcpy(&eaddr[1], &seed, sizeof(uint32_t));
	eaddr[5] = counter++;
}

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
static unsigned int
ntb_transport_max_size(struct ntb_transport_qp *qp)
{

	if (qp == NULL)
		return (0);

	return (qp->tx_max_frame - sizeof(struct ntb_payload_header));
}