1250079Scarl/*-
2323455Smav * Copyright (c) 2016-2017 Alexander Motin <mav@FreeBSD.org>
3250079Scarl * Copyright (C) 2013 Intel Corporation
4289545Scem * Copyright (C) 2015 EMC Corporation
5250079Scarl * All rights reserved.
6250079Scarl *
7250079Scarl * Redistribution and use in source and binary forms, with or without
8250079Scarl * modification, are permitted provided that the following conditions
9250079Scarl * are met:
10250079Scarl * 1. Redistributions of source code must retain the above copyright
11250079Scarl *    notice, this list of conditions and the following disclaimer.
12250079Scarl * 2. Redistributions in binary form must reproduce the above copyright
13250079Scarl *    notice, this list of conditions and the following disclaimer in the
14250079Scarl *    documentation and/or other materials provided with the distribution.
15250079Scarl *
16250079Scarl * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17250079Scarl * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18250079Scarl * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19250079Scarl * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20250079Scarl * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21250079Scarl * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22250079Scarl * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23250079Scarl * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24250079Scarl * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25250079Scarl * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26250079Scarl * SUCH DAMAGE.
27250079Scarl */
28250079Scarl
29302484Smav/*
30302484Smav * The Non-Transparent Bridge (NTB) is a device that allows you to connect
31302484Smav * two or more systems using a PCI-e links, providing remote memory access.
32302484Smav *
33302484Smav * This module contains a transport for sending and receiving messages by
34302484Smav * writing to remote memory window(s) provided by underlying NTB device.
35302484Smav *
36302484Smav * NOTE: Much of the code in this module is shared with Linux. Any patches may
37302484Smav * be picked up and redistributed in Linux with a dual GPL/BSD license.
38302484Smav */
39302484Smav
40250079Scarl#include <sys/cdefs.h>
41250079Scarl__FBSDID("$FreeBSD: stable/11/sys/dev/ntb/ntb_transport.c 329057 2018-02-09 03:07:12Z mav $");
42250079Scarl
43250079Scarl#include <sys/param.h>
44250079Scarl#include <sys/kernel.h>
45250079Scarl#include <sys/systm.h>
46250079Scarl#include <sys/bus.h>
47250079Scarl#include <sys/ktr.h>
48289281Scem#include <sys/limits.h>
49250079Scarl#include <sys/lock.h>
50250079Scarl#include <sys/malloc.h>
51302484Smav#include <sys/mbuf.h>
52250079Scarl#include <sys/module.h>
53250079Scarl#include <sys/mutex.h>
54250079Scarl#include <sys/queue.h>
55289207Scem#include <sys/sysctl.h>
56250079Scarl#include <sys/taskqueue.h>
57289544Scem
58250079Scarl#include <vm/vm.h>
59250079Scarl#include <vm/pmap.h>
60289544Scem
61250079Scarl#include <machine/bus.h>
62250079Scarl
63302484Smav#include "ntb.h"
64302484Smav#include "ntb_transport.h"
65291085Scem
66250079Scarl#define KTR_NTB KTR_SPARE3
67250079Scarl
68289546Scem#define NTB_TRANSPORT_VERSION	4
69250079Scarl
70302484Smavstatic SYSCTL_NODE(_hw, OID_AUTO, ntb_transport, CTLFLAG_RW, 0, "ntb_transport");
71289546Scem
72302484Smavstatic unsigned g_ntb_transport_debug_level;
73302484SmavSYSCTL_UINT(_hw_ntb_transport, OID_AUTO, debug_level, CTLFLAG_RWTUN,
74302484Smav    &g_ntb_transport_debug_level, 0,
75302484Smav    "ntb_transport log level -- higher is more verbose");
76290684Scem#define ntb_printf(lvl, ...) do {			\
77302484Smav	if ((lvl) <= g_ntb_transport_debug_level) {	\
78302484Smav		printf(__VA_ARGS__);			\
79290684Scem	}						\
80290684Scem} while (0)
81290684Scem
82302484Smavstatic unsigned transport_mtu = 0x10000;
83290684Scem
84289546Scemstatic uint64_t max_mw_size;
85302484SmavSYSCTL_UQUAD(_hw_ntb_transport, OID_AUTO, max_mw_size, CTLFLAG_RDTUN, &max_mw_size, 0,
86289546Scem    "If enabled (non-zero), limit the size of large memory windows. "
87289546Scem    "Both sides of the NTB MUST set the same value here.");
88289546Scem
89291084Scemstatic unsigned enable_xeon_watchdog;
90302484SmavSYSCTL_UINT(_hw_ntb_transport, OID_AUTO, enable_xeon_watchdog, CTLFLAG_RDTUN,
91291084Scem    &enable_xeon_watchdog, 0, "If non-zero, write a register every second to "
92291084Scem    "keep a watchdog from tearing down the NTB link");
93291084Scem
94250079ScarlSTAILQ_HEAD(ntb_queue_list, ntb_queue_entry);
95250079Scarl
96291028Scemtypedef uint32_t ntb_q_idx_t;
97289653Scem
98250079Scarlstruct ntb_queue_entry {
99250079Scarl	/* ntb_queue list reference */
100250079Scarl	STAILQ_ENTRY(ntb_queue_entry) entry;
101250079Scarl
102289546Scem	/* info on data to be transferred */
103250079Scarl	void		*cb_data;
104250079Scarl	void		*buf;
105291028Scem	uint32_t	len;
106291028Scem	uint32_t	flags;
107289546Scem
108289546Scem	struct ntb_transport_qp		*qp;
109289546Scem	struct ntb_payload_header	*x_hdr;
110289653Scem	ntb_q_idx_t	index;
111250079Scarl};
112250079Scarl
113250079Scarlstruct ntb_rx_info {
114289653Scem	ntb_q_idx_t	entry;
115250079Scarl};
116250079Scarl
117250079Scarlstruct ntb_transport_qp {
118289545Scem	struct ntb_transport_ctx	*transport;
119304367Smav	device_t		 dev;
120250079Scarl
121250079Scarl	void			*cb_data;
122250079Scarl
123250079Scarl	bool			client_ready;
124290686Scem	volatile bool		link_is_up;
125255281Scarl	uint8_t			qp_num;	/* Only 64 QPs are allowed.  0-63 */
126250079Scarl
127250079Scarl	struct ntb_rx_info	*rx_info;
128250079Scarl	struct ntb_rx_info	*remote_rx_info;
129250079Scarl
130289341Scem	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
131250079Scarl	    void *data, int len);
132250079Scarl	struct ntb_queue_list	tx_free_q;
133250079Scarl	struct mtx		ntb_tx_free_q_lock;
134290679Scem	caddr_t			tx_mw;
135289546Scem	bus_addr_t		tx_mw_phys;
136289653Scem	ntb_q_idx_t		tx_index;
137289653Scem	ntb_q_idx_t		tx_max_entry;
138250079Scarl	uint64_t		tx_max_frame;
139250079Scarl
140289341Scem	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
141250079Scarl	    void *data, int len);
142289651Scem	struct ntb_queue_list	rx_post_q;
143250079Scarl	struct ntb_queue_list	rx_pend_q;
144289651Scem	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
145289651Scem	struct mtx		ntb_rx_q_lock;
146289546Scem	struct task		rxc_db_work;
147304351Smav	struct taskqueue	*rxc_tq;
148290679Scem	caddr_t			rx_buff;
149289653Scem	ntb_q_idx_t		rx_index;
150289653Scem	ntb_q_idx_t		rx_max_entry;
151250079Scarl	uint64_t		rx_max_frame;
152250079Scarl
153289341Scem	void (*event_handler)(void *data, enum ntb_link_event status);
154250079Scarl	struct callout		link_work;
155250079Scarl	struct callout		rx_full;
156250079Scarl
157250079Scarl	uint64_t		last_rx_no_buf;
158250079Scarl
159250079Scarl	/* Stats */
160250079Scarl	uint64_t		rx_bytes;
161250079Scarl	uint64_t		rx_pkts;
162250079Scarl	uint64_t		rx_ring_empty;
163250079Scarl	uint64_t		rx_err_no_buf;
164250079Scarl	uint64_t		rx_err_oflow;
165250079Scarl	uint64_t		rx_err_ver;
166250079Scarl	uint64_t		tx_bytes;
167250079Scarl	uint64_t		tx_pkts;
168250079Scarl	uint64_t		tx_ring_full;
169289653Scem	uint64_t		tx_err_no_buf;
170304349Smav
171304349Smav	struct mtx		tx_lock;
172250079Scarl};
173250079Scarl
174250079Scarlstruct ntb_transport_mw {
175289546Scem	vm_paddr_t	phys_addr;
176289546Scem	size_t		phys_size;
177289546Scem	size_t		xlat_align;
178289546Scem	size_t		xlat_align_size;
179291033Scem	bus_addr_t	addr_limit;
180289546Scem	/* Tx buff is off vbase / phys_addr */
181290679Scem	caddr_t		vbase;
182289545Scem	size_t		xlat_size;
183289546Scem	size_t		buff_size;
184289546Scem	/* Rx buff is off virt_addr / dma_addr */
185329056Smav	bus_dma_tag_t	dma_tag;
186329056Smav	bus_dmamap_t	dma_map;
187290679Scem	caddr_t		virt_addr;
188289546Scem	bus_addr_t	dma_addr;
189250079Scarl};
190250079Scarl
191304368Smavstruct ntb_transport_child {
192304368Smav	device_t	dev;
193323455Smav	int		consumer;
194304368Smav	int		qpoff;
195304368Smav	int		qpcnt;
196304368Smav	struct ntb_transport_child *next;
197304368Smav};
198304368Smav
199289545Scemstruct ntb_transport_ctx {
200304351Smav	device_t		 dev;
201304368Smav	struct ntb_transport_child *child;
202302484Smav	struct ntb_transport_mw	*mw_vec;
203289545Scem	struct ntb_transport_qp	*qp_vec;
204289545Scem	unsigned		mw_count;
205289545Scem	unsigned		qp_count;
206304368Smav	uint64_t		qp_bitmap;
207290686Scem	volatile bool		link_is_up;
208317892Smav	enum ntb_speed		link_speed;
209317892Smav	enum ntb_width		link_width;
210250079Scarl	struct callout		link_work;
211291084Scem	struct callout		link_watchdog;
212290683Scem	struct task		link_cleanup;
213250079Scarl};
214250079Scarl
215250079Scarlenum {
216302484Smav	NTBT_DESC_DONE_FLAG = 1 << 0,
217302484Smav	NTBT_LINK_DOWN_FLAG = 1 << 1,
218250079Scarl};
219250079Scarl
220250079Scarlstruct ntb_payload_header {
221291028Scem	ntb_q_idx_t ver;
222291028Scem	uint32_t len;
223291028Scem	uint32_t flags;
224250079Scarl};
225250079Scarl
226250079Scarlenum {
227289153Scem	/*
228302484Smav	 * The order of this enum is part of the remote protocol.  Do not
229302484Smav	 * reorder without bumping protocol version (and it's probably best
230289153Scem	 * to keep the protocol in lock-step with the Linux NTB driver.
231289153Scem	 */
232302484Smav	NTBT_VERSION = 0,
233302484Smav	NTBT_QP_LINKS,
234302484Smav	NTBT_NUM_QPS,
235302484Smav	NTBT_NUM_MWS,
236289153Scem	/*
237289153Scem	 * N.B.: transport_link_work assumes MW1 enums = MW0 + 2.
238289153Scem	 */
239302484Smav	NTBT_MW0_SZ_HIGH,
240302484Smav	NTBT_MW0_SZ_LOW,
241302484Smav	NTBT_MW1_SZ_HIGH,
242302484Smav	NTBT_MW1_SZ_LOW,
243291084Scem
244291084Scem	/*
245291084Scem	 * Some NTB-using hardware have a watchdog to work around NTB hangs; if
246291084Scem	 * a register or doorbell isn't written every few seconds, the link is
247291084Scem	 * torn down.  Write an otherwise unused register every few seconds to
248291084Scem	 * work around this watchdog.
249291084Scem	 */
250302484Smav	NTBT_WATCHDOG_SPAD = 15
251250079Scarl};
252250079Scarl
253289545Scem#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
254250079Scarl#define NTB_QP_DEF_NUM_ENTRIES	100
255323456Smav#define NTB_LINK_DOWN_TIMEOUT	100
256250079Scarl
257302484Smavstatic int ntb_transport_probe(device_t dev);
258302484Smavstatic int ntb_transport_attach(device_t dev);
259302484Smavstatic int ntb_transport_detach(device_t dev);
260289545Scemstatic void ntb_transport_init_queue(struct ntb_transport_ctx *nt,
261250079Scarl    unsigned int qp_num);
262250079Scarlstatic int ntb_process_tx(struct ntb_transport_qp *qp,
263250079Scarl    struct ntb_queue_entry *entry);
264289546Scemstatic void ntb_transport_rxc_db(void *arg, int pending);
265250079Scarlstatic int ntb_process_rxc(struct ntb_transport_qp *qp);
266289651Scemstatic void ntb_memcpy_rx(struct ntb_transport_qp *qp,
267250079Scarl    struct ntb_queue_entry *entry, void *offset);
268289651Scemstatic inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp,
269289651Scem    void *data);
270304350Smavstatic void ntb_complete_rxc(struct ntb_transport_qp *qp);
271289598Scemstatic void ntb_transport_doorbell_callback(void *data, uint32_t vector);
272289546Scemstatic void ntb_transport_event_callback(void *data);
273250079Scarlstatic void ntb_transport_link_work(void *arg);
274289652Scemstatic int ntb_set_mw(struct ntb_transport_ctx *, int num_mw, size_t size);
275289545Scemstatic void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw);
276289546Scemstatic int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
277250079Scarl    unsigned int qp_num);
278250079Scarlstatic void ntb_qp_link_work(void *arg);
279289545Scemstatic void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt);
280290683Scemstatic void ntb_transport_link_cleanup_work(void *, int);
281250079Scarlstatic void ntb_qp_link_down(struct ntb_transport_qp *qp);
282289613Scemstatic void ntb_qp_link_down_reset(struct ntb_transport_qp *qp);
283250079Scarlstatic void ntb_qp_link_cleanup(struct ntb_transport_qp *qp);
284250079Scarlstatic void ntb_send_link_down(struct ntb_transport_qp *qp);
285250079Scarlstatic void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
286250079Scarl    struct ntb_queue_list *list);
287250079Scarlstatic struct ntb_queue_entry *ntb_list_rm(struct mtx *lock,
288250079Scarl    struct ntb_queue_list *list);
289289651Scemstatic struct ntb_queue_entry *ntb_list_mv(struct mtx *lock,
290289651Scem    struct ntb_queue_list *from, struct ntb_queue_list *to);
291291084Scemstatic void xeon_link_watchdog_hb(void *);
292250079Scarl
293289546Scemstatic const struct ntb_ctx_ops ntb_transport_ops = {
294289546Scem	.link_event = ntb_transport_event_callback,
295289546Scem	.db_event = ntb_transport_doorbell_callback,
296289546Scem};
297289546Scem
298302484SmavMALLOC_DEFINE(M_NTB_T, "ntb_transport", "ntb transport driver");
299250079Scarl
300291028Scemstatic inline void
301291028Scemiowrite32(uint32_t val, void *addr)
302291028Scem{
303291028Scem
304291028Scem	bus_space_write_4(X86_BUS_SPACE_MEM, 0/* HACK */, (uintptr_t)addr,
305291028Scem	    val);
306291028Scem}
307291028Scem
308302484Smav/* Transport Init and teardown */
309250079Scarl
310302484Smavstatic void
311302484Smavxeon_link_watchdog_hb(void *arg)
312250079Scarl{
313302484Smav	struct ntb_transport_ctx *nt;
314250079Scarl
315302484Smav	nt = arg;
316304367Smav	ntb_spad_write(nt->dev, NTBT_WATCHDOG_SPAD, 0);
317302484Smav	callout_reset(&nt->link_watchdog, 1 * hz, xeon_link_watchdog_hb, nt);
318250079Scarl}
319250079Scarl
320250079Scarlstatic int
321302484Smavntb_transport_probe(device_t dev)
322250079Scarl{
323250079Scarl
324302484Smav	device_set_desc(dev, "NTB Transport");
325250079Scarl	return (0);
326250079Scarl}
327250079Scarl
328250079Scarlstatic int
329302484Smavntb_transport_attach(device_t dev)
330250079Scarl{
331302484Smav	struct ntb_transport_ctx *nt = device_get_softc(dev);
332304368Smav	struct ntb_transport_child **cpp = &nt->child;
333304368Smav	struct ntb_transport_child *nc;
334289546Scem	struct ntb_transport_mw *mw;
335329057Smav	uint64_t db_bitmap, size;
336304368Smav	int rc, i, db_count, spad_count, qp, qpu, qpo, qpt;
337304368Smav	char cfg[128] = "";
338304368Smav	char buf[32];
339304368Smav	char *n, *np, *c, *name;
340250079Scarl
341304351Smav	nt->dev = dev;
342304367Smav	nt->mw_count = ntb_mw_count(dev);
343304368Smav	spad_count = ntb_spad_count(dev);
344304368Smav	db_bitmap = ntb_db_valid_mask(dev);
345304368Smav	db_count = flsll(db_bitmap);
346304368Smav	KASSERT(db_bitmap == (1 << db_count) - 1,
347304368Smav	    ("Doorbells are not sequential (%jx).\n", db_bitmap));
348304368Smav
349304368Smav	if (nt->mw_count == 0) {
350304368Smav		device_printf(dev, "At least 1 memory window required.\n");
351304368Smav		return (ENXIO);
352304368Smav	}
353304368Smav	if (spad_count < 6) {
354304368Smav		device_printf(dev, "At least 6 scratchpads required.\n");
355304368Smav		return (ENXIO);
356304368Smav	}
357304368Smav	if (spad_count < 4 + 2 * nt->mw_count) {
358304368Smav		nt->mw_count = (spad_count - 4) / 2;
359304368Smav		device_printf(dev, "Scratchpads enough only for %d "
360304368Smav		    "memory windows.\n", nt->mw_count);
361304368Smav	}
362304368Smav	if (db_bitmap == 0) {
363304368Smav		device_printf(dev, "At least one doorbell required.\n");
364304368Smav		return (ENXIO);
365304368Smav	}
366304368Smav
367302484Smav	nt->mw_vec = malloc(nt->mw_count * sizeof(*nt->mw_vec), M_NTB_T,
368302484Smav	    M_WAITOK | M_ZERO);
369289546Scem	for (i = 0; i < nt->mw_count; i++) {
370289546Scem		mw = &nt->mw_vec[i];
371289396Scem
372304367Smav		rc = ntb_mw_get_range(dev, i, &mw->phys_addr, &mw->vbase,
373291033Scem		    &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size,
374291033Scem		    &mw->addr_limit);
375289546Scem		if (rc != 0)
376289546Scem			goto err;
377289546Scem
378289546Scem		mw->buff_size = 0;
379289546Scem		mw->xlat_size = 0;
380290679Scem		mw->virt_addr = NULL;
381289546Scem		mw->dma_addr = 0;
382300610Smav
383304367Smav		rc = ntb_mw_set_wc(dev, i, VM_MEMATTR_WRITE_COMBINING);
384300610Smav		if (rc)
385300610Smav			ntb_printf(0, "Unable to set mw%d caching\n", i);
386329057Smav
387329057Smav		/*
388329057Smav		 * Try to preallocate receive memory early, since there may
389329057Smav		 * be not enough contiguous memory later.  It is quite likely
390329057Smav		 * that NTB windows are symmetric and this allocation remain,
391329057Smav		 * but even if not, we will just reallocate it later.
392329057Smav		 */
393329057Smav		size = mw->phys_size;
394329057Smav		if (max_mw_size != 0 && size > max_mw_size)
395329057Smav			size = max_mw_size;
396329057Smav		ntb_set_mw(nt, i, size);
397289546Scem	}
398289546Scem
399304368Smav	qpu = 0;
400304368Smav	qpo = imin(db_count, nt->mw_count);
401304368Smav	qpt = db_count;
402289546Scem
403304368Smav	snprintf(buf, sizeof(buf), "hint.%s.%d.config", device_get_name(dev),
404304368Smav	    device_get_unit(dev));
405304368Smav	TUNABLE_STR_FETCH(buf, cfg, sizeof(cfg));
406304368Smav	n = cfg;
407304368Smav	i = 0;
408304368Smav	while ((c = strsep(&n, ",")) != NULL) {
409304368Smav		np = c;
410304368Smav		name = strsep(&np, ":");
411304368Smav		if (name != NULL && name[0] == 0)
412304368Smav			name = NULL;
413304368Smav		qp = (np && np[0] != 0) ? strtol(np, NULL, 10) : qpo - qpu;
414304368Smav		if (qp <= 0)
415304368Smav			qp = 1;
416289546Scem
417304368Smav		if (qp > qpt - qpu) {
418304368Smav			device_printf(dev, "Not enough resources for config\n");
419304368Smav			break;
420304368Smav		}
421304368Smav
422304368Smav		nc = malloc(sizeof(*nc), M_DEVBUF, M_WAITOK | M_ZERO);
423323455Smav		nc->consumer = i;
424304368Smav		nc->qpoff = qpu;
425304368Smav		nc->qpcnt = qp;
426304368Smav		nc->dev = device_add_child(dev, name, -1);
427304368Smav		if (nc->dev == NULL) {
428304368Smav			device_printf(dev, "Can not add child.\n");
429304368Smav			break;
430304368Smav		}
431304368Smav		device_set_ivars(nc->dev, nc);
432304368Smav		*cpp = nc;
433304368Smav		cpp = &nc->next;
434304368Smav
435304368Smav		if (bootverbose) {
436304368Smav			device_printf(dev, "%d \"%s\": queues %d",
437304368Smav			    i, name, qpu);
438304368Smav			if (qp > 1)
439304368Smav				printf("-%d", qpu + qp - 1);
440304368Smav			printf("\n");
441304368Smav		}
442304368Smav
443304368Smav		qpu += qp;
444304368Smav		i++;
445304368Smav	}
446304368Smav	nt->qp_count = qpu;
447304368Smav
448302484Smav	nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_T,
449289545Scem	    M_WAITOK | M_ZERO);
450250079Scarl
451304368Smav	for (i = 0; i < nt->qp_count; i++)
452250079Scarl		ntb_transport_init_queue(nt, i);
453250079Scarl
454250079Scarl	callout_init(&nt->link_work, 0);
455291084Scem	callout_init(&nt->link_watchdog, 0);
456290683Scem	TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt);
457323265Smav	nt->link_is_up = false;
458250079Scarl
459304367Smav	rc = ntb_set_ctx(dev, nt, &ntb_transport_ops);
460250079Scarl	if (rc != 0)
461250079Scarl		goto err;
462250079Scarl
463304367Smav	ntb_link_enable(dev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
464290682Scem
465291084Scem	if (enable_xeon_watchdog != 0)
466291084Scem		callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt);
467302484Smav
468302484Smav	bus_generic_attach(dev);
469250079Scarl	return (0);
470250079Scarl
471250079Scarlerr:
472302484Smav	free(nt->qp_vec, M_NTB_T);
473302484Smav	free(nt->mw_vec, M_NTB_T);
474250079Scarl	return (rc);
475250079Scarl}
476250079Scarl
477302484Smavstatic int
478302484Smavntb_transport_detach(device_t dev)
479250079Scarl{
480302484Smav	struct ntb_transport_ctx *nt = device_get_softc(dev);
481304368Smav	struct ntb_transport_child **cpp = &nt->child;
482304368Smav	struct ntb_transport_child *nc;
483304368Smav	int error = 0, i;
484250079Scarl
485304368Smav	while ((nc = *cpp) != NULL) {
486304368Smav		*cpp = (*cpp)->next;
487304368Smav		error = device_delete_child(dev, nc->dev);
488304368Smav		if (error)
489304368Smav			break;
490304368Smav		free(nc, M_DEVBUF);
491304368Smav	}
492304368Smav	KASSERT(nt->qp_bitmap == 0,
493304368Smav	    ("Some queues not freed on detach (%jx)", nt->qp_bitmap));
494302484Smav
495289273Scem	ntb_transport_link_cleanup(nt);
496290683Scem	taskqueue_drain(taskqueue_swi, &nt->link_cleanup);
497250079Scarl	callout_drain(&nt->link_work);
498291084Scem	callout_drain(&nt->link_watchdog);
499250079Scarl
500304367Smav	ntb_link_disable(dev);
501304367Smav	ntb_clear_ctx(dev);
502250079Scarl
503289546Scem	for (i = 0; i < nt->mw_count; i++)
504289153Scem		ntb_free_mw(nt, i);
505250079Scarl
506302484Smav	free(nt->qp_vec, M_NTB_T);
507302484Smav	free(nt->mw_vec, M_NTB_T);
508302484Smav	return (0);
509250079Scarl}
510250079Scarl
511323455Smavstatic int
512323455Smavntb_transport_print_child(device_t dev, device_t child)
513323455Smav{
514323455Smav	struct ntb_transport_child *nc = device_get_ivars(child);
515323455Smav	int retval;
516323455Smav
517323455Smav	retval = bus_print_child_header(dev, child);
518323455Smav	if (nc->qpcnt > 0) {
519323455Smav		printf(" queue %d", nc->qpoff);
520323455Smav		if (nc->qpcnt > 1)
521323455Smav			printf("-%d", nc->qpoff + nc->qpcnt - 1);
522323455Smav	}
523323455Smav	retval += printf(" at consumer %d", nc->consumer);
524323455Smav	retval += bus_print_child_domain(dev, child);
525323455Smav	retval += bus_print_child_footer(dev, child);
526323455Smav
527323455Smav	return (retval);
528323455Smav}
529323455Smav
530323455Smavstatic int
531323455Smavntb_transport_child_location_str(device_t dev, device_t child, char *buf,
532323455Smav    size_t buflen)
533323455Smav{
534323455Smav	struct ntb_transport_child *nc = device_get_ivars(child);
535323455Smav
536323455Smav	snprintf(buf, buflen, "consumer=%d", nc->consumer);
537323455Smav	return (0);
538323455Smav}
539323455Smav
540304368Smavint
541304368Smavntb_transport_queue_count(device_t dev)
542304368Smav{
543304368Smav	struct ntb_transport_child *nc = device_get_ivars(dev);
544304368Smav
545304368Smav	return (nc->qpcnt);
546304368Smav}
547304368Smav
548250079Scarlstatic void
549289545Scemntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num)
550250079Scarl{
551289546Scem	struct ntb_transport_mw *mw;
552250079Scarl	struct ntb_transport_qp *qp;
553289546Scem	vm_paddr_t mw_base;
554289546Scem	uint64_t mw_size, qp_offset;
555289546Scem	size_t tx_size;
556289546Scem	unsigned num_qps_mw, mw_num, mw_count;
557250079Scarl
558289546Scem	mw_count = nt->mw_count;
559289545Scem	mw_num = QP_TO_MW(nt, qp_num);
560289546Scem	mw = &nt->mw_vec[mw_num];
561289396Scem
562289545Scem	qp = &nt->qp_vec[qp_num];
563250079Scarl	qp->qp_num = qp_num;
564250079Scarl	qp->transport = nt;
565304367Smav	qp->dev = nt->dev;
566289545Scem	qp->client_ready = false;
567250079Scarl	qp->event_handler = NULL;
568289613Scem	ntb_qp_link_down_reset(qp);
569250079Scarl
570304347Smav	if (mw_num < nt->qp_count % mw_count)
571289545Scem		num_qps_mw = nt->qp_count / mw_count + 1;
572250079Scarl	else
573289545Scem		num_qps_mw = nt->qp_count / mw_count;
574250079Scarl
575289546Scem	mw_base = mw->phys_addr;
576289546Scem	mw_size = mw->phys_size;
577289546Scem
578289546Scem	tx_size = mw_size / num_qps_mw;
579290688Scem	qp_offset = tx_size * (qp_num / mw_count);
580289546Scem
581290679Scem	qp->tx_mw = mw->vbase + qp_offset;
582289546Scem	KASSERT(qp->tx_mw != NULL, ("uh oh?"));
583289546Scem
584289546Scem	/* XXX Assumes that a vm_paddr_t is equivalent to bus_addr_t */
585289546Scem	qp->tx_mw_phys = mw_base + qp_offset;
586289546Scem	KASSERT(qp->tx_mw_phys != 0, ("uh oh?"));
587289546Scem
588250079Scarl	tx_size -= sizeof(struct ntb_rx_info);
589290679Scem	qp->rx_info = (void *)(qp->tx_mw + tx_size);
590250079Scarl
591289156Scem	/* Due to house-keeping, there must be at least 2 buffs */
592304355Smav	qp->tx_max_frame = qmin(transport_mtu, tx_size / 2);
593250079Scarl	qp->tx_max_entry = tx_size / qp->tx_max_frame;
594250079Scarl
595250079Scarl	callout_init(&qp->link_work, 0);
596283291Sjkim	callout_init(&qp->rx_full, 1);
597250079Scarl
598289651Scem	mtx_init(&qp->ntb_rx_q_lock, "ntb rx q", NULL, MTX_SPIN);
599250079Scarl	mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN);
600304349Smav	mtx_init(&qp->tx_lock, "ntb transport tx", NULL, MTX_DEF);
601289546Scem	TASK_INIT(&qp->rxc_db_work, 0, ntb_transport_rxc_db, qp);
602304351Smav	qp->rxc_tq = taskqueue_create("ntbt_rx", M_WAITOK,
603304351Smav	    taskqueue_thread_enqueue, &qp->rxc_tq);
604304351Smav	taskqueue_start_threads(&qp->rxc_tq, 1, PI_NET, "%s rx%d",
605304351Smav	    device_get_nameunit(nt->dev), qp_num);
606250079Scarl
607289651Scem	STAILQ_INIT(&qp->rx_post_q);
608250079Scarl	STAILQ_INIT(&qp->rx_pend_q);
609250079Scarl	STAILQ_INIT(&qp->tx_free_q);
610250079Scarl}
611250079Scarl
612302484Smavvoid
613250079Scarlntb_transport_free_queue(struct ntb_transport_qp *qp)
614250079Scarl{
615304368Smav	struct ntb_transport_ctx *nt = qp->transport;
616250079Scarl	struct ntb_queue_entry *entry;
617250079Scarl
618250079Scarl	callout_drain(&qp->link_work);
619250079Scarl
620304367Smav	ntb_db_set_mask(qp->dev, 1ull << qp->qp_num);
621304351Smav	taskqueue_drain_all(qp->rxc_tq);
622304351Smav	taskqueue_free(qp->rxc_tq);
623250079Scarl
624289546Scem	qp->cb_data = NULL;
625289546Scem	qp->rx_handler = NULL;
626289546Scem	qp->tx_handler = NULL;
627289546Scem	qp->event_handler = NULL;
628289546Scem
629289651Scem	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q)))
630302484Smav		free(entry, M_NTB_T);
631250079Scarl
632289651Scem	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q)))
633302484Smav		free(entry, M_NTB_T);
634289651Scem
635250079Scarl	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
636302484Smav		free(entry, M_NTB_T);
637250079Scarl
638304368Smav	nt->qp_bitmap &= ~(1 << qp->qp_num);
639250079Scarl}
640250079Scarl
641250079Scarl/**
642250079Scarl * ntb_transport_create_queue - Create a new NTB transport layer queue
643250079Scarl * @rx_handler: receive callback function
644250079Scarl * @tx_handler: transmit callback function
645250079Scarl * @event_handler: event callback function
646250079Scarl *
647250079Scarl * Create a new NTB transport layer queue and provide the queue with a callback
648250079Scarl * routine for both transmit and receive.  The receive callback routine will be
649250079Scarl * used to pass up data when the transport has received it on the queue.   The
650250079Scarl * transmit callback routine will be called when the transport has completed the
651250079Scarl * transmission of the data on the queue and the data is ready to be freed.
652250079Scarl *
653250079Scarl * RETURNS: pointer to newly created ntb_queue, NULL on error.
654250079Scarl */
655302484Smavstruct ntb_transport_qp *
656304368Smavntb_transport_create_queue(device_t dev, int q,
657304368Smav    const struct ntb_queue_handlers *handlers, void *data)
658250079Scarl{
659304368Smav	struct ntb_transport_child *nc = device_get_ivars(dev);
660304368Smav	struct ntb_transport_ctx *nt = device_get_softc(device_get_parent(dev));
661250079Scarl	struct ntb_queue_entry *entry;
662250079Scarl	struct ntb_transport_qp *qp;
663289546Scem	int i;
664250079Scarl
665304368Smav	if (q < 0 || q >= nc->qpcnt)
666289546Scem		return (NULL);
667250079Scarl
668304368Smav	qp = &nt->qp_vec[nc->qpoff + q];
669304368Smav	nt->qp_bitmap |= (1 << qp->qp_num);
670250079Scarl	qp->cb_data = data;
671250079Scarl	qp->rx_handler = handlers->rx_handler;
672250079Scarl	qp->tx_handler = handlers->tx_handler;
673250079Scarl	qp->event_handler = handlers->event_handler;
674250079Scarl
675250079Scarl	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
676302484Smav		entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO);
677302484Smav		entry->cb_data = data;
678250079Scarl		entry->buf = NULL;
679250079Scarl		entry->len = transport_mtu;
680304348Smav		entry->qp = qp;
681289651Scem		ntb_list_add(&qp->ntb_rx_q_lock, entry, &qp->rx_pend_q);
682250079Scarl	}
683250079Scarl
684250079Scarl	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
685302484Smav		entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO);
686304348Smav		entry->qp = qp;
687250079Scarl		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
688250079Scarl	}
689250079Scarl
690304367Smav	ntb_db_clear(dev, 1ull << qp->qp_num);
691250079Scarl	return (qp);
692250079Scarl}
693250079Scarl
694250079Scarl/**
695250079Scarl * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
696250079Scarl * @qp: NTB transport layer queue to be enabled
697250079Scarl *
698250079Scarl * Notify NTB transport layer of client readiness to use queue
699250079Scarl */
700302484Smavvoid
701250079Scarlntb_transport_link_up(struct ntb_transport_qp *qp)
702250079Scarl{
703302484Smav	struct ntb_transport_ctx *nt = qp->transport;
704250079Scarl
705289545Scem	qp->client_ready = true;
706250079Scarl
707304347Smav	ntb_printf(2, "qp %d client ready\n", qp->qp_num);
708290684Scem
709302484Smav	if (nt->link_is_up)
710250079Scarl		callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
711250079Scarl}
712250079Scarl
713250079Scarl
714250079Scarl
715250079Scarl/* Transport Tx */
716250079Scarl
717250079Scarl/**
718250079Scarl * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
719250079Scarl * @qp: NTB transport layer queue the entry is to be enqueued on
720250079Scarl * @cb: per buffer pointer for callback function to use
721250079Scarl * @data: pointer to data buffer that will be sent
722250079Scarl * @len: length of the data buffer
723250079Scarl *
724250079Scarl * Enqueue a new transmit buffer onto the transport queue from which a NTB
725289266Scem * payload will be transmitted.  This assumes that a lock is being held to
726250079Scarl * serialize access to the qp.
727250079Scarl *
728250079Scarl * RETURNS: An appropriate ERRNO error value on error, or zero for success.
729250079Scarl */
730302484Smavint
731250079Scarlntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
732250079Scarl    unsigned int len)
733250079Scarl{
734250079Scarl	struct ntb_queue_entry *entry;
735250079Scarl	int rc;
736250079Scarl
737304370Smav	if (!qp->link_is_up || len == 0) {
738250079Scarl		CTR0(KTR_NTB, "TX: link not up");
739250079Scarl		return (EINVAL);
740250079Scarl	}
741250079Scarl
742250079Scarl	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
743250079Scarl	if (entry == NULL) {
744255281Scarl		CTR0(KTR_NTB, "TX: could not get entry from tx_free_q");
745289653Scem		qp->tx_err_no_buf++;
746289653Scem		return (EBUSY);
747250079Scarl	}
748250079Scarl	CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry);
749250079Scarl
750250079Scarl	entry->cb_data = cb;
751250079Scarl	entry->buf = data;
752250079Scarl	entry->len = len;
753250079Scarl	entry->flags = 0;
754250079Scarl
755304349Smav	mtx_lock(&qp->tx_lock);
756250079Scarl	rc = ntb_process_tx(qp, entry);
757304349Smav	mtx_unlock(&qp->tx_lock);
758250079Scarl	if (rc != 0) {
759250079Scarl		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
760250079Scarl		CTR1(KTR_NTB,
761250079Scarl		    "TX: process_tx failed. Returning entry %p to tx_free_q",
762250079Scarl		    entry);
763250079Scarl	}
764250079Scarl	return (rc);
765250079Scarl}
766250079Scarl
767304348Smavstatic void
768304348Smavntb_tx_copy_callback(void *data)
769304348Smav{
770304348Smav	struct ntb_queue_entry *entry = data;
771304348Smav	struct ntb_transport_qp *qp = entry->qp;
772304348Smav	struct ntb_payload_header *hdr = entry->x_hdr;
773304348Smav
774304348Smav	iowrite32(entry->flags | NTBT_DESC_DONE_FLAG, &hdr->flags);
775304348Smav	CTR1(KTR_NTB, "TX: hdr %p set DESC_DONE", hdr);
776304348Smav
777304367Smav	ntb_peer_db_set(qp->dev, 1ull << qp->qp_num);
778304348Smav
779304348Smav	/*
780304348Smav	 * The entry length can only be zero if the packet is intended to be a
781304348Smav	 * "link down" or similar.  Since no payload is being sent in these
782304348Smav	 * cases, there is nothing to add to the completion queue.
783304348Smav	 */
784304348Smav	if (entry->len > 0) {
785304348Smav		qp->tx_bytes += entry->len;
786304348Smav
787304348Smav		if (qp->tx_handler)
788304348Smav			qp->tx_handler(qp, qp->cb_data, entry->buf,
789304348Smav			    entry->len);
790304348Smav		else
791304348Smav			m_freem(entry->buf);
792304348Smav		entry->buf = NULL;
793304348Smav	}
794304348Smav
795304348Smav	CTR3(KTR_NTB,
796304348Smav	    "TX: entry %p sent. hdr->ver = %u, hdr->flags = 0x%x, Returning "
797304348Smav	    "to tx_free_q", entry, hdr->ver, hdr->flags);
798304348Smav	ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
799304348Smav}
800304348Smav
/*
 * Copy the mbuf payload into the tx window slot at 'offset'.  A NULL
 * buf (zero-length control frame, e.g. the link-down message) skips
 * straight to the completion callback.
 */
801304348Smavstatic void
802304348Smavntb_memcpy_tx(struct ntb_queue_entry *entry, void *offset)
803304348Smav{
804304348Smav
805304348Smav	CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset);
806304348Smav	if (entry->buf != NULL) {
807304348Smav		m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset);
808304348Smav
809304348Smav		/*
810304348Smav		 * Ensure that the data is fully copied before setting the
811304348Smav		 * flags
812304348Smav		 */
813304348Smav		wmb();
814304348Smav	}
815304348Smav
816304348Smav	ntb_tx_copy_callback(entry);
817304348Smav}
818304348Smav
/*
 * Stage one frame into the tx window slot selected by tx_index: fill in
 * the payload header at the tail of the slot, then copy the data.
 */
819304348Smavstatic void
820304348Smavntb_async_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)
821304348Smav{
822304348Smav	struct ntb_payload_header *hdr;
823304348Smav	void *offset;
824304348Smav
	/* The payload header lives at the tail of each fixed-size slot. */
825304348Smav	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
826304348Smav	hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame -
827304348Smav	    sizeof(struct ntb_payload_header));
828304348Smav	entry->x_hdr = hdr;
829304348Smav
	/* ver carries tx_pkts; the receiver checks it against its rx_pkts. */
830304348Smav	iowrite32(entry->len, &hdr->len);
831304348Smav	iowrite32(qp->tx_pkts, &hdr->ver);
832304348Smav
833304348Smav	ntb_memcpy_tx(entry, offset);
834304348Smav}
835304348Smav
/*
 * Enqueue one descriptor into the tx ring.  Returns EAGAIN when the
 * ring is full; frames too large for a slot are completed locally with
 * EIO and return 0.  Called with qp->tx_lock held.
 */
836250079Scarlstatic int
837250079Scarlntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)
838250079Scarl{
839250079Scarl
840250079Scarl	CTR3(KTR_NTB,
841291028Scem	    "TX: process_tx: tx_pkts=%lu, tx_index=%u, remote entry=%u",
842250079Scarl	    qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry);
	/* remote_rx_info->entry is the consumer index advertised by the peer. */
843250079Scarl	if (qp->tx_index == qp->remote_rx_info->entry) {
844250079Scarl		CTR0(KTR_NTB, "TX: ring full");
845250079Scarl		qp->tx_ring_full++;
846250079Scarl		return (EAGAIN);
847250079Scarl	}
848250079Scarl
	/* Oversize frame: complete with EIO status in the len argument. */
849250079Scarl	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
850250079Scarl		if (qp->tx_handler != NULL)
851250079Scarl			qp->tx_handler(qp, qp->cb_data, entry->buf,
852291034Scem			    EIO);
853291034Scem		else
854291034Scem			m_freem(entry->buf);
855250079Scarl
856291034Scem		entry->buf = NULL;
857250079Scarl		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
858250079Scarl		CTR1(KTR_NTB,
859250079Scarl		    "TX: frame too big. returning entry %p to tx_free_q",
860250079Scarl		    entry);
861250079Scarl		return (0);
862250079Scarl	}
863304348Smav	CTR2(KTR_NTB, "TX: copying entry %p to index %u", entry, qp->tx_index);
864304348Smav	ntb_async_tx(qp, entry);
865250079Scarl
866250079Scarl	qp->tx_index++;
867250079Scarl	qp->tx_index %= qp->tx_max_entry;
868250079Scarl
869250079Scarl	qp->tx_pkts++;
870250079Scarl
871250079Scarl	return (0);
872250079Scarl}
873250079Scarl
874250079Scarl/* Transport Rx */
/*
 * Taskqueue handler: drain the rx ring.  After the ring runs dry the
 * qp's doorbell is re-checked (and cleared) to close the race with a
 * peer that rang it during processing; the doorbell is unmasked again
 * only once the queue is verifiably empty and the link is still up.
 */
875250079Scarlstatic void
876289546Scemntb_transport_rxc_db(void *arg, int pending __unused)
877250079Scarl{
878289281Scem	struct ntb_transport_qp *qp = arg;
879323452Smav	uint64_t qp_mask = 1ull << qp->qp_num;
880289157Scem	int rc;
881250079Scarl
882289546Scem	CTR0(KTR_NTB, "RX: transport_rx");
883304351Smavagain:
	/* Consume frames until the ring is empty or an error stops us. */
884304351Smav	while ((rc = ntb_process_rxc(qp)) == 0)
885304351Smav		;
886304351Smav	CTR1(KTR_NTB, "RX: process_rxc returned %d", rc);
887289281Scem
888323452Smav	if ((ntb_db_read(qp->dev) & qp_mask) != 0) {
889304351Smav		/* If db is set, clear it and check queue once more. */
890323452Smav		ntb_db_clear(qp->dev, qp_mask);
891304351Smav		goto again;
892289546Scem	}
893323452Smav	if (qp->link_is_up)
894323452Smav		ntb_db_clear_mask(qp->dev, qp_mask);
895250079Scarl}
896250079Scarl
/*
 * Consume at most one frame from the rx ring.  Returns 0 when a frame
 * was consumed, EAGAIN when the ring is empty (or a link-down frame or
 * a missing client buffer stops progress), EIO on a sequence-number
 * mismatch.
 */
897250079Scarlstatic int
898250079Scarlntb_process_rxc(struct ntb_transport_qp *qp)
899250079Scarl{
900250079Scarl	struct ntb_payload_header *hdr;
901250079Scarl	struct ntb_queue_entry *entry;
902291028Scem	caddr_t offset;
903250079Scarl
	/* The payload header sits at the tail of each rx slot. */
904291028Scem	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
905291028Scem	hdr = (void *)(offset + qp->rx_max_frame -
906291028Scem	    sizeof(struct ntb_payload_header));
907250079Scarl
908250079Scarl	CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index);
909302484Smav	if ((hdr->flags & NTBT_DESC_DONE_FLAG) == 0) {
910289546Scem		CTR0(KTR_NTB, "RX: hdr not done");
911250079Scarl		qp->rx_ring_empty++;
912250079Scarl		return (EAGAIN);
913250079Scarl	}
914250079Scarl
	/* Zero-length control frame: the peer is taking this qp down. */
915302484Smav	if ((hdr->flags & NTBT_LINK_DOWN_FLAG) != 0) {
916289546Scem		CTR0(KTR_NTB, "RX: link down");
917289546Scem		ntb_qp_link_down(qp);
918289546Scem		hdr->flags = 0;
919289546Scem		return (EAGAIN);
920289546Scem	}
921289546Scem
	/* Sequence check: ver was stamped with the sender's tx_pkts. */
922289546Scem	if (hdr->ver != (uint32_t)qp->rx_pkts) {
923289546Scem		CTR2(KTR_NTB,"RX: ver != rx_pkts (%x != %lx). "
924291028Scem		    "Returning entry to rx_pend_q", hdr->ver, qp->rx_pkts);
925250079Scarl		qp->rx_err_ver++;
926250079Scarl		return (EIO);
927250079Scarl	}
928250079Scarl
	/* Claim a client buffer, moving it to the in-flight (post) queue. */
929289651Scem	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
930289546Scem	if (entry == NULL) {
931289546Scem		qp->rx_err_no_buf++;
932289546Scem		CTR0(KTR_NTB, "RX: No entries in rx_pend_q");
933289546Scem		return (EAGAIN);
934250079Scarl	}
935289546Scem	callout_stop(&qp->rx_full);
936289546Scem	CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry);
937250079Scarl
938289546Scem	entry->x_hdr = hdr;
939289546Scem	entry->index = qp->rx_index;
940250079Scarl
	/* Frame larger than the client buffer: complete with -EIO in len. */
941289546Scem	if (hdr->len > entry->len) {
942289546Scem		CTR2(KTR_NTB, "RX: len too long. Wanted %ju got %ju",
943289546Scem		    (uintmax_t)hdr->len, (uintmax_t)entry->len);
944250079Scarl		qp->rx_err_oflow++;
945250079Scarl
946289546Scem		entry->len = -EIO;
947302484Smav		entry->flags |= NTBT_DESC_DONE_FLAG;
948250079Scarl
949304350Smav		ntb_complete_rxc(qp);
950289546Scem	} else {
951289546Scem		qp->rx_bytes += hdr->len;
952289546Scem		qp->rx_pkts++;
953250079Scarl
954289546Scem		CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts);
955250079Scarl
956289546Scem		entry->len = hdr->len;
957289546Scem
958289651Scem		ntb_memcpy_rx(qp, entry, offset);
959289546Scem	}
960289546Scem
961250079Scarl	qp->rx_index++;
962250079Scarl	qp->rx_index %= qp->rx_max_entry;
963250079Scarl	return (0);
964250079Scarl}
965250079Scarl
/*
 * Copy a received frame out of the shared window into a freshly
 * allocated mbuf chain; on allocation failure -ENOMEM is recorded in
 * entry->len for the completion path to see.
 */
966250079Scarlstatic void
967289651Scemntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
968250079Scarl    void *offset)
969250079Scarl{
970250079Scarl	struct ifnet *ifp = entry->cb_data;
971250079Scarl	unsigned int len = entry->len;
972250079Scarl
973250079Scarl	CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset);
974250079Scarl
	/* m_devget() allocates the mbuf chain and copies the frame into it. */
975302484Smav	entry->buf = (void *)m_devget(offset, len, 0, ifp, NULL);
976304356Smav	if (entry->buf == NULL)
977304356Smav		entry->len = -ENOMEM;
978250079Scarl
979289546Scem	/* Ensure that the data is globally visible before clearing the flag */
980289546Scem	wmb();
981289546Scem
982304364Smav	CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p.", entry, entry->buf);
983289651Scem	ntb_rx_copy_callback(qp, entry);
984289651Scem}
985250079Scarl
/* Mark the entry completed and drain in-order completions to the client. */
986289651Scemstatic inline void
987289651Scemntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data)
988289651Scem{
989289651Scem	struct ntb_queue_entry *entry;
990289651Scem
991289651Scem	entry = data;
992302484Smav	entry->flags |= NTBT_DESC_DONE_FLAG;
993304350Smav	ntb_complete_rxc(qp);
994289651Scem}
995250079Scarl
/*
 * Drain completed entries from rx_post_q in ring order: return each
 * slot credit to the peer, recycle the software descriptor onto
 * rx_pend_q, and deliver the mbuf to the client (or free it).  The rx
 * queue spin lock is dropped around the client callback.
 */
996250079Scarlstatic void
997304350Smavntb_complete_rxc(struct ntb_transport_qp *qp)
998250079Scarl{
999289651Scem	struct ntb_queue_entry *entry;
1000250079Scarl	struct mbuf *m;
1001289651Scem	unsigned len;
1002250079Scarl
1003250079Scarl	CTR0(KTR_NTB, "RX: rx_completion_task");
1004250079Scarl
1005289651Scem	mtx_lock_spin(&qp->ntb_rx_q_lock);
1006289651Scem
1007289651Scem	while (!STAILQ_EMPTY(&qp->rx_post_q)) {
1008289651Scem		entry = STAILQ_FIRST(&qp->rx_post_q);
		/* Stop at the first entry still in flight: keep ring order. */
1009302484Smav		if ((entry->flags & NTBT_DESC_DONE_FLAG) == 0)
1010289651Scem			break;
1011289651Scem
1012289651Scem		entry->x_hdr->flags = 0;
		/* Return the ring slot to the peer as a receive credit. */
1013291028Scem		iowrite32(entry->index, &qp->rx_info->entry);
1014289651Scem
1015291034Scem		STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry);
1016291034Scem
1017289651Scem		len = entry->len;
1018250079Scarl		m = entry->buf;
1019289651Scem
1020291034Scem		/*
1021291034Scem		 * Re-initialize queue_entry for reuse; rx_handler takes
1022291034Scem		 * ownership of the mbuf.
1023291034Scem		 */
1024291034Scem		entry->buf = NULL;
1025291034Scem		entry->len = transport_mtu;
1026302484Smav		entry->cb_data = qp->cb_data;
1027289651Scem
1028291034Scem		STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry);
1029291034Scem
		/* Drop the spin lock across the client callback. */
1030289651Scem		mtx_unlock_spin(&qp->ntb_rx_q_lock);
1031289651Scem
1032250079Scarl		CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
1033289651Scem		if (qp->rx_handler != NULL && qp->client_ready)
1034289651Scem			qp->rx_handler(qp, qp->cb_data, m, len);
1035291034Scem		else
1036291034Scem			m_freem(m);
1037250079Scarl
1038289651Scem		mtx_lock_spin(&qp->ntb_rx_q_lock);
1039289651Scem	}
1040250079Scarl
1041289651Scem	mtx_unlock_spin(&qp->ntb_rx_q_lock);
1042250079Scarl}
1043250079Scarl
/*
 * Doorbell/interrupt dispatch: for every qp bit pending on this vector,
 * mask and clear the doorbell, then schedule that qp's rx taskqueue.
 */
1044289546Scemstatic void
1045289598Scemntb_transport_doorbell_callback(void *data, uint32_t vector)
1046289546Scem{
1047289546Scem	struct ntb_transport_ctx *nt = data;
1048289546Scem	struct ntb_transport_qp *qp;
1049289546Scem	uint64_t vec_mask;
1050289546Scem	unsigned qp_num;
1051289546Scem
1052304367Smav	vec_mask = ntb_db_vector_mask(nt->dev, vector);
1053304368Smav	vec_mask &= nt->qp_bitmap;
	/* More than one candidate qp: narrow to doorbells actually rung. */
1054304351Smav	if ((vec_mask & (vec_mask - 1)) != 0)
1055304367Smav		vec_mask &= ntb_db_read(nt->dev);
1056323454Smav	if (vec_mask != 0) {
1057323452Smav		ntb_db_set_mask(nt->dev, vec_mask);
1058323454Smav		ntb_db_clear(nt->dev, vec_mask);
1059323454Smav	}
1060289546Scem	while (vec_mask != 0) {
1061289775Scem		qp_num = ffsll(vec_mask) - 1;
1062289546Scem
1063304368Smav		qp = &nt->qp_vec[qp_num];
1064304368Smav		if (qp->link_is_up)
1065304368Smav			taskqueue_enqueue(qp->rxc_tq, &qp->rxc_db_work);
1066289546Scem
1067289546Scem		vec_mask &= ~(1ull << qp_num);
1068289546Scem	}
1069289546Scem}
1070289546Scem
1071250079Scarl/* Link Event handler */
/* Hardware link event: schedule transport bring-up or tear-down. */
1072250079Scarlstatic void
1073289546Scemntb_transport_event_callback(void *data)
1074250079Scarl{
1075289545Scem	struct ntb_transport_ctx *nt = data;
1076250079Scarl
1077317892Smav	if (ntb_link_is_up(nt->dev, &nt->link_speed, &nt->link_width)) {
1078290684Scem		ntb_printf(1, "HW link up\n");
		/* Start the scratchpad handshake immediately. */
1079250079Scarl		callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
1080289546Scem	} else {
1081290684Scem		ntb_printf(1, "HW link down\n");
		/* Cleanup sleeps (callout_drain), so defer it to a taskqueue. */
1082290683Scem		taskqueue_enqueue(taskqueue_swi, &nt->link_cleanup);
1083250079Scarl	}
1084250079Scarl}
1085250079Scarl
1086250079Scarl/* Link bring up */
/*
 * Callout: perform the scratchpad handshake with the peer.  Publish our
 * window sizes and counts, then check that the peer's version/qp/mw
 * values match ours; size the memory windows from the negotiated
 * values, declare the transport link up and kick per-qp bring-up.
 * Retries periodically while the hardware link stays up.
 */
1087250079Scarlstatic void
1088250079Scarlntb_transport_link_work(void *arg)
1089250079Scarl{
1090289545Scem	struct ntb_transport_ctx *nt = arg;
1091304367Smav	device_t dev = nt->dev;
1092250079Scarl	struct ntb_transport_qp *qp;
1093289546Scem	uint64_t val64, size;
1094289546Scem	uint32_t val;
1095289546Scem	unsigned i;
1096289208Scem	int rc;
1097250079Scarl
1098289153Scem	/* send the local info, in the opposite order of the way we read it */
1099289546Scem	for (i = 0; i < nt->mw_count; i++) {
1100289546Scem		size = nt->mw_vec[i].phys_size;
1101250079Scarl
1102289546Scem		if (max_mw_size != 0 && size > max_mw_size)
1103289546Scem			size = max_mw_size;
1104289546Scem
1105304367Smav		ntb_peer_spad_write(dev, NTBT_MW0_SZ_HIGH + (i * 2),
1106289546Scem		    size >> 32);
1107304367Smav		ntb_peer_spad_write(dev, NTBT_MW0_SZ_LOW + (i * 2), size);
1108289153Scem	}
1109304367Smav	ntb_peer_spad_write(dev, NTBT_NUM_MWS, nt->mw_count);
1110304367Smav	ntb_peer_spad_write(dev, NTBT_NUM_QPS, nt->qp_count);
1111304370Smav	ntb_peer_spad_write(dev, NTBT_QP_LINKS, 0);
	/* VERSION is written last: it validates everything written above. */
1112304367Smav	ntb_peer_spad_write(dev, NTBT_VERSION, NTB_TRANSPORT_VERSION);
1113250079Scarl
1114250079Scarl	/* Query the remote side for its info */
1115289546Scem	val = 0;
1116304367Smav	ntb_spad_read(dev, NTBT_VERSION, &val);
1117250079Scarl	if (val != NTB_TRANSPORT_VERSION)
1118250079Scarl		goto out;
1119250079Scarl
1120304367Smav	ntb_spad_read(dev, NTBT_NUM_QPS, &val);
1121289545Scem	if (val != nt->qp_count)
1122250079Scarl		goto out;
1123250079Scarl
1124304367Smav	ntb_spad_read(dev, NTBT_NUM_MWS, &val);
1125289546Scem	if (val != nt->mw_count)
1126250079Scarl		goto out;
1127250079Scarl
	/* Size each memory window to what the peer advertised. */
1128289546Scem	for (i = 0; i < nt->mw_count; i++) {
1129304367Smav		ntb_spad_read(dev, NTBT_MW0_SZ_HIGH + (i * 2), &val);
1130289153Scem		val64 = (uint64_t)val << 32;
1131250079Scarl
1132304367Smav		ntb_spad_read(dev, NTBT_MW0_SZ_LOW + (i * 2), &val);
1133289153Scem		val64 |= val;
1134250079Scarl
1135289153Scem		rc = ntb_set_mw(nt, i, val64);
1136289153Scem		if (rc != 0)
1137289153Scem			goto free_mws;
1138289153Scem	}
1139289153Scem
1140289545Scem	nt->link_is_up = true;
1141290684Scem	ntb_printf(1, "transport link up\n");
1142250079Scarl
1143289545Scem	for (i = 0; i < nt->qp_count; i++) {
1144289545Scem		qp = &nt->qp_vec[i];
1145250079Scarl
1146250079Scarl		ntb_transport_setup_qp_mw(nt, i);
1147250079Scarl
1148289545Scem		if (qp->client_ready)
1149250079Scarl			callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
1150250079Scarl	}
1151250079Scarl
1152250079Scarl	return;
1153250079Scarl
1154289153Scemfree_mws:
1155289546Scem	for (i = 0; i < nt->mw_count; i++)
1156289153Scem		ntb_free_mw(nt, i);
1157250079Scarlout:
	/* Handshake incomplete: retry while the HW link remains up. */
1158317892Smav	if (ntb_link_is_up(dev, &nt->link_speed, &nt->link_width))
1159250079Scarl		callout_reset(&nt->link_work,
1160289153Scem		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt);
1161250079Scarl}
1162250079Scarl
/* Results handed back by ntb_load_cb() for a single-segment DMA load. */
1163329056Smavstruct ntb_load_cb_args {
1164329056Smav	bus_addr_t addr;
1165329056Smav	int error;
1166329056Smav};
1167329056Smav
1168329056Smavstatic void
1169329056Smavntb_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
1170329056Smav{
1171329056Smav	struct ntb_load_cb_args *cba = (struct ntb_load_cb_args *)xsc;
1172329056Smav
1173329056Smav	if (!(cba->error = error))
1174329056Smav		cba->addr = segs[0].ds_addr;
1175329056Smav}
1176329056Smav
/*
 * Allocate (or keep) the DMA buffer backing memory window num_mw, sized
 * and aligned per the window's constraints, and program the NTB
 * translation so the peer's writes land in it.  Returns 0 on success,
 * EINVAL for a zero size, ENOMEM on allocation/mapping failure, or the
 * ntb_mw_set_trans() error.
 */
1177250079Scarlstatic int
1178289652Scemntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, size_t size)
1179250079Scarl{
1180289545Scem	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
1181329056Smav	struct ntb_load_cb_args cba;
1182289652Scem	size_t xlat_size, buff_size;
1183289546Scem	int rc;
1184250079Scarl
1185289652Scem	if (size == 0)
1186289652Scem		return (EINVAL);
1187289652Scem
1188289546Scem	xlat_size = roundup(size, mw->xlat_align_size);
1189291706Scem	buff_size = xlat_size;
1190289546Scem
1191289154Scem	/* No need to re-setup */
1192289546Scem	if (mw->xlat_size == xlat_size)
1193289154Scem		return (0);
1194289154Scem
	/* Re-size: drop the old buffer and translation first. */
1195289546Scem	if (mw->buff_size != 0)
1196289154Scem		ntb_free_mw(nt, num_mw);
1197289154Scem
1198289546Scem	/* Alloc memory for receiving data.  Must be aligned */
1199289546Scem	mw->xlat_size = xlat_size;
1200289546Scem	mw->buff_size = buff_size;
1201250079Scarl
	/* One contiguous segment, aligned and bounded as the HW requires. */
1202329056Smav	if (bus_dma_tag_create(bus_get_dma_tag(nt->dev), mw->xlat_align, 0,
1203329056Smav	    mw->addr_limit, BUS_SPACE_MAXADDR,
1204329056Smav	    NULL, NULL, mw->buff_size, 1, mw->buff_size,
1205329056Smav	    0, NULL, NULL, &mw->dma_tag)) {
1206329056Smav		ntb_printf(0, "Unable to create MW tag of size %zu/%zu\n",
1207329056Smav		    mw->buff_size, mw->xlat_size);
1208329056Smav		mw->xlat_size = 0;
1209329056Smav		mw->buff_size = 0;
1210329056Smav		return (ENOMEM);
1211329056Smav	}
1212329056Smav	if (bus_dmamem_alloc(mw->dma_tag, (void **)&mw->virt_addr,
1213329056Smav	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &mw->dma_map)) {
1214329056Smav		bus_dma_tag_destroy(mw->dma_tag);
1215291705Scem		ntb_printf(0, "Unable to allocate MW buffer of size %zu/%zu\n",
1216291705Scem		    mw->buff_size, mw->xlat_size);
1217289545Scem		mw->xlat_size = 0;
1218289546Scem		mw->buff_size = 0;
1219250079Scarl		return (ENOMEM);
1220250079Scarl	}
1221329056Smav	if (bus_dmamap_load(mw->dma_tag, mw->dma_map, mw->virt_addr,
1222329056Smav	    mw->buff_size, ntb_load_cb, &cba, BUS_DMA_NOWAIT) || cba.error) {
1223329056Smav		bus_dmamem_free(mw->dma_tag, mw->virt_addr, mw->dma_map);
1224329056Smav		bus_dma_tag_destroy(mw->dma_tag);
1225329056Smav		ntb_printf(0, "Unable to load MW buffer of size %zu/%zu\n",
1226329056Smav		    mw->buff_size, mw->xlat_size);
1227329056Smav		mw->xlat_size = 0;
1228329056Smav		mw->buff_size = 0;
1229289346Scem		return (ENOMEM);
1230289346Scem	}
1231329056Smav	mw->dma_addr = cba.addr;
1232289346Scem
1233250079Scarl	/* Notify HW the memory location of the receive buffer */
1234304367Smav	rc = ntb_mw_set_trans(nt->dev, num_mw, mw->dma_addr, mw->xlat_size);
1235289546Scem	if (rc) {
1236290684Scem		ntb_printf(0, "Unable to set mw%d translation\n", num_mw);
1237289546Scem		ntb_free_mw(nt, num_mw);
1238289546Scem		return (rc);
1239289546Scem	}
1240250079Scarl
1241250079Scarl	return (0);
1242250079Scarl}
1243250079Scarl
/*
 * Tear down a memory window: clear the NTB translation and release the
 * backing DMA buffer.  Safe to call on a window that was never set up.
 */
1244250079Scarlstatic void
1245289545Scemntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
1246289153Scem{
1247289545Scem	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
1248289153Scem
1249289153Scem	if (mw->virt_addr == NULL)
1250289153Scem		return;
1251289153Scem
	/* Stop peer writes before the buffer goes away. */
1252304367Smav	ntb_mw_clear_trans(nt->dev, num_mw);
1253329056Smav	bus_dmamap_unload(mw->dma_tag, mw->dma_map);
1254329056Smav	bus_dmamem_free(mw->dma_tag, mw->virt_addr, mw->dma_map);
1255329056Smav	bus_dma_tag_destroy(mw->dma_tag);
1256289546Scem	mw->xlat_size = 0;
1257289546Scem	mw->buff_size = 0;
1258289153Scem	mw->virt_addr = NULL;
1259289153Scem}
1260289153Scem
/*
 * Set up qp_num's receive ring inside its memory window.  QPs sharing a
 * window split it evenly; the tail of each qp's slice holds an
 * ntb_rx_info structure, and each frame slot ends with a payload
 * header.  Returns ENOMEM when the window buffer is not allocated.
 */
1261289546Scemstatic int
1262289545Scemntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num)
1263250079Scarl{
1264289545Scem	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
1265289546Scem	struct ntb_transport_mw *mw;
1266250079Scarl	void *offset;
1267289653Scem	ntb_q_idx_t i;
1268289546Scem	size_t rx_size;
1269289546Scem	unsigned num_qps_mw, mw_num, mw_count;
1270250079Scarl
1271289546Scem	mw_count = nt->mw_count;
1272289545Scem	mw_num = QP_TO_MW(nt, qp_num);
1273289546Scem	mw = &nt->mw_vec[mw_num];
1274289396Scem
1275289546Scem	if (mw->virt_addr == NULL)
1276289546Scem		return (ENOMEM);
1277289546Scem
	/* Windows with a lower index absorb any remainder qps. */
1278304347Smav	if (mw_num < nt->qp_count % mw_count)
1279289545Scem		num_qps_mw = nt->qp_count / mw_count + 1;
1280250079Scarl	else
1281289545Scem		num_qps_mw = nt->qp_count / mw_count;
1282250079Scarl
1283289546Scem	rx_size = mw->xlat_size / num_qps_mw;
1284290688Scem	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
1285250079Scarl	rx_size -= sizeof(struct ntb_rx_info);
1286250079Scarl
	/* The rx_info block sits at the very end of this qp's slice. */
1287290679Scem	qp->remote_rx_info = (void*)(qp->rx_buff + rx_size);
1288289546Scem
1289289156Scem	/* Due to house-keeping, there must be at least 2 buffs */
1290304355Smav	qp->rx_max_frame = qmin(transport_mtu, rx_size / 2);
1291250079Scarl	qp->rx_max_entry = rx_size / qp->rx_max_frame;
1292250079Scarl	qp->rx_index = 0;
1293250079Scarl
	/* Start with all slots but one credited, preserving the full/empty gap. */
1294289156Scem	qp->remote_rx_info->entry = qp->rx_max_entry - 1;
1295250079Scarl
1296289546Scem	/* Set up the hdr offsets with 0s */
1297250079Scarl	for (i = 0; i < qp->rx_max_entry; i++) {
1298290679Scem		offset = (void *)(qp->rx_buff + qp->rx_max_frame * (i + 1) -
1299250079Scarl		    sizeof(struct ntb_payload_header));
1300250079Scarl		memset(offset, 0, sizeof(struct ntb_payload_header));
1301250079Scarl	}
1302250079Scarl
1303250079Scarl	qp->rx_pkts = 0;
1304250079Scarl	qp->tx_pkts = 0;
1305250079Scarl	qp->tx_index = 0;
1306289546Scem
1307289546Scem	return (0);
1308250079Scarl}
1309250079Scarl
/*
 * Callout: advertise our ready queues in the QP_LINKS scratchpad and
 * poll the peer's word.  When the peer reports this qp ready, mark the
 * qp link up, notify the client and unmask the qp's doorbell; otherwise
 * re-arm while the transport link stays up.
 */
1310250079Scarlstatic void
1311250079Scarlntb_qp_link_work(void *arg)
1312250079Scarl{
1313250079Scarl	struct ntb_transport_qp *qp = arg;
1314304367Smav	device_t dev = qp->dev;
1315289545Scem	struct ntb_transport_ctx *nt = qp->transport;
1316304370Smav	int i;
1317304370Smav	uint32_t val;
1318250079Scarl
1319304370Smav	/* Report queues that are up on our side */
1320304370Smav	for (i = 0, val = 0; i < nt->qp_count; i++) {
1321304370Smav		if (nt->qp_vec[i].client_ready)
1322304370Smav			val |= (1 << i);
1323304370Smav	}
1324304370Smav	ntb_peer_spad_write(dev, NTBT_QP_LINKS, val);
1325250079Scarl
1326250079Scarl	/* See if the remote side is up */
1327304370Smav	ntb_spad_read(dev, NTBT_QP_LINKS, &val);
1328289546Scem	if ((val & (1ull << qp->qp_num)) != 0) {
1329304347Smav		ntb_printf(2, "qp %d link up\n", qp->qp_num);
1330289545Scem		qp->link_is_up = true;
1331289546Scem
1332250079Scarl		if (qp->event_handler != NULL)
1333250079Scarl			qp->event_handler(qp->cb_data, NTB_LINK_UP);
1334289546Scem
		/* Start taking doorbell interrupts for this qp. */
1335304367Smav		ntb_db_clear_mask(dev, 1ull << qp->qp_num);
1336289546Scem	} else if (nt->link_is_up)
1337250079Scarl		callout_reset(&qp->link_work,
1338250079Scarl		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
1339250079Scarl}
1340250079Scarl
1341250079Scarl/* Link down event*/
/*
 * Bring the whole transport down: stop the bring-up callout, reset
 * every active qp, and wipe the scratchpad registers.
 */
1342250079Scarlstatic void
1343289545Scemntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
1344250079Scarl{
1345289546Scem	struct ntb_transport_qp *qp;
1346304368Smav	int i;
1347250079Scarl
1348323265Smav	callout_drain(&nt->link_work);
1349323265Smav	nt->link_is_up = 0;
1350323265Smav
1351289273Scem	/* Pass along the info to any clients */
1352304368Smav	for (i = 0; i < nt->qp_count; i++) {
		/* qp_bitmap marks the queues this transport instance owns. */
1353304368Smav		if ((nt->qp_bitmap & (1 << i)) != 0) {
1354289546Scem			qp = &nt->qp_vec[i];
1355289546Scem			ntb_qp_link_cleanup(qp);
1356289546Scem			callout_drain(&qp->link_work);
1357289546Scem		}
1358304368Smav	}
1359289273Scem
1360289341Scem	/*
1361250079Scarl	 * The scratchpad registers keep the values if the remote side
1362250079Scarl	 * goes down, blast them now to give them a sane value the next
1363250079Scarl	 * time they are accessed
1364250079Scarl	 */
1365304368Smav	ntb_spad_clear(nt->dev);
1366250079Scarl}
1367250079Scarl
/* Taskqueue wrapper around ntb_transport_link_cleanup(). */
1368290683Scemstatic void
1369290683Scemntb_transport_link_cleanup_work(void *arg, int pending __unused)
1370290683Scem{
1371250079Scarl
1372290683Scem	ntb_transport_link_cleanup(arg);
1373290683Scem}
1374290683Scem
/* Peer-initiated qp link down (a LINK_DOWN control frame was received). */
1375250079Scarlstatic void
1376250079Scarlntb_qp_link_down(struct ntb_transport_qp *qp)
1377250079Scarl{
1378250079Scarl
1379250079Scarl	ntb_qp_link_cleanup(qp);
1380250079Scarl}
1381250079Scarl
/*
 * Mark the qp link down, mask its doorbell and reset all ring indices
 * and statistics to their initial state.
 */
1382250079Scarlstatic void
1383289613Scemntb_qp_link_down_reset(struct ntb_transport_qp *qp)
1384289613Scem{
1385289613Scem
1386289613Scem	qp->link_is_up = false;
1387304367Smav	ntb_db_set_mask(qp->dev, 1ull << qp->qp_num);
1388289613Scem
1389289613Scem	qp->tx_index = qp->rx_index = 0;
1390289613Scem	qp->tx_bytes = qp->rx_bytes = 0;
1391289613Scem	qp->tx_pkts = qp->rx_pkts = 0;
1392289613Scem
1393289613Scem	qp->rx_ring_empty = 0;
1394289613Scem	qp->tx_ring_full = 0;
1395289613Scem
1396289653Scem	qp->rx_err_no_buf = qp->tx_err_no_buf = 0;
1397289653Scem	qp->rx_err_oflow = qp->rx_err_ver = 0;
1398289613Scem}
1399289613Scem
/*
 * Stop any pending bring-up retries, reset the qp, and notify the
 * client that the qp link went down.
 */
1400289613Scemstatic void
1401250079Scarlntb_qp_link_cleanup(struct ntb_transport_qp *qp)
1402250079Scarl{
1403250079Scarl
1404289613Scem	callout_drain(&qp->link_work);
1405289613Scem	ntb_qp_link_down_reset(qp);
1406250079Scarl
1407250079Scarl	if (qp->event_handler != NULL)
1408250079Scarl		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
1409250079Scarl}
1410250079Scarl
1411250079Scarl/* Link commanded down */
1412250079Scarl/**
1413250079Scarl * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
1414250079Scarl * @qp: NTB transport layer queue to be disabled
1415250079Scarl *
1416250079Scarl * Notify NTB transport layer of client's desire to no longer receive data on
1417250079Scarl * transport queue specified.  It is the client's responsibility to ensure all
1418289266Scem * entries on queue are purged or otherwise handled appropriately.
1419250079Scarl */
1420302484Smavvoid
1421250079Scarlntb_transport_link_down(struct ntb_transport_qp *qp)
1422250079Scarl{
1423304370Smav	struct ntb_transport_ctx *nt = qp->transport;
1424304370Smav	int i;
1425289546Scem	uint32_t val;
1426250079Scarl
1427289545Scem	qp->client_ready = false;
	/* Re-publish the ready-queue mask, now with this qp excluded. */
1428304370Smav	for (i = 0, val = 0; i < nt->qp_count; i++) {
1429304370Smav		if (nt->qp_vec[i].client_ready)
1430304370Smav			val |= (1 << i);
1431304370Smav	}
1432304370Smav	ntb_peer_spad_write(qp->dev, NTBT_QP_LINKS, val);
1433250079Scarl
	/* Tell the peer explicitly if the link is up; otherwise just stop retrying. */
1434289545Scem	if (qp->link_is_up)
1435250079Scarl		ntb_send_link_down(qp);
1436250079Scarl	else
1437250079Scarl		callout_drain(&qp->link_work);
1438250079Scarl}
1439250079Scarl
1440302484Smav/**
1441302484Smav * ntb_transport_link_query - Query transport link state
1442302484Smav * @qp: NTB transport layer queue to be queried
1443302484Smav *
1444302484Smav * Query connectivity to the remote system of the NTB transport queue
1445302484Smav *
1446302484Smav * RETURNS: true for link up or false for link down
1447302484Smav */
1448302484Smavbool
1449302484Smavntb_transport_link_query(struct ntb_transport_qp *qp)
1450302484Smav{
1451302484Smav
	/* Unsynchronized snapshot; the state may change right after it is read. */
1452302484Smav	return (qp->link_is_up);
1453302484Smav}
1454302484Smav
1455317892Smav/**
1456317892Smav * ntb_transport_link_speed - Query transport link speed
1457317892Smav * @qp: NTB transport layer queue to be queried
1458317892Smav *
1459317892Smav * Query connection speed to the remote system of the NTB transport queue
1460317892Smav *
1461317892Smav * RETURNS: link speed in bits per second
1462317892Smav */
1463317892Smavuint64_t
1464317892Smavntb_transport_link_speed(struct ntb_transport_qp *qp)
1465317892Smav{
1466317892Smav	struct ntb_transport_ctx *nt = qp->transport;
1467317892Smav	uint64_t rate;
1468317892Smav
1469317892Smav	if (!nt->link_is_up)
1470317892Smav		return (0);
	/* Per-lane data rate: raw transfer rate minus line-encoding overhead. */
1471317892Smav	switch (nt->link_speed) {
1472317892Smav	case NTB_SPEED_GEN1:
		/* 2.5 GT/s, 8b/10b encoding. */
1473317892Smav		rate = 2500000000 * 8 / 10;
1474317892Smav		break;
1475317892Smav	case NTB_SPEED_GEN2:
		/* 5 GT/s, 8b/10b encoding. */
1476317892Smav		rate = 5000000000 * 8 / 10;
1477317892Smav		break;
1478317892Smav	case NTB_SPEED_GEN3:
		/* 8 GT/s, 128b/130b encoding. */
1479317892Smav		rate = 8000000000 * 128 / 130;
1480317892Smav		break;
1481317892Smav	case NTB_SPEED_GEN4:
		/* 16 GT/s, 128b/130b encoding. */
1482317892Smav		rate = 16000000000 * 128 / 130;
1483317892Smav		break;
1484317892Smav	default:
1485317892Smav		return (0);
1486317892Smav	}
1487317892Smav	if (nt->link_width <= 0)
1488317892Smav		return (0);
	/* Scale by the negotiated lane count. */
1489317892Smav	return (rate * nt->link_width);
1490317892Smav}
1491317892Smav
/*
 * Tell the peer this qp is going down by transmitting a zero-length
 * frame flagged LINK_DOWN, then reset the local qp state.  Waits up to
 * NTB_LINK_DOWN_TIMEOUT * 100ms for a free tx descriptor.
 */
1492250079Scarlstatic void
1493250079Scarlntb_send_link_down(struct ntb_transport_qp *qp)
1494250079Scarl{
1495250079Scarl	struct ntb_queue_entry *entry;
1496250079Scarl	int i, rc;
1497250079Scarl
1498289545Scem	if (!qp->link_is_up)
1499250079Scarl		return;
1500250079Scarl
	/* Poll briefly for a free tx descriptor. */
1501250079Scarl	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
1502250079Scarl		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
1503250079Scarl		if (entry != NULL)
1504250079Scarl			break;
1505250079Scarl		pause("NTB Wait for link down", hz / 10);
1506250079Scarl	}
1507250079Scarl
1508250079Scarl	if (entry == NULL)
1509250079Scarl		return;
1510250079Scarl
	/* Zero-length control frame carrying only the LINK_DOWN flag. */
1511250079Scarl	entry->cb_data = NULL;
1512250079Scarl	entry->buf = NULL;
1513250079Scarl	entry->len = 0;
1514302484Smav	entry->flags = NTBT_LINK_DOWN_FLAG;
1515250079Scarl
1516304349Smav	mtx_lock(&qp->tx_lock);
1517250079Scarl	rc = ntb_process_tx(qp, entry);
1518304349Smav	mtx_unlock(&qp->tx_lock);
1519250079Scarl	if (rc != 0)
1520250079Scarl		printf("ntb: Failed to send link down\n");
1521289613Scem
1522289613Scem	ntb_qp_link_down_reset(qp);
1523250079Scarl}
1524250079Scarl
1525250079Scarl
1526250079Scarl/* List Management */
1527250079Scarl
/* Append an entry to a descriptor list under its spin mutex. */
1528250079Scarlstatic void
1529250079Scarlntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
1530250079Scarl    struct ntb_queue_list *list)
1531250079Scarl{
1532250079Scarl
1533250079Scarl	mtx_lock_spin(lock);
1534250079Scarl	STAILQ_INSERT_TAIL(list, entry, entry);
1535250079Scarl	mtx_unlock_spin(lock);
1536250079Scarl}
1537250079Scarl
1538250079Scarlstatic struct ntb_queue_entry *
1539250079Scarlntb_list_rm(struct mtx *lock, struct ntb_queue_list *list)
1540250079Scarl{
1541250079Scarl	struct ntb_queue_entry *entry;
1542250079Scarl
1543250079Scarl	mtx_lock_spin(lock);
1544250079Scarl	if (STAILQ_EMPTY(list)) {
1545250079Scarl		entry = NULL;
1546250079Scarl		goto out;
1547250079Scarl	}
1548250079Scarl	entry = STAILQ_FIRST(list);
1549250079Scarl	STAILQ_REMOVE_HEAD(list, entry);
1550250079Scarlout:
1551250079Scarl	mtx_unlock_spin(lock);
1552250079Scarl
1553250079Scarl	return (entry);
1554250079Scarl}
1555250079Scarl
1556289651Scemstatic struct ntb_queue_entry *
1557289651Scemntb_list_mv(struct mtx *lock, struct ntb_queue_list *from,
1558289651Scem    struct ntb_queue_list *to)
1559289651Scem{
1560289651Scem	struct ntb_queue_entry *entry;
1561289651Scem
1562289651Scem	mtx_lock_spin(lock);
1563289651Scem	if (STAILQ_EMPTY(from)) {
1564289651Scem		entry = NULL;
1565289651Scem		goto out;
1566289651Scem	}
1567289651Scem	entry = STAILQ_FIRST(from);
1568289651Scem	STAILQ_REMOVE_HEAD(from, entry);
1569289651Scem	STAILQ_INSERT_TAIL(to, entry, entry);
1570289651Scem
1571289651Scemout:
1572289651Scem	mtx_unlock_spin(lock);
1573289651Scem	return (entry);
1574289651Scem}
1575289651Scem
1576302484Smav/**
1577302484Smav * ntb_transport_qp_num - Query the qp number
1578302484Smav * @qp: NTB transport layer queue to be queried
1579302484Smav *
1580302484Smav * Query qp number of the NTB transport queue
1581302484Smav *
1582302484Smav * RETURNS: a zero based number specifying the qp number
1583302484Smav */
1584302484Smavunsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
1585250079Scarl{
1586250079Scarl
1587302484Smav	return (qp->qp_num);
1588250079Scarl}
1589250079Scarl
1590250079Scarl/**
1591250079Scarl * ntb_transport_max_size - Query the max payload size of a qp
1592250079Scarl * @qp: NTB transport layer queue to be queried
1593250079Scarl *
1594250079Scarl * Query the maximum payload size permissible on the given qp
1595250079Scarl *
1596250079Scarl * RETURNS: the max payload size of a qp
1597250079Scarl */
1598302484Smavunsigned int
1599250079Scarlntb_transport_max_size(struct ntb_transport_qp *qp)
1600250079Scarl{
1601250079Scarl
	/* Each frame slot reserves room for its trailing payload header. */
1602250079Scarl	return (qp->tx_max_frame - sizeof(struct ntb_payload_header));
1603250079Scarl}
1604302484Smav
/*
 * Return the number of free slots in the qp's transmit ring, computed
 * from the consumer index last advertised by the peer.
 */
1605302484Smavunsigned int
1606302484Smavntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
1607302484Smav{
1608302484Smav	unsigned int head = qp->tx_index;
1609302484Smav	unsigned int tail = qp->remote_rx_info->entry;
1610302484Smav
1611302484Smav	return (tail >= head ? tail - head : qp->tx_max_entry + tail - head);
1612302484Smav}
1613302484Smav
/* newbus glue: ntb_transport attaches as a child of the ntb_hw driver. */
1614302484Smavstatic device_method_t ntb_transport_methods[] = {
1615302484Smav	/* Device interface */
1616302484Smav	DEVMETHOD(device_probe,     ntb_transport_probe),
1617302484Smav	DEVMETHOD(device_attach,    ntb_transport_attach),
1618302484Smav	DEVMETHOD(device_detach,    ntb_transport_detach),
1619323455Smav	/* Bus interface */
1620323455Smav	DEVMETHOD(bus_child_location_str, ntb_transport_child_location_str),
1621323455Smav	DEVMETHOD(bus_print_child,  ntb_transport_print_child),
1622302484Smav	DEVMETHOD_END
1623302484Smav};
1624302484Smav
1625302484Smavdevclass_t ntb_transport_devclass;
1626302484Smavstatic DEFINE_CLASS_0(ntb_transport, ntb_transport_driver,
1627302484Smav    ntb_transport_methods, sizeof(struct ntb_transport_ctx));
1628302484SmavDRIVER_MODULE(ntb_transport, ntb_hw, ntb_transport_driver,
1629302484Smav    ntb_transport_devclass, NULL, NULL);
1630302484SmavMODULE_DEPEND(ntb_transport, ntb, 1, 1, 1);
1631302484SmavMODULE_VERSION(ntb_transport, 1);
1632