/*-
 * Copyright (c) 2013 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_tracer.c 309560 2016-12-05 20:43:25Z jhb $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sx.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_types.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "t4_ioctl.h"
/*
 * Locking notes
 * =============
 *
 * An interface cloner is registered during mod_load and it can be used to
 * create or destroy the tracing ifnet for an adapter at any time.  It is
 * possible for the cloned interface to outlive the adapter (adapter disappears
 * in t4_detach but the tracing ifnet may live till mod_unload when removal of
 * the cloner finally destroys any remaining cloned interfaces).  When tracing
 * filters are active, this ifnet is also receiving data.  There are potential
 * bad races between ifnet create, ifnet destroy, ifnet rx, ifnet ioctl,
 * cxgbe_detach/t4_detach, and mod_unload.
 *
 * a) The driver selects an iq for tracing (sc->traceq) inside a synch op.  The
 *    iq is destroyed inside a synch op too (and sc->traceq updated).
 * b) The cloner looks for an adapter that matches the name of the ifnet it's
 *    been asked to create, starts a synch op on that adapter, and proceeds only
 *    if the adapter has a tracing iq.
 * c) The cloned ifnet and the adapter are coupled to each other via
 *    ifp->if_softc and sc->ifp.  These can be modified only with both the
 *    global t4_trace_lock sx and the sc->ifp_lock mutex held; holding either
 *    one is enough to prevent any change.
 *
 * The order in which all the locks involved should be acquired is:
 * t4_list_lock
 * adapter lock
 * (begin synch op and let go of the above two)
 * t4_trace_lock
 * sc->ifp_lock
 */
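
/*
 * Illustrative sketch only (added commentary, not driver logic): the cloner
 * create path below is one example of the documented lock order in practice.
 * This is a summary of t4_cloner_create, not a verbatim call trace.
 *
 *	t4_iterate(match_name, ...)	t4_list_lock held by the iterator
 *	begin_synchronized_op(...)	adapter lock taken/dropped, synch op held
 *	sx_xlock(&t4_trace_lock)
 *	mtx_lock(&sc->ifp_lock)
 *	... couple ifp->if_softc and sc->ifp ...
 *	mtx_unlock(&sc->ifp_lock)
 *	sx_xunlock(&t4_trace_lock)
 *	end_synchronized_op(sc, 0)
 */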

static struct sx t4_trace_lock;
static const char *t4_cloner_name = "tXnex";
static struct if_clone *t4_cloner;

/* tracer ifnet routines.  mostly no-ops. */
static void tracer_init(void *);
static int tracer_ioctl(struct ifnet *, unsigned long, caddr_t);
static int tracer_transmit(struct ifnet *, struct mbuf *);
static void tracer_qflush(struct ifnet *);
static int tracer_media_change(struct ifnet *);
static void tracer_media_status(struct ifnet *, struct ifmediareq *);

/* match name (request/response) */
struct match_rr {
	const char *name;
	int lock;	/* set to 1 to have sc returned inside a synch op. */
	struct adapter *sc;
	int rc;
};

static void
match_name(struct adapter *sc, void *arg)
{
	struct match_rr *mrr = arg;

	if (strcmp(device_get_nameunit(sc->dev), mrr->name) != 0)
		return;

	KASSERT(mrr->sc == NULL, ("%s: multiple matches (%p, %p) for %s",
	    __func__, mrr->sc, sc, mrr->name));

	mrr->sc = sc;
	if (mrr->lock)
		mrr->rc = begin_synchronized_op(mrr->sc, NULL, 0, "t4clon");
	else
		mrr->rc = 0;
}

static int
t4_cloner_match(struct if_clone *ifc, const char *name)
{

	if (strncmp(name, "t4nex", 5) != 0 &&
	    strncmp(name, "t5nex", 5) != 0 &&
	    strncmp(name, "t6nex", 5) != 0)
		return (0);
	if (name[5] < '0' || name[5] > '9')
		return (0);
	return (1);
}

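/*
 * Cloner create callback.  In brief: find the adapter whose nexus name matches
 * the requested ifnet name (match_name starts a synch op on it), require that
 * a tracing queue has already been selected (sc->traceq), then allocate a unit
 * and an ifnet and couple it to the adapter under t4_trace_lock + ifp_lock.
 */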
static int
t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	struct match_rr mrr;
	struct adapter *sc;
	struct ifnet *ifp;
	int rc, unit;
	const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	mrr.name = name;
	mrr.lock = 1;
	mrr.sc = NULL;
	mrr.rc = ENOENT;
	t4_iterate(match_name, &mrr);

	if (mrr.rc != 0)
		return (mrr.rc);
	sc = mrr.sc;

	KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
	    __func__, name));
	ASSERT_SYNCHRONIZED_OP(sc);

	sx_xlock(&t4_trace_lock);

	if (sc->ifp != NULL) {
		rc = EEXIST;
		goto done;
	}
	if (sc->traceq < 0) {
		rc = EAGAIN;
		goto done;
	}

	unit = -1;
	rc = ifc_alloc_unit(ifc, &unit);
	if (rc != 0)
		goto done;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		rc = ENOMEM;
		goto done;
	}

	/* Note that if_xname is not <if_dname><if_dunit>. */
	strlcpy(ifp->if_xname, name, sizeof(ifp->if_xname));
	ifp->if_dname = t4_cloner_name;
	ifp->if_dunit = unit;
	ifp->if_init = tracer_init;
	ifp->if_flags = IFF_SIMPLEX | IFF_DRV_RUNNING;
	ifp->if_ioctl = tracer_ioctl;
	ifp->if_transmit = tracer_transmit;
	ifp->if_qflush = tracer_qflush;
	ifp->if_capabilities = IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
	ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
	    tracer_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);
	ether_ifattach(ifp, lla);

	mtx_lock(&sc->ifp_lock);
	ifp->if_softc = sc;
	sc->ifp = ifp;
	mtx_unlock(&sc->ifp_lock);
done:
	sx_xunlock(&t4_trace_lock);
	end_synchronized_op(sc, 0);
	return (rc);
}

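/*
 * Cloner destroy callback.  Decouple the ifnet from the adapter (if it is
 * still coupled) under t4_trace_lock + ifp_lock, then detach and free the
 * ifnet and release its unit number.
 */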
static int
t4_cloner_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct adapter *sc;
	int unit = ifp->if_dunit;

	sx_xlock(&t4_trace_lock);
	sc = ifp->if_softc;
	if (sc != NULL) {
		mtx_lock(&sc->ifp_lock);
		sc->ifp = NULL;
		ifp->if_softc = NULL;
		mtx_unlock(&sc->ifp_lock);
		ifmedia_removeall(&sc->media);
	}
	ether_ifdetach(ifp);
	if_free(ifp);
	ifc_free_unit(ifc, unit);
	sx_xunlock(&t4_trace_lock);

	return (0);
}

void
t4_tracer_modload()
{

	sx_init(&t4_trace_lock, "T4/T5 tracer lock");
	t4_cloner = if_clone_advanced(t4_cloner_name, 0, t4_cloner_match,
	    t4_cloner_create, t4_cloner_destroy);
}

void
t4_tracer_modunload()
{

	if (t4_cloner != NULL) {
		/*
		 * The module is being unloaded so the nexus drivers have
		 * detached.  The tracing interfaces can not outlive the nexus
		 * (ifp->if_softc is the nexus) and must have been destroyed
		 * already.  XXX: but if_clone is opaque to us and we can't
		 * assert LIST_EMPTY(&t4_cloner->ifc_iflist) at this time.
		 */
		if_clone_detach(t4_cloner);
	}
	sx_destroy(&t4_trace_lock);
}

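/*
 * Called during adapter detach.  The cloned ifnet may outlive the adapter, so
 * only the coupling is undone here; the ifnet itself is destroyed later by
 * t4_cloner_destroy or at mod_unload.
 */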
void
t4_tracer_port_detach(struct adapter *sc)
{

	sx_xlock(&t4_trace_lock);
	if (sc->ifp != NULL) {
		mtx_lock(&sc->ifp_lock);
		sc->ifp->if_softc = NULL;
		sc->ifp = NULL;
		mtx_unlock(&sc->ifp_lock);
	}
	ifmedia_removeall(&sc->media);
	sx_xunlock(&t4_trace_lock);
}

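/*
 * Reports the first valid tracing filter at or after t->idx; if none is found
 * t->idx is set to 0xff.  The channel stored in the hardware filter is
 * converted back to a port number where possible.
 */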
int
t4_get_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc, i, enabled;
	struct trace_params tp;

	if (t->idx >= NTRACE) {
		t->idx = 0xff;
		t->enabled = 0;
		t->valid = 0;
		return (0);
	}

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4gett");
	if (rc)
		return (rc);

	for (i = t->idx; i < NTRACE; i++) {
		if (isset(&sc->tracer_valid, i)) {
			t4_get_trace_filter(sc, &tp, i, &enabled);
			t->idx = i;
			t->enabled = enabled;
			t->valid = 1;
			memcpy(&t->tp.data[0], &tp.data[0], sizeof(t->tp.data));
			memcpy(&t->tp.mask[0], &tp.mask[0], sizeof(t->tp.mask));
			t->tp.snap_len = tp.snap_len;
			t->tp.min_len = tp.min_len;
			t->tp.skip_ofst = tp.skip_ofst;
			t->tp.skip_len = tp.skip_len;
			t->tp.invert = tp.invert;

			/* convert channel to port iff 0 <= port < 8. */
			if (tp.port < 4)
				t->tp.port = sc->chan_map[tp.port];
			else if (tp.port < 8)
				t->tp.port = sc->chan_map[tp.port - 4] + 4;
			else
				t->tp.port = tp.port;

			goto done;
		}
	}

	t->idx = 0xff;
	t->enabled = 0;
	t->valid = 0;
done:
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}

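/*
 * Programs (or enables/disables) the tracing filter at t->idx.  A request with
 * t->valid == 0 only toggles a previously configured filter; otherwise the
 * supplied parameters are validated, the port is converted to a channel, and
 * the filter is written to the hardware.
 */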
int
t4_set_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc;
	struct trace_params tp, *tpp;

	if (t->idx >= NTRACE)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4sett");
	if (rc)
		return (rc);

	/*
	 * If no tracing filter is specified this time then check if the filter
	 * at the index is valid anyway because it was set previously.  If so
	 * then this is a legitimate enable/disable operation.
	 */
	if (t->valid == 0) {
		if (isset(&sc->tracer_valid, t->idx))
			tpp = NULL;
		else
			rc = EINVAL;
		goto done;
	}

	if (t->tp.port > 19 || t->tp.snap_len > 9600 ||
	    t->tp.min_len > M_TFMINPKTSIZE || t->tp.skip_len > M_TFLENGTH ||
	    t->tp.skip_ofst > M_TFOFFSET) {
		rc = EINVAL;
		goto done;
	}

	memcpy(&tp.data[0], &t->tp.data[0], sizeof(tp.data));
	memcpy(&tp.mask[0], &t->tp.mask[0], sizeof(tp.mask));
	tp.snap_len = t->tp.snap_len;
	tp.min_len = t->tp.min_len;
	tp.skip_ofst = t->tp.skip_ofst;
	tp.skip_len = t->tp.skip_len;
	tp.invert = !!t->tp.invert;

	/* convert port to channel iff 0 <= port < 8. */
	if (t->tp.port < 4) {
		if (sc->port[t->tp.port] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port]->tx_chan;
	} else if (t->tp.port < 8) {
		if (sc->port[t->tp.port - 4] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port - 4]->tx_chan + 4;
	}
	tpp = &tp;
done:
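	/*
	 * tpp == NULL here means enable/disable of an existing filter; a
	 * non-NULL tpp carries a full, validated filter spec.  The global MPS
	 * trace enable (F_TRCEN) is turned on with the first enabled tracer
	 * and turned off when the last one is disabled.
	 */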
	if (rc == 0) {
		rc = -t4_set_trace_filter(sc, tpp, t->idx, t->enabled);
		if (rc == 0) {
			if (t->enabled) {
				setbit(&sc->tracer_valid, t->idx);
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, F_TRCEN);
				}
				setbit(&sc->tracer_enabled, t->idx);
			} else {
				clrbit(&sc->tracer_enabled, t->idx);
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, 0);
				}
			}
		}
	}
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}

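/*
 * Rx handlers for trace packets delivered to the tracing iq.  The CPL header
 * is stripped and the payload is handed to BPF on the tracer ifnet (if one is
 * currently coupled to the adapter); the mbuf is always freed here.
 */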
int
t4_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct ifnet *ifp;

	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	mtx_lock(&sc->ifp_lock);
	ifp = sc->ifp;
	if (ifp != NULL) {
		m_adj(m, sizeof(struct cpl_trace_pkt));
		m->m_pkthdr.rcvif = ifp;
		ETHER_BPF_MTAP(ifp, m);
	}
	mtx_unlock(&sc->ifp_lock);
	m_freem(m);

	return (0);
}

int
t5_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct ifnet *ifp;

	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	mtx_lock(&sc->ifp_lock);
	ifp = sc->ifp;
	if (ifp != NULL) {
		m_adj(m, sizeof(struct cpl_t5_trace_pkt));
		m->m_pkthdr.rcvif = ifp;
		ETHER_BPF_MTAP(ifp, m);
	}
	mtx_unlock(&sc->ifp_lock);
	m_freem(m);

	return (0);
}

static void
tracer_init(void *arg)
{

	return;
}

static int
tracer_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0;
	struct adapter *sc;
	struct ifreq *ifr = (struct ifreq *)data;

	switch (cmd) {
	case SIOCSIFMTU:
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFCAP:
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
	case SIOCGIFXMEDIA:
		sx_xlock(&t4_trace_lock);
		sc = ifp->if_softc;
		if (sc == NULL)
			rc = EIO;
		else
			rc = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		sx_xunlock(&t4_trace_lock);
		break;
	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

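/*
 * The tracer ifnet is receive-only; anything handed to if_transmit is simply
 * dropped.
 */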
static int
tracer_transmit(struct ifnet *ifp, struct mbuf *m)
{

	m_freem(m);
	return (0);
}

static void
tracer_qflush(struct ifnet *ifp)
{

	return;
}

static int
tracer_media_change(struct ifnet *ifp)
{

	return (EOPNOTSUPP);
}

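/*
 * There is no real link behind the tracer ifnet, so the media is always
 * reported as valid and active.
 */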
static void
tracer_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;

	return;
}