t4_tracer.c revision 309560
1/*-
2 * Copyright (c) 2013 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/t4_tracer.c 309560 2016-12-05 20:43:25Z jhb $");
30
31#include "opt_inet.h"
32#include "opt_inet6.h"
33
34#include <sys/param.h>
35#include <sys/eventhandler.h>
36#include <sys/lock.h>
37#include <sys/types.h>
38#include <sys/mbuf.h>
39#include <sys/socket.h>
40#include <sys/sockio.h>
41#include <sys/sx.h>
42#include <net/bpf.h>
43#include <net/ethernet.h>
44#include <net/if.h>
45#include <net/if_clone.h>
46#include <net/if_types.h>
47
48#include "common/common.h"
49#include "common/t4_msg.h"
50#include "common/t4_regs.h"
51#include "t4_ioctl.h"
52
53/*
54 * Locking notes
55 * =============
56 *
57 * An interface cloner is registered during mod_load and it can be used to
58 * create or destroy the tracing ifnet for an adapter at any time.  It is
59 * possible for the cloned interface to outlive the adapter (adapter disappears
60 * in t4_detach but the tracing ifnet may live till mod_unload when removal of
61 * the cloner finally destroys any remaining cloned interfaces).  When tracing
62 * filters are active, this ifnet is also receiving data.  There are potential
63 * bad races between ifnet create, ifnet destroy, ifnet rx, ifnet ioctl,
64 * cxgbe_detach/t4_detach, mod_unload.
65 *
66 * a) The driver selects an iq for tracing (sc->traceq) inside a synch op.  The
67 *    iq is destroyed inside a synch op too (and sc->traceq updated).
68 * b) The cloner looks for an adapter that matches the name of the ifnet it's
69 *    been asked to create, starts a synch op on that adapter, and proceeds only
70 *    if the adapter has a tracing iq.
71 * c) The cloned ifnet and the adapter are coupled to each other via
72 *    ifp->if_softc and sc->ifp.  These can be modified only with the global
73 *    t4_trace_lock sx as well as the sc->ifp_lock mutex held.  Holding either
74 *    of these will prevent any change.
75 *
76 * The order in which all the locks involved should be acquired are:
77 * t4_list_lock
78 * adapter lock
79 * (begin synch op and let go of the above two)
80 * t4_trace_lock
81 * sc->ifp_lock
82 */
83
84static struct sx t4_trace_lock;
85static const char *t4_cloner_name = "tXnex";
86static struct if_clone *t4_cloner;
87
88/* tracer ifnet routines.  mostly no-ops. */
89static void tracer_init(void *);
90static int tracer_ioctl(struct ifnet *, unsigned long, caddr_t);
91static int tracer_transmit(struct ifnet *, struct mbuf *);
92static void tracer_qflush(struct ifnet *);
93static int tracer_media_change(struct ifnet *);
94static void tracer_media_status(struct ifnet *, struct ifmediareq *);
95
/*
 * match name (request/response): input is the nexus device name to look
 * for; output is the matching adapter (if any) and a status code.
 */
struct match_rr {
	const char *name;	/* nexus device name to match, e.g. "t4nex0" */
	int lock;	/* set to 1 to return with a synch op begun on sc. */
	struct adapter *sc;	/* matched adapter, NULL if no match */
	int rc;		/* 0 on success; caller preloads ENOENT for no match */
};
103
104static void
105match_name(struct adapter *sc, void *arg)
106{
107	struct match_rr *mrr = arg;
108
109	if (strcmp(device_get_nameunit(sc->dev), mrr->name) != 0)
110		return;
111
112	KASSERT(mrr->sc == NULL, ("%s: multiple matches (%p, %p) for %s",
113	    __func__, mrr->sc, sc, mrr->name));
114
115	mrr->sc = sc;
116	if (mrr->lock)
117		mrr->rc = begin_synchronized_op(mrr->sc, NULL, 0, "t4clon");
118	else
119		mrr->rc = 0;
120}
121
/*
 * Cloner match routine: accept only names of the form t4nexN, t5nexN, or
 * t6nexN where the character right after the 5-char prefix is a digit.
 */
static int
t4_cloner_match(struct if_clone *ifc, const char *name)
{

	/* Checks are ordered so we never read past a NUL terminator. */
	if (name[0] != 't')
		return (0);
	if (name[1] != '4' && name[1] != '5' && name[1] != '6')
		return (0);
	if (name[2] != 'n' || name[3] != 'e' || name[4] != 'x')
		return (0);
	return (name[5] >= '0' && name[5] <= '9' ? 1 : 0);
}
134
/*
 * Create the tracing ifnet for the adapter whose nexus device name matches
 * "name" (e.g. t4nex0).  Fails with EEXIST if the adapter already has a
 * tracer ifnet and EAGAIN if it has no tracing queue (sc->traceq) yet.
 */
static int
t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	struct match_rr mrr;
	struct adapter *sc;
	struct ifnet *ifp;
	int rc, unit;
	const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	/*
	 * Find the adapter.  On success match_name has already begun a
	 * synchronized op on it (mrr.lock == 1); ENOENT if nothing matched.
	 */
	mrr.name = name;
	mrr.lock = 1;
	mrr.sc = NULL;
	mrr.rc = ENOENT;
	t4_iterate(match_name, &mrr);

	if (mrr.rc != 0)
		return (mrr.rc);
	sc = mrr.sc;

	KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
	    __func__, name));
	ASSERT_SYNCHRONIZED_OP(sc);

	sx_xlock(&t4_trace_lock);

	if (sc->ifp != NULL) {
		/* This adapter already has a tracer ifnet. */
		rc = EEXIST;
		goto done;
	}
	if (sc->traceq < 0) {
		/* No tracing iq selected yet (see locking notes above). */
		rc = EAGAIN;
		goto done;
	}


	unit = -1;
	rc = ifc_alloc_unit(ifc, &unit);
	if (rc != 0)
		goto done;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		rc = ENOMEM;
		goto done;
	}

	/* Note that if_xname is not <if_dname><if_dunit>. */
	strlcpy(ifp->if_xname, name, sizeof(ifp->if_xname));
	ifp->if_dname = t4_cloner_name;
	ifp->if_dunit = unit;
	ifp->if_init = tracer_init;
	ifp->if_flags = IFF_SIMPLEX | IFF_DRV_RUNNING;
	ifp->if_ioctl = tracer_ioctl;
	ifp->if_transmit = tracer_transmit;
	ifp->if_qflush = tracer_qflush;
	ifp->if_capabilities = IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
	/* Fixed dummy media: the tracer has no real link state. */
	ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
	    tracer_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);
	ether_ifattach(ifp, lla);

	/* Couple the ifnet and the adapter under both locks. */
	mtx_lock(&sc->ifp_lock);
	ifp->if_softc = sc;
	sc->ifp = ifp;
	mtx_unlock(&sc->ifp_lock);
done:
	sx_xunlock(&t4_trace_lock);
	end_synchronized_op(sc, 0);
	return (rc);
}
207
/*
 * Destroy a cloned tracing ifnet.  Decouples it from its adapter (if it is
 * still coupled) before detaching and freeing the ifnet and its unit.
 */
static int
t4_cloner_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct adapter *sc;
	int unit = ifp->if_dunit;

	sx_xlock(&t4_trace_lock);
	sc = ifp->if_softc;	/* NULL if the adapter detached already */
	if (sc != NULL) {
		/* Break the ifnet <-> adapter coupling under both locks. */
		mtx_lock(&sc->ifp_lock);
		sc->ifp = NULL;
		ifp->if_softc = NULL;
		mtx_unlock(&sc->ifp_lock);
		ifmedia_removeall(&sc->media);
	}
	ether_ifdetach(ifp);
	if_free(ifp);
	ifc_free_unit(ifc, unit);
	sx_xunlock(&t4_trace_lock);

	return (0);
}
230
231void
232t4_tracer_modload()
233{
234
235	sx_init(&t4_trace_lock, "T4/T5 tracer lock");
236	t4_cloner = if_clone_advanced(t4_cloner_name, 0, t4_cloner_match,
237	    t4_cloner_create, t4_cloner_destroy);
238}
239
240void
241t4_tracer_modunload()
242{
243
244	if (t4_cloner != NULL) {
245		/*
246		 * The module is being unloaded so the nexus drivers have
247		 * detached.  The tracing interfaces can not outlive the nexus
248		 * (ifp->if_softc is the nexus) and must have been destroyed
249		 * already.  XXX: but if_clone is opaque to us and we can't
250		 * assert LIST_EMPTY(&t4_cloner->ifc_iflist) at this time.
251		 */
252		if_clone_detach(t4_cloner);
253	}
254	sx_destroy(&t4_trace_lock);
255}
256
/*
 * Called on adapter detach.  The cloned ifnet may outlive the adapter, so
 * decouple the two here: from now on the ifnet's callbacks see
 * if_softc == NULL and the rx path sees sc->ifp == NULL.
 */
void
t4_tracer_port_detach(struct adapter *sc)
{

	sx_xlock(&t4_trace_lock);
	if (sc->ifp != NULL) {
		mtx_lock(&sc->ifp_lock);
		sc->ifp->if_softc = NULL;
		sc->ifp = NULL;
		mtx_unlock(&sc->ifp_lock);
	}
	/*
	 * NOTE(review): called unconditionally, i.e. even when no tracer
	 * ifnet was ever cloned and ifmedia_init() never ran on sc->media --
	 * presumably safe because sc->media is zeroed at attach; confirm.
	 */
	ifmedia_removeall(&sc->media);
	sx_xunlock(&t4_trace_lock);
}
271
272int
273t4_get_tracer(struct adapter *sc, struct t4_tracer *t)
274{
275	int rc, i, enabled;
276	struct trace_params tp;
277
278	if (t->idx >= NTRACE) {
279		t->idx = 0xff;
280		t->enabled = 0;
281		t->valid = 0;
282		return (0);
283	}
284
285	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
286	    "t4gett");
287	if (rc)
288		return (rc);
289
290	for (i = t->idx; i < NTRACE; i++) {
291		if (isset(&sc->tracer_valid, t->idx)) {
292			t4_get_trace_filter(sc, &tp, i, &enabled);
293			t->idx = i;
294			t->enabled = enabled;
295			t->valid = 1;
296			memcpy(&t->tp.data[0], &tp.data[0], sizeof(t->tp.data));
297			memcpy(&t->tp.mask[0], &tp.mask[0], sizeof(t->tp.mask));
298			t->tp.snap_len = tp.snap_len;
299			t->tp.min_len = tp.min_len;
300			t->tp.skip_ofst = tp.skip_ofst;
301			t->tp.skip_len = tp.skip_len;
302			t->tp.invert = tp.invert;
303
304			/* convert channel to port iff 0 <= port < 8. */
305			if (tp.port < 4)
306				t->tp.port = sc->chan_map[tp.port];
307			else if (tp.port < 8)
308				t->tp.port = sc->chan_map[tp.port - 4] + 4;
309			else
310				t->tp.port = tp.port;
311
312			goto done;
313		}
314	}
315
316	t->idx = 0xff;
317	t->enabled = 0;
318	t->valid = 0;
319done:
320	end_synchronized_op(sc, LOCK_HELD);
321
322	return (rc);
323}
324
325int
326t4_set_tracer(struct adapter *sc, struct t4_tracer *t)
327{
328	int rc;
329	struct trace_params tp, *tpp;
330
331	if (t->idx >= NTRACE)
332		return (EINVAL);
333
334	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
335	    "t4sett");
336	if (rc)
337		return (rc);
338
339	/*
340	 * If no tracing filter is specified this time then check if the filter
341	 * at the index is valid anyway because it was set previously.  If so
342	 * then this is a legitimate enable/disable operation.
343	 */
344	if (t->valid == 0) {
345		if (isset(&sc->tracer_valid, t->idx))
346			tpp = NULL;
347		else
348			rc = EINVAL;
349		goto done;
350	}
351
352	if (t->tp.port > 19 || t->tp.snap_len > 9600 ||
353	    t->tp.min_len > M_TFMINPKTSIZE || t->tp.skip_len > M_TFLENGTH ||
354	    t->tp.skip_ofst > M_TFOFFSET) {
355		rc = EINVAL;
356		goto done;
357	}
358
359	memcpy(&tp.data[0], &t->tp.data[0], sizeof(tp.data));
360	memcpy(&tp.mask[0], &t->tp.mask[0], sizeof(tp.mask));
361	tp.snap_len = t->tp.snap_len;
362	tp.min_len = t->tp.min_len;
363	tp.skip_ofst = t->tp.skip_ofst;
364	tp.skip_len = t->tp.skip_len;
365	tp.invert = !!t->tp.invert;
366
367	/* convert port to channel iff 0 <= port < 8. */
368	if (t->tp.port < 4) {
369		if (sc->port[t->tp.port] == NULL) {
370			rc = EINVAL;
371			goto done;
372		}
373		tp.port = sc->port[t->tp.port]->tx_chan;
374	} else if (t->tp.port < 8) {
375		if (sc->port[t->tp.port - 4] == NULL) {
376			rc = EINVAL;
377			goto done;
378		}
379		tp.port = sc->port[t->tp.port - 4]->tx_chan + 4;
380	}
381	tpp = &tp;
382done:
383	if (rc == 0) {
384		rc = -t4_set_trace_filter(sc, tpp, t->idx, t->enabled);
385		if (rc == 0) {
386			if (t->enabled) {
387				setbit(&sc->tracer_valid, t->idx);
388				if (sc->tracer_enabled == 0) {
389					t4_set_reg_field(sc, A_MPS_TRC_CFG,
390					    F_TRCEN, F_TRCEN);
391				}
392				setbit(&sc->tracer_enabled, t->idx);
393			} else {
394				clrbit(&sc->tracer_enabled, t->idx);
395				if (sc->tracer_enabled == 0) {
396					t4_set_reg_field(sc, A_MPS_TRC_CFG,
397					    F_TRCEN, 0);
398				}
399			}
400		}
401	}
402	end_synchronized_op(sc, LOCK_HELD);
403
404	return (rc);
405}
406
407int
408t4_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
409{
410	struct adapter *sc = iq->adapter;
411	struct ifnet *ifp;
412
413	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
414	    rss->opcode));
415
416	mtx_lock(&sc->ifp_lock);
417	ifp = sc->ifp;
418	if (sc->ifp) {
419		m_adj(m, sizeof(struct cpl_trace_pkt));
420		m->m_pkthdr.rcvif = ifp;
421		ETHER_BPF_MTAP(ifp, m);
422	}
423	mtx_unlock(&sc->ifp_lock);
424	m_freem(m);
425
426	return (0);
427}
428
429int
430t5_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
431{
432	struct adapter *sc = iq->adapter;
433	struct ifnet *ifp;
434
435	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
436	    rss->opcode));
437
438	mtx_lock(&sc->ifp_lock);
439	ifp = sc->ifp;
440	if (ifp != NULL) {
441		m_adj(m, sizeof(struct cpl_t5_trace_pkt));
442		m->m_pkthdr.rcvif = ifp;
443		ETHER_BPF_MTAP(ifp, m);
444	}
445	mtx_unlock(&sc->ifp_lock);
446	m_freem(m);
447
448	return (0);
449}
450
451
/* if_init for the tracer ifnet: nothing to initialize. */
static void
tracer_init(void *arg)
{
}
458
459static int
460tracer_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
461{
462	int rc = 0;
463	struct adapter *sc;
464	struct ifreq *ifr = (struct ifreq *)data;
465
466	switch (cmd) {
467	case SIOCSIFMTU:
468	case SIOCSIFFLAGS:
469	case SIOCADDMULTI:
470	case SIOCDELMULTI:
471	case SIOCSIFCAP:
472		break;
473	case SIOCSIFMEDIA:
474	case SIOCGIFMEDIA:
475	case SIOCGIFXMEDIA:
476		sx_xlock(&t4_trace_lock);
477		sc = ifp->if_softc;
478		if (sc == NULL)
479			rc = EIO;
480		else
481			rc = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
482		sx_xunlock(&t4_trace_lock);
483		break;
484	default:
485		rc = ether_ioctl(ifp, cmd, data);
486	}
487
488	return (rc);
489}
490
/* The tracer never transmits; discard everything handed to it. */
static int
tracer_transmit(struct ifnet *ifp, struct mbuf *m)
{
	m_freem(m);
	return (0);
}
498
/* Nothing is ever queued, so there is nothing to flush. */
static void
tracer_qflush(struct ifnet *ifp)
{
}
505
/* The tracer's media is fixed; changing it is not supported. */
static int
tracer_media_change(struct ifnet *ifp)
{
	return (EOPNOTSUPP);
}
512
513static void
514tracer_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
515{
516
517	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
518
519	return;
520}
521