/*-
 * Copyright (c) 2013 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/dev/cxgbe/t4_tracer.c 270297 2014-08-21 19:54:02Z np $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sx.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_types.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "t4_ioctl.h"

/*
 * Locking notes
 * =============
 *
 * An interface cloner is registered during mod_load and it can be used to
 * create or destroy the tracing ifnet for an adapter at any time.  It is
 * possible for the cloned interface to outlive the adapter (the adapter
 * disappears in t4_detach but the tracing ifnet may live until mod_unload,
 * when removal of the cloner finally destroys any remaining cloned
 * interfaces).  When tracing filters are active, this ifnet is also
 * receiving data.  There are potential races between ifnet create, ifnet
 * destroy, ifnet rx, ifnet ioctl, cxgbe_detach/t4_detach, and mod_unload.
 *
 * a) The driver selects an iq for tracing (sc->traceq) inside a synch op.  The
 *    iq is destroyed inside a synch op too (and sc->traceq updated).
 * b) The cloner looks for an adapter that matches the name of the ifnet it's
 *    been asked to create, starts a synch op on that adapter, and proceeds only
 *    if the adapter has a tracing iq.
 * c) The cloned ifnet and the adapter are coupled to each other via
 *    ifp->if_softc and sc->ifp.  These can be modified only with both the
 *    global t4_trace_lock sx and the sc->ifp_lock mutex held; holding either
 *    one is enough to prevent any change.
 *
 * The order in which all the locks involved should be acquired is:
 * t4_list_lock
 * adapter lock
 * (begin synch op and let go of the above two)
 * t4_trace_lock
 * sc->ifp_lock
 */
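
/*
 * t4_cloner_create() below exercises this order: t4_iterate() walks the
 * list of adapters, match_name() begins a synchronized op on the matching
 * adapter, and the create path then takes t4_trace_lock followed by
 * sc->ifp_lock before publishing the ifnet/adapter coupling.
 */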

static struct sx t4_trace_lock;
static const char *t4_cloner_name = "tXnex";
static struct if_clone *t4_cloner;

/* tracer ifnet routines.  mostly no-ops. */
static void tracer_init(void *);
static int tracer_ioctl(struct ifnet *, unsigned long, caddr_t);
static int tracer_transmit(struct ifnet *, struct mbuf *);
static void tracer_qflush(struct ifnet *);
static int tracer_media_change(struct ifnet *);
static void tracer_media_status(struct ifnet *, struct ifmediareq *);

/* match name (request/response) */
struct match_rr {
	const char *name;
	int lock;	/* set to 1 to have the matched sc returned locked. */
	struct adapter *sc;
	int rc;
};

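/*
 * t4_iterate() callback: record the adapter whose nexus name matches
 * mrr->name and, if mrr->lock is set, leave a synchronized op in progress
 * on it.
 */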
static void
match_name(struct adapter *sc, void *arg)
{
	struct match_rr *mrr = arg;

	if (strcmp(device_get_nameunit(sc->dev), mrr->name) != 0)
		return;

	KASSERT(mrr->sc == NULL, ("%s: multiple matches (%p, %p) for %s",
	    __func__, mrr->sc, sc, mrr->name));

	mrr->sc = sc;
	if (mrr->lock)
		mrr->rc = begin_synchronized_op(mrr->sc, NULL, 0, "t4clon");
	else
		mrr->rc = 0;
}

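/*
 * Accept only names that start with t4nex or t5nex followed by a digit,
 * i.e. the name of the nexus device the tracing ifnet will shadow.
 */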
static int
t4_cloner_match(struct if_clone *ifc, const char *name)
{

	if (strncmp(name, "t4nex", 5) != 0 &&
	    strncmp(name, "t5nex", 5) != 0)
		return (0);
	if (name[5] < '0' || name[5] > '9')
		return (0);
	return (1);
}

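/*
 * Create the tracing ifnet for the named nexus.  The adapter must already
 * have a tracing queue (sc->traceq) and must not have a tracer attached.
 * On success sc->ifp and ifp->if_softc point at each other.
 */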
static int
t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
{
	struct match_rr mrr;
	struct adapter *sc;
	struct ifnet *ifp;
	int rc, unit;
	const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};

	mrr.name = name;
	mrr.lock = 1;
	mrr.sc = NULL;
	mrr.rc = ENOENT;
	t4_iterate(match_name, &mrr);

	if (mrr.rc != 0)
		return (mrr.rc);
	sc = mrr.sc;

	KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
	    __func__, name));
	ASSERT_SYNCHRONIZED_OP(sc);

	sx_xlock(&t4_trace_lock);

	if (sc->ifp != NULL) {
		rc = EEXIST;
		goto done;
	}
	if (sc->traceq < 0) {
		rc = EAGAIN;
		goto done;
	}

	unit = -1;
	rc = ifc_alloc_unit(ifc, &unit);
	if (rc != 0)
		goto done;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		ifc_free_unit(ifc, unit);
		rc = ENOMEM;
		goto done;
	}

	/* Note that if_xname is not <if_dname><if_dunit>. */
	strlcpy(ifp->if_xname, name, sizeof(ifp->if_xname));
	ifp->if_dname = t4_cloner_name;
	ifp->if_dunit = unit;
	ifp->if_init = tracer_init;
	ifp->if_flags = IFF_SIMPLEX | IFF_DRV_RUNNING;
	ifp->if_ioctl = tracer_ioctl;
	ifp->if_transmit = tracer_transmit;
	ifp->if_qflush = tracer_qflush;
	ifp->if_capabilities = IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
	ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
	    tracer_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);
	ether_ifattach(ifp, lla);

	mtx_lock(&sc->ifp_lock);
	ifp->if_softc = sc;
	sc->ifp = ifp;
	mtx_unlock(&sc->ifp_lock);
done:
	sx_xunlock(&t4_trace_lock);
	end_synchronized_op(sc, 0);
	return (rc);
}

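/*
 * Destroy a cloned tracing ifnet.  If it is still coupled to an adapter,
 * the coupling and the fake media information are torn down first.
 */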
static int
t4_cloner_destroy(struct if_clone *ifc, struct ifnet *ifp)
{
	struct adapter *sc;
	int unit = ifp->if_dunit;

	sx_xlock(&t4_trace_lock);
	sc = ifp->if_softc;
	if (sc != NULL) {
		mtx_lock(&sc->ifp_lock);
		sc->ifp = NULL;
		ifp->if_softc = NULL;
		mtx_unlock(&sc->ifp_lock);
		ifmedia_removeall(&sc->media);
	}
	ether_ifdetach(ifp);
	if_free(ifp);
	ifc_free_unit(ifc, unit);
	sx_xunlock(&t4_trace_lock);

	return (0);
}

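/* Called during mod_load: set up the tracer lock and register the cloner. */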
void
t4_tracer_modload()
{

	sx_init(&t4_trace_lock, "T4/T5 tracer lock");
	t4_cloner = if_clone_advanced(t4_cloner_name, 0, t4_cloner_match,
	    t4_cloner_create, t4_cloner_destroy);
}

void
t4_tracer_modunload()
{

	if (t4_cloner != NULL) {
		/*
		 * The module is being unloaded so the nexus drivers have
		 * detached.  The tracing interfaces can not outlive the nexus
		 * (ifp->if_softc is the nexus) and must have been destroyed
		 * already.  XXX: but if_clone is opaque to us and we can't
		 * assert LIST_EMPTY(&t4_cloner->ifc_iflist) at this time.
		 */
		if_clone_detach(t4_cloner);
	}
	sx_destroy(&t4_trace_lock);
}

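/*
 * Disconnect the adapter from its tracing ifnet (if any).  The cloned ifnet
 * may outlive the adapter, so only the coupling is torn down here; the ifnet
 * itself is destroyed later by the cloner.
 */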
void
t4_tracer_port_detach(struct adapter *sc)
{

	sx_xlock(&t4_trace_lock);
	if (sc->ifp != NULL) {
		mtx_lock(&sc->ifp_lock);
		sc->ifp->if_softc = NULL;
		sc->ifp = NULL;
		mtx_unlock(&sc->ifp_lock);
	}
	ifmedia_removeall(&sc->media);
	sx_xunlock(&t4_trace_lock);
}

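/*
 * Report the first valid tracing filter at or after t->idx.  An idx of 0xff
 * in the response means no valid filter was found.
 */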
int
t4_get_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc, i, enabled;
	struct trace_params tp;

	if (t->idx >= NTRACE) {
		t->idx = 0xff;
		t->enabled = 0;
		t->valid = 0;
		return (0);
	}

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4gett");
	if (rc)
		return (rc);

	for (i = t->idx; i < NTRACE; i++) {
		if (isset(&sc->tracer_valid, i)) {
			t4_get_trace_filter(sc, &tp, i, &enabled);
			t->idx = i;
			t->enabled = enabled;
			t->valid = 1;
			memcpy(&t->tp.data[0], &tp.data[0], sizeof(t->tp.data));
			memcpy(&t->tp.mask[0], &tp.mask[0], sizeof(t->tp.mask));
			t->tp.snap_len = tp.snap_len;
			t->tp.min_len = tp.min_len;
			t->tp.skip_ofst = tp.skip_ofst;
			t->tp.skip_len = tp.skip_len;
			t->tp.invert = tp.invert;

			/* convert channel to port iff 0 <= port < 8. */
			if (tp.port < 4)
				t->tp.port = sc->chan_map[tp.port];
			else if (tp.port < 8)
				t->tp.port = sc->chan_map[tp.port - 4] + 4;
			else
				t->tp.port = tp.port;

			goto done;
		}
	}

	t->idx = 0xff;
	t->enabled = 0;
	t->valid = 0;
done:
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}

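/*
 * Install, enable, or disable the tracing filter at t->idx.  A request with
 * t->valid == 0 merely toggles an already-programmed filter.  The MPS trace
 * engine is switched on when the first tracer is enabled and off when the
 * last one is disabled.
 */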
int
t4_set_tracer(struct adapter *sc, struct t4_tracer *t)
{
	int rc;
	struct trace_params tp, *tpp;

	if (t->idx >= NTRACE)
		return (EINVAL);

	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
	    "t4sett");
	if (rc)
		return (rc);

	/*
	 * If no tracing filter is specified this time then check if the filter
	 * at the index is valid anyway because it was set previously.  If so
	 * then this is a legitimate enable/disable operation.
	 */
	if (t->valid == 0) {
		if (isset(&sc->tracer_valid, t->idx))
			tpp = NULL;
		else
			rc = EINVAL;
		goto done;
	}

	if (t->tp.port > 19 || t->tp.snap_len > 9600 ||
	    t->tp.min_len > M_TFMINPKTSIZE || t->tp.skip_len > M_TFLENGTH ||
	    t->tp.skip_ofst > M_TFOFFSET) {
		rc = EINVAL;
		goto done;
	}

	memcpy(&tp.data[0], &t->tp.data[0], sizeof(tp.data));
	memcpy(&tp.mask[0], &t->tp.mask[0], sizeof(tp.mask));
	tp.snap_len = t->tp.snap_len;
	tp.min_len = t->tp.min_len;
	tp.skip_ofst = t->tp.skip_ofst;
	tp.skip_len = t->tp.skip_len;
	tp.invert = !!t->tp.invert;

	/* convert port to channel iff 0 <= port < 8. */
	if (t->tp.port < 4) {
		if (sc->port[t->tp.port] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port]->tx_chan;
	} else if (t->tp.port < 8) {
		if (sc->port[t->tp.port - 4] == NULL) {
			rc = EINVAL;
			goto done;
		}
		tp.port = sc->port[t->tp.port - 4]->tx_chan + 4;
	} else
		tp.port = t->tp.port;	/* 8 <= port <= 19: used as is. */
	tpp = &tp;
done:
	if (rc == 0) {
		rc = -t4_set_trace_filter(sc, tpp, t->idx, t->enabled);
		if (rc == 0) {
			if (t->enabled) {
				setbit(&sc->tracer_valid, t->idx);
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, F_TRCEN);
				}
				setbit(&sc->tracer_enabled, t->idx);
			} else {
				clrbit(&sc->tracer_enabled, t->idx);
				if (sc->tracer_enabled == 0) {
					t4_set_reg_field(sc, A_MPS_TRC_CFG,
					    F_TRCEN, 0);
				}
			}
		}
	}
	end_synchronized_op(sc, LOCK_HELD);

	return (rc);
}

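/*
 * Rx handler for T4 trace packets: strip the cpl_trace_pkt header and tap
 * the traced frame to bpf(4) via the tracing ifnet, if one is attached.
 * The mbuf is always freed here.
 */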
int
t4_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct ifnet *ifp;

	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	mtx_lock(&sc->ifp_lock);
	ifp = sc->ifp;
	if (ifp != NULL) {
		m_adj(m, sizeof(struct cpl_trace_pkt));
		m->m_pkthdr.rcvif = ifp;
		ETHER_BPF_MTAP(ifp, m);
	}
	mtx_unlock(&sc->ifp_lock);
	m_freem(m);

	return (0);
}

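/* Same as t4_trace_pkt() but for the T5's larger cpl_t5_trace_pkt header. */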
int
t5_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct ifnet *ifp;

	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
	    rss->opcode));

	mtx_lock(&sc->ifp_lock);
	ifp = sc->ifp;
	if (ifp != NULL) {
		m_adj(m, sizeof(struct cpl_t5_trace_pkt));
		m->m_pkthdr.rcvif = ifp;
		ETHER_BPF_MTAP(ifp, m);
	}
	mtx_unlock(&sc->ifp_lock);
	m_freem(m);

	return (0);
}

static void
tracer_init(void *arg)
{

	return;
}

static int
tracer_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
{
	int rc = 0;
	struct adapter *sc;
	struct ifreq *ifr = (struct ifreq *)data;

	switch (cmd) {
	case SIOCSIFMTU:
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFCAP:
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		sx_xlock(&t4_trace_lock);
		sc = ifp->if_softc;
		if (sc == NULL)
			rc = EIO;
		else
			rc = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
		sx_xunlock(&t4_trace_lock);
		break;
	default:
		rc = ether_ioctl(ifp, cmd, data);
	}

	return (rc);
}

static int
tracer_transmit(struct ifnet *ifp, struct mbuf *m)
{

	m_freem(m);
	return (0);
}

static void
tracer_qflush(struct ifnet *ifp)
{

	return;
}

static int
tracer_media_change(struct ifnet *ifp)
{

	return (EOPNOTSUPP);
}

static void
tracer_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;

	return;
}