/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>
#include <netinet/in.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "t4_l2t.h"
/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock; individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  Dropping an L2T entry only decrements its reference count and
 * can therefore happen in parallel with entry allocation, but no entry can
 * change state or increment its ref count during allocation as both of
 * these perform lookups.
 *
 * Note: We do not take references to ifnets in this module because both
 * the TOE and the sockets already hold references to the interfaces and the
 * lifetime of an L2T entry is fully contained in the lifetime of the TOE.
 */
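
/*
 * Illustrative sketch (not part of the driver): given the rules above, a
 * caller that allocates and then initializes an entry takes the table lock
 * as a writer and nests the entry lock inside it, e.g.:
 *
 *	rw_wlock(&d->lock);
 *	e = t4_alloc_l2e(d);
 *	if (e != NULL) {
 *		mtx_lock(&e->lock);
 *		... initialize the entry, set its refcnt ...
 *		mtx_unlock(&e->lock);
 *	}
 *	rw_wunlock(&d->lock);
 *
 * t4_l2t_alloc_switching() below follows exactly this pattern.
 */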

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held as a
 * writer.
 */
struct l2t_entry *
t4_alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	rw_assert(&d->lock, RA_WLOCKED);

	if (!atomic_load_acq_int(&d->nfree))
		return (NULL);

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
		if (atomic_load_acq_int(&e->refcnt) == 0)
			goto found;

	for (e = d->l2tab; atomic_load_acq_int(&e->refcnt); ++e)
		continue;
found:
	d->rover = e + 1;
	atomic_subtract_int(&d->nfree, 1);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state < L2T_STATE_SWITCHING) {
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
		}
	}

	e->state = L2T_STATE_UNUSED;
	return (e);
}

/*
 * Find an existing switching entry that matches (dmac, vlan, port), or
 * claim a free one.  Must be called with l2t_data.lock held as a writer.
 */
static struct l2t_entry *
find_or_alloc_l2e(struct l2t_data *d, uint16_t vlan, uint8_t port,
    uint8_t *dmac)
{
	struct l2t_entry *end, *e, **p;
	struct l2t_entry *first_free = NULL;

	for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
		if (atomic_load_acq_int(&e->refcnt) == 0) {
			if (!first_free)
				first_free = e;
		} else if (e->state == L2T_STATE_SWITCHING &&
		    memcmp(e->dmac, dmac, ETHER_ADDR_LEN) == 0 &&
		    e->vlan == vlan && e->lport == port)
			return (e);	/* Found existing entry that matches. */
	}

	if (first_free == NULL)
		return (NULL);	/* No match and no room for a new entry. */

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	e = first_free;
	if (e->state < L2T_STATE_SWITCHING) {
		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next) {
			if (*p == e) {
				*p = e->next;
				e->next = NULL;
				break;
			}
		}
	}
	e->state = L2T_STATE_UNUSED;
	return (e);
}

/*
 * Fill out a CPL_L2T_WRITE_REQ at 'dst' that programs the hardware L2T
 * entry for 'e' with its MAC/VLAN/port.  'sync' requests a synchronous
 * write; 'reply' asks the firmware for a completion message.
 */
static void
mk_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync, int reply,
    void *dst)
{
	struct cpl_l2t_write_req *req;
	int idx;

	req = dst;
	idx = e->idx + sc->vres.l2t.start;
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
	    V_SYNC_WR(sync) | V_TID_QID(e->iqid)));
	req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!reply));
	req->l2t_idx = htons(idx);
	req->vlan = htons(e->vlan);
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
}

/*
 * Write an L2T entry.  Must be called with the entry locked.
 * The write may be synchronous or asynchronous.
 */
int
t4_write_l2e(struct l2t_entry *e, int sync)
{
	struct sge_wrq *wrq;
	struct adapter *sc;
	struct wrq_cookie cookie;
	struct cpl_l2t_write_req *req;

	mtx_assert(&e->lock, MA_OWNED);
	MPASS(e->wrq != NULL);

	wrq = e->wrq;
	sc = wrq->adapter;

	req = start_wrq_wr(wrq, howmany(sizeof(*req), 16), &cookie);
	if (req == NULL)
		return (ENOMEM);

	mk_write_l2e(sc, e, sync, sync, req);

	commit_wrq_wr(wrq, req, &cookie);

	if (sync && e->state != L2T_STATE_SWITCHING)
		e->state = L2T_STATE_SYNC_WRITE;

	return (0);
}
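
/*
 * Illustrative sketch (not part of the driver): a typical caller updates
 * the entry and writes it to hardware under the entry lock, e.g.:
 *
 *	mtx_lock(&e->lock);
 *	memcpy(e->dmac, new_dmac, ETHER_ADDR_LEN);
 *	rc = t4_write_l2e(e, 1);
 *	mtx_unlock(&e->lock);
 *
 * where 'new_dmac' stands in for a freshly resolved hardware address.
 */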

/*
 * Allocate an L2T entry for use by a TLS connection.  These entries are
 * associated with a specific VLAN and destination MAC that never changes.
 * However, multiple TLS connections might share a single entry.
 *
 * If a new L2T entry is allocated, a work request to initialize it is
 * written to 'txq' and 'ndesc' will be set to 1.  Otherwise, 'ndesc'
 * will be set to 0.
 *
 * To avoid races, separate L2T entries are reserved for individual
 * queues since the L2T entry update is written to a txq just prior to
 * TLS work requests that will depend on it being written.
 */
struct l2t_entry *
t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst,
    int *ndesc, uint16_t vlan, uint8_t port, uint8_t *eth_addr)
{
	struct l2t_data *d;
	struct l2t_entry *e;
	int i;

	TXQ_LOCK_ASSERT_OWNED(txq);

	d = sc->l2t;
	*ndesc = 0;

	rw_rlock(&d->lock);

	/* First, try to find an existing entry. */
	for (i = 0; i < d->l2t_size; i++) {
		e = &d->l2tab[i];
		if (e->state != L2T_STATE_TLS)
			continue;
		if (e->vlan == vlan && e->lport == port &&
		    e->wrq == (struct sge_wrq *)txq &&
		    memcmp(e->dmac, eth_addr, ETHER_ADDR_LEN) == 0) {
			if (atomic_fetchadd_int(&e->refcnt, 1) == 0) {
				/*
				 * This entry wasn't held but is still
				 * valid, so decrement nfree.
				 */
				atomic_subtract_int(&d->nfree, 1);
			}
			KASSERT(e->refcnt > 0,
			    ("%s: refcount overflow", __func__));
			rw_runlock(&d->lock);
			return (e);
		}
	}

	/*
	 * Don't bother rechecking if the upgrade fails since the txq is
	 * already locked.
	 */
	if (!rw_try_upgrade(&d->lock)) {
		rw_runlock(&d->lock);
		rw_wlock(&d->lock);
	}

	/* Match not found, allocate a new entry. */
	e = t4_alloc_l2e(d);
	if (e == NULL) {
		rw_wunlock(&d->lock);
		return (e);
	}

	/* Initialize the entry. */
	e->state = L2T_STATE_TLS;
	e->vlan = vlan;
	e->lport = port;
	e->iqid = sc->sge.fwq.abs_id;
	e->wrq = (struct sge_wrq *)txq;
	memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
	atomic_store_rel_int(&e->refcnt, 1);
	rw_wunlock(&d->lock);

	/* Write out the work request. */
	*ndesc = howmany(sizeof(struct cpl_l2t_write_req), EQ_ESIZE);
	MPASS(*ndesc == 1);
	mk_write_l2e(sc, e, 1, 0, dst);

	return (e);
}
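
/*
 * Illustrative sketch (not part of the driver): a hypothetical TLS
 * transmit path would call the allocator with the txq locked so that any
 * CPL_L2T_WRITE_REQ lands ahead of the work requests that depend on it:
 *
 *	TXQ_LOCK(txq);
 *	e = t4_l2t_alloc_tls(sc, txq, wr_dst, &ndesc, vlan, port, dmac);
 *	if (e != NULL && ndesc != 0)
 *		... account for the descriptor consumed by the write ...
 *	TXQ_UNLOCK(txq);
 *
 * 'wr_dst' stands in for space already reserved on the txq's ring.
 */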

/*
 * Allocate an L2T entry for use by a switching rule.  Such entries need to
 * be explicitly freed, and while busy they are not on any hash chain, so
 * normal address resolution updates do not see them.
 */
struct l2t_entry *
t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
    uint8_t *eth_addr)
{
	struct l2t_data *d = sc->l2t;
	struct l2t_entry *e;
	int rc;

	rw_wlock(&d->lock);
	e = find_or_alloc_l2e(d, vlan, port, eth_addr);
	if (e) {
		if (atomic_load_acq_int(&e->refcnt) == 0) {
			mtx_lock(&e->lock);    /* avoid race with t4_l2t_free */
			e->wrq = &sc->sge.ctrlq[0];
			e->iqid = sc->sge.fwq.abs_id;
			e->state = L2T_STATE_SWITCHING;
			e->vlan = vlan;
			e->lport = port;
			memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
			atomic_store_rel_int(&e->refcnt, 1);
			atomic_subtract_int(&d->nfree, 1);
			rc = t4_write_l2e(e, 0);
			mtx_unlock(&e->lock);
			if (rc != 0)
				e = NULL;
		} else {
			MPASS(e->vlan == vlan);
			MPASS(e->lport == port);
			atomic_add_int(&e->refcnt, 1);
		}
	}
	rw_wunlock(&d->lock);
	return (e);
}
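
/*
 * Illustrative sketch (not part of the driver): a filter that switches
 * packets would hold the entry for the rule's lifetime and drop its
 * reference on teardown via the release helper in t4_l2t.h, e.g.:
 *
 *	e = t4_l2t_alloc_switching(sc, vlan, port, dmac);
 *	if (e == NULL)
 *		return (ENOMEM);
 *	... install the rule, keeping a pointer to e ...
 *	t4_l2t_release(e);	(when the rule is destroyed)
 */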

int
t4_init_l2t(struct adapter *sc, int flags)
{
	int i, l2t_size;
	struct l2t_data *d;

	l2t_size = sc->vres.l2t.size;
	if (l2t_size < 2)	/* At least 1 bucket for IP and 1 for IPv6 */
		return (EINVAL);

	d = malloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), M_CXGBE,
	    M_ZERO | flags);
	if (!d)
		return (ENOMEM);

	d->l2t_size = l2t_size;
	d->rover = d->l2tab;
	atomic_store_rel_int(&d->nfree, l2t_size);
	rw_init(&d->lock, "L2T");

	for (i = 0; i < l2t_size; i++) {
		struct l2t_entry *e = &d->l2tab[i];

		e->idx = i;
		e->state = L2T_STATE_UNUSED;
		mtx_init(&e->lock, "L2T_E", NULL, MTX_DEF);
		STAILQ_INIT(&e->wr_list);
		atomic_store_rel_int(&e->refcnt, 0);
	}

	sc->l2t = d;

	return (0);
}
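
/*
 * Illustrative sketch (not part of the driver): 'flags' is passed through
 * to malloc(9), so attach-time setup and teardown look roughly like:
 *
 *	if (t4_init_l2t(sc, M_WAITOK) != 0)
 *		... fail attach ...
 *	...
 *	if (sc->l2t)
 *		t4_free_l2t(sc->l2t);
 */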

int
t4_free_l2t(struct l2t_data *d)
{
	int i;

	for (i = 0; i < d->l2t_size; i++)
		mtx_destroy(&d->l2tab[i].lock);
	rw_destroy(&d->lock);
	free(d, M_CXGBE);

	return (0);
}

int
do_l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(rpl);
	unsigned int idx = tid % L2T_SIZE;

	if (__predict_false(rpl->status != CPL_ERR_NONE)) {
		log(LOG_ERR,
		    "Unexpected L2T_WRITE_RPL (%u) for entry at hw_idx %u\n",
		    rpl->status, idx);
		return (EINVAL);
	}

	return (0);
}

static inline unsigned int
vlan_prio(const struct l2t_entry *e)
{
	/* The 802.1p priority lives in the top 3 bits of the VLAN TCI. */
	return (e->vlan >> 13);
}

static char
l2e_state(const struct l2t_entry *e)
{
	switch (e->state) {
	case L2T_STATE_VALID: return ('V');  /* valid, fast-path entry */
	case L2T_STATE_STALE: return ('S');  /* needs revalidation, but usable */
	case L2T_STATE_SYNC_WRITE: return ('W');
	case L2T_STATE_RESOLVING:
		return (STAILQ_EMPTY(&e->wr_list) ? 'R' : 'A');
	case L2T_STATE_SWITCHING: return ('X');
	case L2T_STATE_TLS: return ('T');
	default: return ('U');
	}
}
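
/*
 * Dump the L2 table via sysctl.  The sample row below is illustrative
 * only (the exact sysctl path depends on how the node is attached in
 * t4_main.c); the state letter is as returned by l2e_state() above:
 *
 *	 Idx IP address      Ethernet address  VLAN/P LP State Users Port
 *	   0 192.0.2.1       00:07:43:ab:cd:ef 4095 0  0   V       1 cc0
 */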

int
sysctl_l2t(SYSCTL_HANDLER_ARGS)
{
	struct adapter *sc = arg1;
	struct l2t_data *l2t = sc->l2t;
	struct l2t_entry *e;
	struct sbuf *sb;
	int rc, i, header = 0;
	char ip[INET6_ADDRSTRLEN];

	if (l2t == NULL)
		return (ENXIO);

	rc = sysctl_wire_old_buffer(req, 0);
	if (rc != 0)
		return (rc);

	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
	if (sb == NULL)
		return (ENOMEM);

	e = &l2t->l2tab[0];
	for (i = 0; i < l2t->l2t_size; i++, e++) {
		mtx_lock(&e->lock);
		if (e->state == L2T_STATE_UNUSED)
			goto skip;

		if (header == 0) {
			sbuf_printf(sb, " Idx IP address      "
			    "Ethernet address  VLAN/P LP State Users Port");
			header = 1;
		}
		if (e->state >= L2T_STATE_SWITCHING)
			ip[0] = 0;
		else {
			inet_ntop(e->ipv6 ? AF_INET6 : AF_INET, &e->addr[0],
			    &ip[0], sizeof(ip));
		}

		/*
		 * XXX: IPv6 addresses may not align properly in the output.
		 */
		sbuf_printf(sb, "\n%4u %-15s %02x:%02x:%02x:%02x:%02x:%02x %4d"
		    " %u %2u   %c   %5u %s",
		    e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
		    e->dmac[3], e->dmac[4], e->dmac[5],
		    e->vlan & 0xfff, vlan_prio(e), e->lport,
		    l2e_state(e), atomic_load_acq_int(&e->refcnt),
		    e->ifp ? if_name(e->ifp) : "-");
skip:
		mtx_unlock(&e->lock);
	}

	rc = sbuf_finish(sb);
	sbuf_delete(sb);

	return (rc);
}