/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/iflib.h>
#ifdef RSS
#include <net/rss_config.h>
#endif

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "ifdi_if.h"
#include "enic.h"

#include "opt_inet.h"
#include "opt_inet6.h"
static int enic_isc_txd_encap(void *, if_pkt_info_t);
static void enic_isc_txd_flush(void *, uint16_t, qidx_t);
static int enic_isc_txd_credits_update(void *, uint16_t, bool);
static int enic_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
static int enic_isc_rxd_pkt_get(void *, if_rxd_info_t);
static void enic_isc_rxd_refill(void *, if_rxd_update_t);
static void enic_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
static int enic_legacy_intr(void *);
static void enic_initial_post_rx(struct enic *, struct vnic_rq *);
static int enic_wq_service(struct vnic_dev *, struct cq_desc *, u8, u16, u16,
    void *);
static int enic_rq_service(struct vnic_dev *, struct cq_desc *, u8, u16, u16,
    void *);

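/*
 * iflib transmit/receive method table for the enic driver. iflib invokes
 * these callbacks to encode and post descriptors and to harvest
 * completions; the queue state itself lives in the vnic_wq/vnic_rq
 * structures.
 */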
struct if_txrx	enic_txrx = {
	.ift_txd_encap = enic_isc_txd_encap,
	.ift_txd_flush = enic_isc_txd_flush,
	.ift_txd_credits_update = enic_isc_txd_credits_update,
	.ift_rxd_available = enic_isc_rxd_available,
	.ift_rxd_pkt_get = enic_isc_rxd_pkt_get,
	.ift_rxd_refill = enic_isc_rxd_refill,
	.ift_rxd_flush = enic_isc_rxd_flush,
	.ift_legacy_intr = enic_legacy_intr
};

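/*
 * Encode the DMA segments of one packet into WQ descriptors. Each segment
 * becomes one descriptor; EOP and CQ_ENTRY are set only on the last
 * segment, so the NIC generates a single completion per packet. No
 * checksum/TSO offload state is encoded here (offload_mode stays 0).
 */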
static int
enic_isc_txd_encap(void *vsc, if_pkt_info_t pi)
{
	struct enic_softc *softc;
	struct enic *enic;
	struct vnic_wq *wq;
	int nsegs;
	int i;

	struct wq_enet_desc *desc;
	uint64_t bus_addr;
	uint16_t mss = 7;
	uint16_t header_len = 0;
	uint8_t offload_mode = 0;
	uint8_t eop = 0, cq;
	uint8_t vlan_tag_insert = 0;
	unsigned short vlan_id = 0;

	unsigned int wq_desc_avail;
	int head_idx;
	unsigned int desc_count, data_len;

	softc = vsc;
	enic = &softc->enic;

	wq = &enic->wq[pi->ipi_qsidx];
	nsegs = pi->ipi_nsegs;

	ENIC_LOCK(softc);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;

	for (i = 0; i < nsegs; i++) {
		eop = 0;
		cq = 0;
		wq->cq_pend++;
		/* Request a completion only for the last segment. */
		if (i + 1 == nsegs) {
			eop = 1;
			cq = 1;
			wq->cq_pend = 0;
		}
		desc = wq->ring.descs;
		bus_addr = pi->ipi_segs[i].ds_addr;
		data_len = pi->ipi_segs[i].ds_len;

		wq_enet_desc_enc(&desc[head_idx], bus_addr, data_len, mss,
		    header_len, offload_mode, eop, cq, 0,
		    vlan_tag_insert, vlan_id, 0);

		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;
	}

	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	/* Tell iflib where the producer index now stands. */
	pi->ipi_new_pidx = head_idx;
	ENIC_UNLOCK(softc);

	return (0);
}

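/*
 * Notify the NIC of newly encoded descriptors by writing the current
 * head index into the WQ posted-index register.
 */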
static void
enic_isc_txd_flush(void *vsc, uint16_t txqid, qidx_t pidx)
{
	struct enic_softc *softc;
	struct enic *enic;
	struct vnic_wq *wq;
	int head_idx;

	softc = vsc;
	enic = &softc->enic;

	ENIC_LOCK(softc);
	wq = &enic->wq[txqid];
	head_idx = wq->head_idx;

	ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, head_idx);
	ENIC_UNLOCK(softc);
}

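/*
 * Report transmit completions to iflib. With clear == false this only
 * answers whether any completed work is pending; with clear == true it
 * services the completion queue and returns the number of descriptors
 * reclaimed.
 */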
static int
enic_isc_txd_credits_update(void *vsc, uint16_t txqid, bool clear)
{
	struct enic_softc *softc;
	struct enic *enic;
	struct vnic_wq *wq;
	struct vnic_cq *cq;
	int processed;
	unsigned int cq_wq;
	unsigned int wq_work_to_do = 10;
	unsigned int wq_work_avail;

	softc = vsc;
	enic = &softc->enic;
	wq = &softc->enic.wq[txqid];

	cq_wq = enic_cq_wq(enic, txqid);
	cq = &enic->cq[cq_wq];

	ENIC_LOCK(softc);
	wq_work_avail = vnic_cq_work(cq, wq_work_to_do);
	ENIC_UNLOCK(softc);

	if (wq_work_avail == 0)
		return (0);

	if (!clear)
		return (1);

	ENIC_LOCK(softc);
	vnic_cq_service(cq, wq_work_to_do, enic_wq_service, NULL);

	processed = wq->processed;
	wq->processed = 0;

	ENIC_UNLOCK(softc);

	return (processed);
}

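/*
 * Return the number of receive completions pending on this queue's CQ,
 * capped at iflib's budget.
 */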
static int
enic_isc_rxd_available(void *vsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct enic_softc *softc;
	struct enic *enic;
	struct vnic_cq *cq;
	unsigned int rq_work_to_do = budget;
	unsigned int rq_work_avail = 0;
	unsigned int cq_rq;

	softc = vsc;
	enic = &softc->enic;

	cq_rq = enic_cq_rq(&softc->enic, rxqid);
	cq = &enic->cq[cq_rq];

	rq_work_avail = vnic_cq_work(cq, rq_work_to_do);
	return (rq_work_avail);
}

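/*
 * Service one receive completion and describe the received frame in the
 * if_rxd_info supplied by iflib (enic_rq_service() fills it in via the
 * opaque pointer), returning the consumed interrupt credits to the
 * device. Returns 0 on success, -1 if no completion was ready.
 */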
static int
enic_isc_rxd_pkt_get(void *vsc, if_rxd_info_t ri)
{
	struct enic_softc *softc;
	struct enic *enic;
	struct vnic_cq *cq;
	unsigned int rq_work_to_do = 1;
	unsigned int rq_work_done = 0;
	unsigned int cq_rq;

	softc = vsc;
	enic = &softc->enic;

	cq_rq = enic_cq_rq(&softc->enic, ri->iri_qsidx);
	cq = &enic->cq[cq_rq];
	ENIC_LOCK(softc);
	rq_work_done = vnic_cq_service(cq, rq_work_to_do, enic_rq_service, ri);

	if (rq_work_done != 0) {
		vnic_intr_return_credits(&enic->intr[cq_rq], rq_work_done, 0,
		    1);
		ENIC_UNLOCK(softc);
		return (0);
	} else {
		ENIC_UNLOCK(softc);
		return (-1);
	}
}

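/*
 * Write the receive buffer addresses supplied by iflib into the RQ
 * descriptor ring, wrapping at the end of the ring. The doorbell write
 * happens later, in enic_isc_rxd_flush() or enic_initial_post_rx().
 */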
static void
enic_isc_rxd_refill(void *vsc, if_rxd_update_t iru)
{
	struct enic_softc *softc;
	struct vnic_rq *rq;
	struct rq_enet_desc *rqd;

	uint64_t *paddrs;
	int count;
	uint32_t pidx;
	int len;
	int idx;
	int i;

	count = iru->iru_count;
	len = iru->iru_buf_size;
	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;

	softc = vsc;
	rq = &softc->enic.rq[iru->iru_qsidx];
	rqd = rq->ring.descs;

	idx = pidx;
	for (i = 0; i < count; i++, idx++) {
		if (idx == rq->ring.desc_count)
			idx = 0;
		rq_enet_desc_enc(&rqd[idx], paddrs[i],
		    RQ_ENET_TYPE_ONLY_SOP, len);
	}

	rq->in_use = 1;

	if (rq->need_initial_post) {
		ENIC_BUS_WRITE_4(rq->ctrl, RX_FETCH_INDEX, 0);
	}

	enic_initial_post_rx(&softc->enic, rq);
}

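/*
 * Doorbell for the receive path: publish the producer index chosen by
 * iflib to the RQ posted-index register.
 */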
static void
enic_isc_rxd_flush(void *vsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
{
	struct enic_softc *softc;
	struct vnic_rq *rq;

	softc = vsc;
	rq = &softc->enic.rq[rxqid];

	/*
	 * pidx is the index of the last descriptor with a buffer the device
	 * can use, and the device needs to be told which index is one past
	 * that.
	 */

	ENIC_LOCK(softc);
	ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, pidx);
	ENIC_UNLOCK(softc);
}

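/* Legacy (INTx) interrupt mode is not supported by this driver. */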
static int
enic_legacy_intr(void *xsc)
{
	return (-1);
}

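/*
 * Reclaim transmit descriptors up to completed_index. The count is the
 * distance from the last seen completion, adjusted for ring wraparound;
 * the buf_service callback is unused on this path.
 */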
static inline void
vnic_wq_service(struct vnic_wq *wq, struct cq_desc *cq_desc,
    u16 completed_index, void (*buf_service) (struct vnic_wq *wq,
    struct cq_desc *cq_desc, /* struct vnic_wq_buf * *buf, */ void *opaque),
    void *opaque)
{
	int processed;

	processed = completed_index - wq->ring.last_count;
	if (processed < 0)
		processed += wq->ring.desc_count;
	if (processed == 0)
		processed++;

	wq->ring.desc_avail += processed;
	wq->processed += processed;
	wq->ring.last_count = completed_index;
}

/*
 * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has
 * already allocated the buffers and filled the RQ descriptor ring, so we
 * only need to push the posted index to the NIC.
 */
static void
enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_softc *softc = enic->softc;

	if (!rq->in_use || !rq->need_initial_post)
		return;

	ENIC_LOCK(softc);
	/* Make sure all prior writes are complete before the PIO write. */
	/* Post all but the last buffer to the VIC. */
	rq->posted_index = rq->ring.desc_count - 1;

	rq->rx_nb_hold = 0;

	ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, rq->posted_index);

	rq->need_initial_post = false;
	ENIC_UNLOCK(softc);
}

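/*
 * Per-entry callback invoked by vnic_cq_service() for transmit
 * completions; credits the owning WQ with the reclaimed descriptors.
 */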
static int
enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
    u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_wq_service(&enic->wq[q_number], cq_desc, completed_index, NULL,
	    opaque);
	return (0);
}

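/*
 * Decode one receive completion descriptor and translate it into the
 * iflib if_rxd_info handed in through 'opaque': record the fragment index
 * and length and advance the consumer index, wrapping at the end of the
 * ring. Offload results (checksum, VLAN, RSS) are decoded but not yet
 * propagated to iflib.
 */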
static void
vnic_rq_service(struct vnic_rq *rq, struct cq_desc *cq_desc,
    u16 in_completed_index, int desc_return,
    void(*buf_service)(struct vnic_rq *rq, struct cq_desc *cq_desc,
    /* struct vnic_rq_buf * *buf, */ int skipped, void *opaque), void *opaque)
{
	if_rxd_info_t ri = (if_rxd_info_t) opaque;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;
	int cqidx;
	if_rxd_frag_t frag;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
	    &type, &color, &q_number, &completed_index,
	    &ingress_port, &fcoe, &eop, &sop, &rss_type,
	    &csum_not_calc, &rss_hash, &bytes_written,
	    &packet_error, &vlan_stripped, &vlan_tci, &checksum,
	    &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
	    &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
	    &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
	    &fcs_ok);

	cqidx = ri->iri_cidx;

	frag = &ri->iri_frags[0];
	frag->irf_idx = cqidx;
	frag->irf_len = bytes_written;

	if (++cqidx == rq->ring.desc_count)
		cqidx = 0;

	ri->iri_cidx = cqidx;
	ri->iri_nfrags = 1;
	ri->iri_len = bytes_written;
}

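/*
 * Per-entry callback invoked by vnic_cq_service() for receive
 * completions; dispatches to vnic_rq_service() for the owning RQ.
 */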
static int
enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
    u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);
	if_rxd_info_t ri = (if_rxd_info_t) opaque;

	vnic_rq_service(&enic->rq[ri->iri_qsidx], cq_desc, completed_index,
	    VNIC_RQ_RETURN_DESC, NULL, /* enic_rq_indicate_buf, */ opaque);

	return (0);
}

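/*
 * One-time setup for the simplified transmit path: pre-set the WQ
 * descriptor flag bits that never change at runtime.
 */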
void
enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
{
	struct wq_enet_desc *desc;
	struct vnic_wq *wq;
	unsigned int i;

	/*
	 * Fill WQ descriptor fields that never change. Every descriptor is
	 * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH
	 * descriptors (i.e. request one completion update every 32 packets).
	 */
	wq = &enic->wq[queue_idx];
	desc = (struct wq_enet_desc *)wq->ring.descs;
	for (i = 0; i < wq->ring.desc_count; i++, desc++) {
		desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT;
		if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1)
			desc->header_length_flags |=
			    (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT);
	}
}

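/* Enable the hardware work queue backing transmit queue 'queue_idx'. */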
void
enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
	vnic_wq_enable(&enic->wq[queue_idx]);
}

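/*
 * Disable the work queue, returning any non-zero error from
 * vnic_wq_disable().
 */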
int
enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
	int ret;

	ret = vnic_wq_disable(&enic->wq[queue_idx]);
	if (ret)
		return (ret);

	return (0);
}

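/*
 * Enable the receive queue and, on first use, post the initial set of
 * receive buffers to the NIC.
 */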
void
enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
	struct vnic_rq *rq;

	rq = &enic->rq[queue_idx];
	vnic_rq_enable(rq);
	enic_initial_post_rx(enic, rq);
}