/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include "enic.h"
#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_nic.h"
#include "vnic_stats.h"

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

#define VNIC_MAX_FLOW_COUNTERS 2048

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

void vnic_register_cbacks(struct vnic_dev *vdev,
	void *(*alloc_consistent)(void *priv, size_t size,
	    bus_addr_t *dma_handle, struct iflib_dma_info *res, u8 *name),
	void (*free_consistent)(void *priv,
	    size_t size, void *vaddr,
	    bus_addr_t dma_handle, struct iflib_dma_info *res))
{
	vdev->alloc_consistent = alloc_consistent;
	vdev->free_consistent = free_consistent;
}

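/*
 * Resource discovery: BAR0 begins with a vnic_resource_header (or a
 * mgmt_barmap_hdr for a management vNIC), identified by magic/version,
 * followed by an array of vnic_resource entries terminated by
 * RES_TYPE_EOL.  Each entry names a resource type, the BAR it lives in,
 * an offset into that BAR, and a count.  Entries are read into bounce
 * buffers here rather than mapped directly.
 */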
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct enic_softc *softc = vdev->softc;
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	int r_offset;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	rh = malloc(sizeof(*rh), M_DEVBUF, M_NOWAIT | M_ZERO);
	mrh = malloc(sizeof(*mrh), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!rh || !mrh) {
		pr_err("vNIC BAR0 res hdr alloc failed\n");
		free(rh, M_DEVBUF);
		free(mrh, M_DEVBUF);
		return -ENOMEM;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)rh, sizeof(*rh) / 4);
	ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)mrh, sizeof(*mrh) / 4);
	if ((rh->magic != VNIC_RES_MAGIC) ||
	    (rh->version != VNIC_RES_VERSION)) {
		if ((mrh->magic != MGMTVNIC_MAGIC) ||
		    mrh->version != MGMTVNIC_VERSION) {
			pr_err("vNIC BAR0 res magic/version error "
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				rh->magic, rh->version);
			free(rh, M_DEVBUF);
			free(mrh, M_DEVBUF);
			return -EINVAL;
		}
	}

	if (mrh->magic == MGMTVNIC_MAGIC)
		r_offset = sizeof(*mrh);
	else
		r_offset = sizeof(*rh);

	r = malloc(sizeof(*r), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!r) {
		free(rh, M_DEVBUF);
		free(mrh, M_DEVBUF);
		return -ENOMEM;
	}
	ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4);
	while ((type = r->type) != RES_TYPE_EOL) {
		u8 bar_num = r->bar;
		u32 bar_offset = r->bar_offset;
		u32 count = r->count;

		r_offset += sizeof(*r);

		if (bar_num >= num_bars) {
			/* Resource lives in a BAR we did not map; fetch
			 * the next entry and keep walking the table.
			 */
			ENIC_BUS_READ_REGION_4(softc, mem, r_offset,
			    (void *)r, sizeof(*r) / 4);
			continue;
		}

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			break;
		default:
			/* Not a resource type we track; skip to the
			 * next entry.
			 */
			ENIC_BUS_READ_REGION_4(softc, mem, r_offset,
			    (void *)r, sizeof(*r) / 4);
			continue;
		}

		vdev->res[type].count = count;
		bcopy(&softc->mem, &vdev->res[type].bar, sizeof(softc->mem));
		vdev->res[type].bar.offset = bar_offset;
		ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r,
		    sizeof(*r) / 4);
	}

	free(rh, M_DEVBUF);
	free(mrh, M_DEVBUF);
	free(r, M_DEVBUF);
	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	struct vnic_res *res;

	if (!vdev->res[type].bar.tag)
		return NULL;

	res = malloc(sizeof(*res), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!res)
		return NULL;
	bcopy(&vdev->res[type], res, sizeof(*res));

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		/* Instances of these resources are laid out in the BAR
		 * at a fixed stride.
		 */
		res->bar.offset += index * VNIC_RES_STRIDE;
		break;
	default:
		break;
	}

	return res;
}

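/*
 * Ring sizing example (the values follow from the alignment rules stated
 * below): a request for 100 descriptors of 16 bytes is rounded up to 128
 * descriptors, giving ring->size = 128 * 16 = 2048 bytes and
 * ring->size_unaligned = 2048 + 512, the extra 512 bytes leaving room to
 * align the base address after allocation.
 */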
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = VNIC_ALIGN(desc_count, count_align);

	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

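/*
 * Devcmd handshake: the caller places up to VNIC_DEVCMD_NARGS 64-bit
 * arguments in vdev->args, the command number is written to the
 * DEVCMD_CMD register, and the status register is polled (100 us per
 * iteration, up to 'wait' iterations) until STAT_BUSY clears.
 * STAT_ERROR means the error code is in arg 0; a status of all-ones
 * means the PCIe device has gone away.
 */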
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_res __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	int err;

	status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		ENIC_BUS_WRITE_REGION_4(devcmd, DEVCMD_ARGS(0),
		    (void *)&vdev->args[0], VNIC_DEVCMD_NARGS * 2);
	}

	ENIC_BUS_WRITE_4(devcmd, DEVCMD_CMD, cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) {
		return 0;
	}

	for (delay = 0; delay < wait; delay++) {
		udelay(100);

		status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = -(int)ENIC_BUS_READ_8(devcmd,
				    DEVCMD_ARGS(0));
				if (cmd != CMD_CAPABILITY)
					pr_err("Devcmd %d failed "
						"with error code %d\n",
						_CMD_N(cmd), err);
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				ENIC_BUS_READ_REGION_4(devcmd, bar,
				    DEVCMD_ARGS(0), (void *)&vdev->args[0],
				    VNIC_DEVCMD_NARGS * 2);
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}

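/*
 * Proxy devcmds: CMD_PROXY_BY_INDEX / CMD_PROXY_BY_BDF wrap another devcmd
 * so that it executes on behalf of a different vNIC.  Args 0 and 1 carry
 * the proxy index (or BDF) and the proxied command; the real arguments are
 * shifted up by two, which is why a proxied command supports two fewer
 * arguments than a direct one.  On return, arg 0 holds the proxy status
 * and the proxied results start at arg 1.
 */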
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *args, int nargs, int wait)
{
	u32 status;
	int err;

	/*
	 * A proxy command consumes 2 arguments: one for the proxy index,
	 * the other for the command to be proxied.
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return err;
	}

	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
{
	int err;

	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return -EINVAL;
	}
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));

	err = _vnic_dev_cmd(vdev, cmd, wait);

	memcpy(args, vdev->args, nargs * sizeof(args[0]));

	return err;
}

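/*
 * Most devcmds take at most two register arguments, passed in and returned
 * through a0/a1.  vnic_dev_cmd() routes the command through the configured
 * proxy mode, if any; vnic_dev_cmd_args() is the variant for commands with
 * longer argument lists.
 */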
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	u64 args[2];
	int err;

	args[0] = *a0;
	args[1] = *a1;
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args,
				ARRAY_SIZE(args), wait);
		break;
	}

	if (err == 0) {
		*a0 = args[0];
		*a1 = args[1];
	}

	return err;
}

int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
		      u64 *args, int nargs, int wait)
{
	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, nargs, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, nargs, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
	}
}

static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
		int nargs)
{
	memset(args, 0, nargs * sizeof(*args));
	args[0] = CMD_ADD_ADV_FILTER;
	args[1] = FILTER_CAP_MODE_V1_FLAG;
	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
}

int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
	u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err)
		return 0;
	return (a1 >= (u32)FILTER_DPDK_1);
}

/*  Determine the "best" filtering mode the VIC is capable of.  Returns one
 *  of 3 values or 0 on error:
 *	FILTER_DPDK_1 - advanced filters available
 *	FILTER_USNIC_IP_FLAG - advanced filters but with the restriction that
 *		the IP layer must be explicitly specified, i.e. cannot have a
 *		UDP filter that matches both IPv4 and IPv6.
 *	FILTER_IPV4_5TUPLE - fallback if neither of the 2 above is available;
 *		all other filter types are not available.
 *  The supported filter actions are returned in filter_actions.
 */
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
				 u8 *filter_actions)
{
	u64 args[4];
	int err;
	u32 max_level = 0;

	err = vnic_dev_advanced_filters_cap(vdev, args, 4);

	/* determine supported filter actions */
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
	if (args[2] == FILTER_CAP_MODE_V1)
		*filter_actions = args[3];

	if (err || ((args[0] == 1) && (args[1] == 0))) {
		/* Adv filter command not supported, or adv filters available
		 * but not enabled.  Try the normal filter capability command.
		 */
		args[0] = CMD_ADD_FILTER;
		args[1] = 0;
		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
		if (err)
			return err;
		max_level = args[1];
		goto parse_max_level;
	} else if (args[2] == FILTER_CAP_MODE_V1) {
		/* parse filter capability mask in args[1] */
		if (args[1] & FILTER_DPDK_1_FLAG)
			*mode = FILTER_DPDK_1;
		else if (args[1] & FILTER_USNIC_IP_FLAG)
			*mode = FILTER_USNIC_IP;
		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
			*mode = FILTER_IPV4_5TUPLE;
		return 0;
	}
	max_level = args[1];
parse_max_level:
	if (max_level >= (u32)FILTER_USNIC_IP)
		*mode = FILTER_USNIC_IP;
	else
		*mode = FILTER_IPV4_5TUPLE;
	return 0;
}

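/*
 * Probe CMD_NIC_CFG support via CMD_CAPABILITY.  A nonzero reply in both
 * a0 and a1 means the NIC config can be checked (cfg_chk); the
 * CMD_NIC_CFG_CAPF_UDP_WEAK bit in the upper 32 bits of a1 marks adapters
 * with only weak UDP RSS support.
 */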
void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
				   bool *weak)
{
	u64 a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	*cfg_chk = false;
	*weak = false;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err == 0 && a0 != 0 && a1 != 0) {
		*cfg_chk = true;
		*weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
	}
}

int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;
	int rc;

	if (!vdev->stats)
		return -ENOMEM;

	*stats = vdev->stats;
	a0 = vdev->stats_res.idi_paddr;
	a1 = sizeof(struct vnic_stats);

	bus_dmamap_sync(vdev->stats_res.idi_tag,
			vdev->stats_res.idi_map,
			BUS_DMASYNC_POSTREAD);
	rc = vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
	bus_dmamap_sync(vdev->stats_res.idi_tag,
			vdev->stats_res.idi_map,
			BUS_DMASYNC_PREREAD);
	return (rc);
}

/*
 * Configure counter DMA: have the adapter periodically DMA num_counters
 * flow counters into the pre-allocated flow_counters_res buffer.  A
 * period of 0 disables the DMA.
 */
int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period,
			     u32 num_counters)
{
	u64 args[3];
	int wait = 1000;
	int err;

	if (num_counters > VNIC_MAX_FLOW_COUNTERS)
		return -ENOMEM;
	if (period > 0 && (period < VNIC_COUNTER_DMA_MIN_PERIOD ||
	    num_counters == 0))
		return -EINVAL;

	args[0] = num_counters;
	args[1] = vdev->flow_counters_res.idi_paddr;
	args[2] = period;
	bus_dmamap_sync(vdev->flow_counters_res.idi_tag,
			vdev->flow_counters_res.idi_map,
			BUS_DMASYNC_POSTREAD);
	err = vnic_dev_cmd_args(vdev, CMD_COUNTER_DMA_CONFIG, args, 3, wait);
	bus_dmamap_sync(vdev->flow_counters_res.idi_tag,
			vdev->flow_counters_res.idi_map,
			BUS_DMASYNC_PREREAD);

	/* record if DMAs need to be stopped on close */
	if (!err)
		vdev->flow_counters_dma_active = (num_counters != 0 &&
						  period != 0);

	return err;
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return err;
}

int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return err;
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}

void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
{
	vdev->in_reset = state;
}

static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
{
	return vdev->in_reset;
}

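/*
 * Notify buffer: the adapter asynchronously DMAs link state, port speed,
 * MTU, etc. (struct vnic_devcmd_notify) into a host buffer registered via
 * CMD_NOTIFY.  a0 carries the buffer's physical address; a1 packs the
 * interrupt number into bits 32-47 and the buffer size into the low bits.
 */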
int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, bus_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	bus_dmamap_sync(vdev->notify_res.idi_tag,
			vdev->notify_res.idi_map,
			BUS_DMASYNC_PREWRITE);
	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	bus_dmamap_sync(vdev->notify_res.idi_tag,
			vdev->notify_res.idi_map,
			BUS_DMASYNC_POSTWRITE);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	}

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr = NULL;
	bus_addr_t notify_pa = 0;
	char name[NAME_MAX];
	static u32 instance;

	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
					      vdev->notify_pa, intr);
	}
	if (!vnic_dev_in_reset(vdev)) {
		snprintf(name, sizeof(name),
			"vnic_notify-%u", instance++);
		iflib_dma_alloc(vdev->softc->ctx,
				     sizeof(struct vnic_devcmd_notify),
				     &vdev->notify_res, BUS_DMA_NOWAIT);
		notify_pa = vdev->notify_res.idi_paddr;
		notify_addr = vdev->notify_res.idi_vaddr;
	}

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = NULL;
		vdev->notify_pa = 0;
		vdev->notify_sz = 0;
	}

	return err;
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		iflib_dma_free(&vdev->notify_res);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

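/*
 * The adapter writes the notify buffer asynchronously, so a consistent
 * snapshot is obtained by copying it out and verifying the checksum:
 * word 0 holds the sum of the remaining 32-bit words, and the copy is
 * retried until the two agree.
 */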
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		bus_dmamap_sync(vdev->notify_res.idi_tag,
				vdev->notify_res.idi_map,
				BUS_DMASYNC_PREREAD);
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		bus_dmamap_sync(vdev->notify_res.idi_tag,
				vdev->notify_res.idi_map,
				BUS_DMASYNC_POSTREAD);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT)) {
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	} else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

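/*
 * usec <-> hardware-unit conversion uses the mul/div ratio above.  With
 * the defaults (mul = 2, div = 3), one hardware unit is 1.5 usec, so e.g.
 * a request for 30 usec programs 30 * 2 / 3 = 20 hardware units.
 */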
u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}

u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
	char name[NAME_MAX];
	static u32 instance;
	struct enic_softc *softc;

	softc = vdev->softc;

	snprintf(name, sizeof(name), "vnic_stats-%u", instance++);
	iflib_dma_alloc(softc->ctx, sizeof(struct vnic_stats),
	    &vdev->stats_res, 0);
	vdev->stats = (struct vnic_stats *)vdev->stats_res.idi_vaddr;
	return vdev->stats == NULL ? -ENOMEM : 0;
}

/*
 * Initialize for up to VNIC_MAX_FLOW_COUNTERS
 */
int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev)
{
	char name[NAME_MAX];
	static u32 instance;
	struct enic_softc *softc;

	softc = vdev->softc;

	snprintf(name, sizeof(name), "vnic_flow_ctrs-%u", instance++);
	iflib_dma_alloc(softc->ctx,
	    sizeof(struct vnic_counter_counts) * VNIC_MAX_FLOW_COUNTERS,
	    &vdev->flow_counters_res, 0);
	vdev->flow_counters =
	    (struct vnic_counter_counts *)vdev->flow_counters_res.idi_vaddr;
	vdev->flow_counters_dma_active = 0;
	return vdev->flow_counters == NULL ? -ENOMEM : 0;
}

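/*
 * Walk the resource map and locate the devcmd region; the vdev is
 * unusable if either step fails.  Note the BAR argument to
 * vnic_dev_discover_res() is unused here (NULL is passed); resources
 * are read through softc->mem.
 */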
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
    struct enic_bar_info *mem, unsigned int num_bars)
{
	if (vnic_dev_discover_res(vdev, NULL, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	return NULL;
}

int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
{
	u64 a0 = overlay;
	u64 a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
				 u16 vxlan_udp_port_number)
{
	u64 a1 = vxlan_udp_port_number;
	u64 a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}

int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
{
	u64 a0 = VIC_FEATURE_VXLAN;
	u64 a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	/* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
	return ret == 0 &&
		(a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
		(FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
}

bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx)
{
	u64 a0 = 0;
	u64 a1 = 0;
	int wait = 1000;

	if (vnic_dev_cmd(vdev, CMD_COUNTER_ALLOC, &a0, &a1, wait))
		return false;
	*idx = (uint32_t)a0;
	return true;
}

bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx)
{
	u64 a0 = idx;
	u64 a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_COUNTER_FREE, &a0, &a1,
			    wait) == 0;
}

bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx,
			    bool reset, uint64_t *packets, uint64_t *bytes)
{
	u64 a0 = idx;
	u64 a1 = reset ? 1 : 0;
	int wait = 1000;

	if (reset) {
		/* query/reset returns updated counters */
		if (vnic_dev_cmd(vdev, CMD_COUNTER_QUERY, &a0, &a1, wait))
			return false;
		*packets = a0;
		*bytes = a1;
	} else {
		/* Get values DMA'd from the adapter */
		*packets = vdev->flow_counters[idx].vcc_packets;
		*bytes = vdev->flow_counters[idx].vcc_bytes;
	}
	return true;
}

device_t dev_from_vnic_dev(struct vnic_dev *vdev)
{
	return (vdev->softc->dev);
}