/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_network.h"
#include "lio_ctrl.h"
#include "cn23xx_pf_device.h"
#include "lio_image.h"
#include "lio_ioctl.h"
#include "lio_main.h"
#include "lio_rxtx.h"

static int	lio_set_rx_csum(if_t ifp, uint32_t data);
static int	lio_set_tso4(if_t ifp);
static int	lio_set_tso6(if_t ifp);
static int	lio_set_lro(if_t ifp);
static int	lio_change_mtu(if_t ifp, int new_mtu);
static int	lio_set_mcast_list(if_t ifp);
static inline enum	lio_ifflags lio_get_new_flags(if_t ifp);

static inline bool
lio_is_valid_ether_addr(const uint8_t *addr)
{

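	/* Reject multicast/broadcast addresses and the all-zeroes address. */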
	return (!(0x01 & addr[0]) && !((addr[0] + addr[1] + addr[2] + addr[3] +
					addr[4] + addr[5]) == 0x00));
}

static int
lio_change_dev_flags(if_t ifp)
{
	struct lio_ctrl_pkt	nctrl;
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	int ret = 0;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
	nctrl.ncmd.s.param1 = lio_get_new_flags(ifp);
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(oct, &nctrl);
	if (ret)
		lio_dev_err(oct, "Failed to change flags ret %d\n", ret);

	return (ret);
}

/*
 * lio_ioctl : ioctl entry point used to configure the interface.
 *
 * Returns 0 on success, a positive errno value on failure.
 */
int
lio_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct lio	*lio = if_getsoftc(ifp);
	struct ifreq	*ifrequest = (struct ifreq *)data;
	int	error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFADDR\n");
		if_setflagbits(ifp, IFF_UP, 0);
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFMTU\n");
		error = lio_change_mtu(ifp, ifrequest->ifr_mtu);
		break;
	case SIOCSIFFLAGS:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFFLAGS\n");
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((if_getflags(ifp) ^ lio->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					error = lio_change_dev_flags(ifp);
			} else {
				if (!(atomic_load_acq_int(&lio->ifstate) &
				      LIO_IFSTATE_DETACH))
					lio_open(lio);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				lio_stop(ifp);
		}
		lio->if_flags = if_getflags(ifp);
		break;
	case SIOCADDMULTI:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCADDMULTI\n");
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			error = lio_set_mcast_list(ifp);
		break;
	case SIOCDELMULTI:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCDELMULTI\n");
		break;
	case SIOCSIFMEDIA:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFMEDIA\n");
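		/* FALLTHROUGH */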
	case SIOCGIFMEDIA:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCGIFMEDIA\n");
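		/* FALLTHROUGH */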
	case SIOCGIFXMEDIA:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCGIFXMEDIA\n");
		error = ifmedia_ioctl(ifp, ifrequest, &lio->ifmedia, cmd);
		break;
	case SIOCSIFCAP:
		{
			int	features = ifrequest->ifr_reqcap ^
					if_getcapenable(ifp);

			lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFCAP (Set Capabilities)\n");

			if (!features)
				break;

			if (features & IFCAP_TXCSUM) {
				if_togglecapenable(ifp, IFCAP_TXCSUM);
				if (if_getcapenable(ifp) & IFCAP_TXCSUM)
					if_sethwassistbits(ifp, (CSUM_TCP |
								 CSUM_UDP |
								 CSUM_IP), 0);
				else
					if_sethwassistbits(ifp, 0,
							(CSUM_TCP | CSUM_UDP |
							 CSUM_IP));
			}
			if (features & IFCAP_TXCSUM_IPV6) {
				if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
				if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
					if_sethwassistbits(ifp, (CSUM_UDP_IPV6 |
							   CSUM_TCP_IPV6), 0);
				else
					if_sethwassistbits(ifp, 0,
							   (CSUM_UDP_IPV6 |
							    CSUM_TCP_IPV6));
			}
			if (features & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
				error |= lio_set_rx_csum(ifp, (features &
							       (IFCAP_RXCSUM |
							 IFCAP_RXCSUM_IPV6)));

			if (features & IFCAP_TSO4)
				error |= lio_set_tso4(ifp);

			if (features & IFCAP_TSO6)
				error |= lio_set_tso6(ifp);

			if (features & IFCAP_LRO)
				error |= lio_set_lro(ifp);

			if (features & IFCAP_VLAN_HWTAGGING)
				if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);

			if (features & IFCAP_VLAN_HWFILTER)
				if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);

			if (features & IFCAP_VLAN_HWTSO)
				if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);

			VLAN_CAPABILITIES(ifp);
			break;
		}
	default:
		lio_dev_dbg(lio->oct_dev, "ioctl: UNKNOWN (0x%X)\n", (int)cmd);
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

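/* Toggle IFCAP_TSO4 and keep the IPv4 TSO hwassist bit in sync. */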
static int
lio_set_tso4(if_t ifp)
{
	struct lio	*lio = if_getsoftc(ifp);

	if (if_getcapabilities(ifp) & IFCAP_TSO4) {
		if_togglecapenable(ifp, IFCAP_TSO4);
		if (if_getcapenable(ifp) & IFCAP_TSO4)
			if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
		else
			if_sethwassistbits(ifp, 0, CSUM_IP_TSO);
	} else {
		lio_dev_info(lio->oct_dev, "TSO4 capability not supported\n");
		return (EINVAL);
	}

	return (0);
}

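/* Toggle IFCAP_TSO6 and keep the IPv6 TSO hwassist bit in sync. */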
static int
lio_set_tso6(if_t ifp)
{
	struct lio	*lio = if_getsoftc(ifp);

	if (if_getcapabilities(ifp) & IFCAP_TSO6) {
		if_togglecapenable(ifp, IFCAP_TSO6);
		if (if_getcapenable(ifp) & IFCAP_TSO6)
			if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
		else
			if_sethwassistbits(ifp, 0, CSUM_IP6_TSO);
	} else {
		lio_dev_info(lio->oct_dev, "TSO6 capability not supported\n");
		return (EINVAL);
	}

	return (0);
}

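/*
 * Toggle the Rx checksum offload capabilities. LRO depends on Rx checksum
 * offload, so LRO is turned off whenever these bits are toggled while LRO
 * is enabled.
 */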
static int
lio_set_rx_csum(if_t ifp, uint32_t data)
{
	struct lio	*lio = if_getsoftc(ifp);
	int	ret = 0;

	if (if_getcapabilities(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
		if_togglecapenable(ifp, (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6));

		if (data) {
			/* LRO requires RXCSUM */
			if ((if_getcapabilities(ifp) & IFCAP_LRO) &&
			    (if_getcapenable(ifp) & IFCAP_LRO)) {
				ret = lio_set_feature(ifp, LIO_CMD_LRO_DISABLE,
						      LIO_LROIPV4 |
						      LIO_LROIPV6);
				if_togglecapenable(ifp, IFCAP_LRO);
			}
		}
	} else {
		lio_dev_info(lio->oct_dev, "Rx checksum offload capability not supported\n");
		return (ENODEV);
	}

	return ((ret) ? EINVAL : 0);
}

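/*
 * Toggle IFCAP_LRO. Enabling LRO requires both IPv4 and IPv6 Rx checksum
 * offload to be enabled; hardware LRO is programmed only when lio_hwlro
 * is set.
 */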
static int
lio_set_lro(if_t ifp)
{
	struct lio	*lio = if_getsoftc(ifp);
	int	ret = 0;

	if (!(if_getcapabilities(ifp) & IFCAP_LRO)) {
		lio_dev_info(lio->oct_dev, "LRO capability not supported\n");
		return (ENODEV);
	}

	if ((!(if_getcapenable(ifp) & IFCAP_LRO)) &&
	    (if_getcapenable(ifp) & IFCAP_RXCSUM) &&
	    (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6)) {
		if_togglecapenable(ifp, IFCAP_LRO);

		if (lio_hwlro)
			ret = lio_set_feature(ifp, LIO_CMD_LRO_ENABLE, LIO_LROIPV4 |
					      LIO_LROIPV6);

	} else if (if_getcapenable(ifp) & IFCAP_LRO) {
		if_togglecapenable(ifp, IFCAP_LRO);

		if (lio_hwlro)
			ret = lio_set_feature(ifp, LIO_CMD_LRO_DISABLE, LIO_LROIPV4 |
					      LIO_LROIPV6);
	} else
		lio_dev_info(lio->oct_dev, "LRO requires RXCSUM\n");

	return ((ret) ? EINVAL : 0);
}

static void
lio_mtu_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf)
{
	struct lio_soft_command	*sc = buf;
	volatile int		*mtu_sc_ctx;

	mtu_sc_ctx = sc->ctxptr;

	if (status) {
		lio_dev_err(oct, "MTU update control command failed. Status: %llx\n",
			    LIO_CAST64(status));
		*mtu_sc_ctx = -1;
		/*
		 * This barrier is required to be sure that the
		 * response has been written fully.
		 */
		wmb();
		return;
	}

	*mtu_sc_ctx = 1;

	/*
	 * This barrier is required to be sure that the response has been
	 * written fully.
	 */
	wmb();
}

/* @param ifp network device */
static int
lio_change_mtu(if_t ifp, int new_mtu)
{
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	struct lio_soft_command	*sc;
	union octeon_cmd	*ncmd;
	volatile int		*mtu_sc_ctx;
	int	retval = 0;

	if (lio->mtu == new_mtu)
		return (0);

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * LIO_MIN_MTU_SIZE bytes and LIO_MAX_MTU_SIZE bytes
	 */
	if ((new_mtu < LIO_MIN_MTU_SIZE) || (new_mtu > LIO_MAX_MTU_SIZE)) {
		lio_dev_err(oct, "Invalid MTU: %d\n", new_mtu);
		lio_dev_err(oct, "Valid range: %d to %d\n",
			    LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
		return (EINVAL);
	}

	sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16,
				    sizeof(*mtu_sc_ctx));
	if (sc == NULL)
		return (ENOMEM);

	ncmd = (union octeon_cmd *)sc->virtdptr;
	mtu_sc_ctx = sc->ctxptr;

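	/*
	 * The completion callback sets the context flag to 1 on success or
	 * -1 on failure; 0 means the response is still pending.
	 */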
	*mtu_sc_ctx = 0;

	ncmd->cmd64 = 0;
	ncmd->s.cmd = LIO_CMD_CHANGE_MTU;
	ncmd->s.param1 = new_mtu;

	lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC,
				 LIO_OPCODE_NIC_CMD, 0, 0, 0);

	sc->callback = lio_mtu_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = lio_send_soft_command(oct, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_info(oct,
			     "Failed to send MTU update control message\n");
		retval = EBUSY;
		goto mtu_update_failed;
	}

	/*
	 * Sleep on a wait queue until the cond flag indicates that the
	 * response arrived or timed out.
	 */
	lio_sleep_cond(oct, mtu_sc_ctx);

	if (*mtu_sc_ctx < 0) {
		retval = EBUSY;
		goto mtu_update_failed;
	}
	lio_dev_info(oct, "MTU Changed from %d to %d\n", if_getmtu(ifp),
		     new_mtu);
	if_setmtu(ifp, new_mtu);
	lio->mtu = new_mtu;
	/*
	 * Report success explicitly so that the LIO_IQ_SEND_STOP case is
	 * also treated as success.
	 */
	retval = 0;

mtu_update_failed:
	lio_free_soft_command(oct, sc);

	return (retval);
}

/* @param ifp network device */
int
lio_set_mac(if_t ifp, uint8_t *p)
{
	struct lio_ctrl_pkt	nctrl;
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	int	ret = 0;

	if (!lio_is_valid_ether_addr(p))
		return (EADDRNOTAVAIL);

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;
	nctrl.wait_time = 100;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((uint8_t *)&nctrl.udd[0] + 2, p, ETHER_ADDR_LEN);

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct, "MAC Address change failed\n");
		return (ENOMEM);
	}

	memcpy(((uint8_t *)&lio->linfo.hw_addr) + 2, p, ETHER_ADDR_LEN);

	return (0);
}

/*
 * \brief Convert the ifp flags into a lio_ifflags mask
 * @param ifp network device
 *
 * This routine generates a lio_ifflags mask from the ifp flags
 * received from the OS.
 */
static inline enum lio_ifflags
lio_get_new_flags(if_t ifp)
{
	enum lio_ifflags f = LIO_IFFLAG_UNICAST;

	if (if_getflags(ifp) & IFF_PROMISC)
		f |= LIO_IFFLAG_PROMISC;

	if (if_getflags(ifp) & IFF_ALLMULTI)
		f |= LIO_IFFLAG_ALLMULTI;

	if (if_getflags(ifp) & IFF_MULTICAST) {
		f |= LIO_IFFLAG_MULTICAST;

		/*
		 * Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (if_getamcount(ifp) > LIO_MAX_MULTICAST_ADDR)
			f |= LIO_IFFLAG_ALLMULTI;
	}
	if (if_getflags(ifp) & IFF_BROADCAST)
		f |= LIO_IFFLAG_BROADCAST;

	return (f);
}

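/*
 * if_foreach_llmaddr() callback: copy one link-level multicast address into
 * the next 64-bit slot of the control packet's udd[] area, stopping once
 * LIO_MAX_MULTICAST_ADDR entries have been copied.
 */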
static u_int
lio_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint64_t *mc = arg;

	if (cnt == LIO_MAX_MULTICAST_ADDR)
		return (0);

	mc += cnt;
	*mc = 0;
	memcpy(((uint8_t *)mc) + 2, LLADDR(sdl), ETHER_ADDR_LEN);
	/* no need to swap bytes */

	return (1);
}

/* @param ifp network device */
static int
lio_set_mcast_list(if_t ifp)
{
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	struct lio_ctrl_pkt	nctrl;
	int	mc_count;
	int	ret;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = lio_get_new_flags(ifp);
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc_count = if_foreach_llmaddr(ifp, lio_copy_maddr, &nctrl.udd[0]);

	/*
	 * Apparently, any activity in this call from the kernel has to be
	 * atomic. So we will not wait for the response.
	 */
	nctrl.wait_time = 0;
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct, "Multicast list change failed in core (ret: 0x%x)\n",
			    ret);
	}

	return ((ret) ? EINVAL : 0);
}