/*************************************************************************
Copyright (c) 2003-2007  Cavium Networks (support@cavium.com). All rights
reserved.


Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above
      copyright notice, this list of conditions and the following
      disclaimer in the documentation and/or other materials provided
      with the distribution.

    * Neither the name of Cavium Networks nor the names of
      its contributors may be used to endorse or promote products
      derived from this software without specific prior written
      permission.

This Software, including technical data, may be subject to U.S. export
control laws, including the U.S. Export Administration Act and its
associated regulations, and may be subject to export or import
regulations in other countries.

TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
*************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/module.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>

#include "wrapper-cvmx-includes.h"
#include "ethernet-headers.h"

#include "octebusvar.h"

/*
 * XXX/juli
 * Convert 0444 to tunables, 0644 to sysctls.
 */
#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) && CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
#else
int num_packet_buffers = 1024;
#endif
TUNABLE_INT("hw.octe.num_packet_buffers", &num_packet_buffers);
/*
 * Number of packet buffers to allocate and store in the FPA.  By default,
 * 1024 packet buffers are used unless CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
 * is defined.
 */

int pow_receive_group = 15;
TUNABLE_INT("hw.octe.pow_receive_group", &pow_receive_group);
/*
 * POW group to receive packets from.  All ethernet hardware will be
 * configured to send incoming packets to this POW group.  Any other
 * software can also submit packets to this group for the kernel to
 * process.
 */
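
/*
 * Illustrative only: both knobs above are loader tunables, so they can be
 * set from loader.conf before boot.  The values below are hypothetical
 * examples, not recommendations:
 *
 *	hw.octe.num_packet_buffers="2048"
 *	hw.octe.pow_receive_group="15"
 */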

/**
 * Periodic timer to check auto-negotiation.
 */
static struct callout cvm_oct_poll_timer;

/**
 * Array of every ethernet device owned by this driver, indexed by
 * the IPD input port number.
 */
struct ifnet *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

/**
 * Task to handle link status changes.
 */
static struct taskqueue *cvm_oct_link_taskq;

/*
 * Number of buffers in the output buffer pool.
 */
static int cvm_oct_num_output_buffers;

/**
 * Function to update link status.
 */
static void cvm_oct_update_link(void *context, int pending)
{
	cvm_oct_private_t *priv = (cvm_oct_private_t *)context;
	struct ifnet *ifp = priv->ifp;
	cvmx_helper_link_info_t link_info;

	link_info.u64 = priv->link_info;

	if (link_info.s.link_up) {
		if_link_state_change(ifp, LINK_STATE_UP);
		DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
			   if_name(ifp), link_info.s.speed,
			   (link_info.s.full_duplex) ? "Full" : "Half",
			   priv->port, priv->queue);
	} else {
		if_link_state_change(ifp, LINK_STATE_DOWN);
		DEBUGPRINT("%s: Link down\n", if_name(ifp));
	}
	priv->need_link_update = 0;
}

/**
 * Periodic timer tick for slow management operations.
 *
 * @param arg    Device to check
 */
static void cvm_do_timer(void *arg)
{
	static int port;
	static int updated;
	if (port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (cvm_oct_device[port]) {
			int queues_per_port;
			int qos;
			cvm_oct_private_t *priv = (cvm_oct_private_t *)cvm_oct_device[port]->if_softc;

			cvm_oct_common_poll(priv->ifp);
			if (priv->need_link_update) {
				updated++;
				taskqueue_enqueue(cvm_oct_link_taskq, &priv->link_task);
			}

			queues_per_port = cvmx_pko_get_num_queues(port);
			/* Drain any pending packets in the free list. */
			for (qos = 0; qos < queues_per_port; qos++) {
				if (_IF_QLEN(&priv->tx_free_queue[qos]) > 0) {
					IF_LOCK(&priv->tx_free_queue[qos]);
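					/*
					 * Mbufs on the free queue in excess of
					 * the per-queue FAU counter (which, as
					 * this loop uses it, tracks packets the
					 * PKO has not yet finished sending)
					 * have completed transmission and can
					 * be freed.
					 */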
					while (_IF_QLEN(&priv->tx_free_queue[qos]) > cvmx_fau_fetch_and_add32(priv->fau+qos*4, 0)) {
						struct mbuf *m;

						_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
						m_freem(m);
					}
					IF_UNLOCK(&priv->tx_free_queue[qos]);

					/*
					 * XXX locking!
					 */
					priv->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
				}
			}
		}
		port++;
		/*
		 * Poll the next port in a 50th of a second.  This spreads
		 * the polling of the ports out a little bit.
		 */
		callout_reset(&cvm_oct_poll_timer, hz / 50, cvm_do_timer, NULL);
	} else {
		port = 0;
		/*
		 * If any updates were made in this run, continue iterating at
		 * 1/50th of a second, so that if a link has merely gone down
		 * temporarily (e.g. because of interface reinitialization) it
		 * will not be forced to stay down for an entire second.
		 */
		if (updated > 0) {
			updated = 0;
			callout_reset(&cvm_oct_poll_timer, hz / 50, cvm_do_timer, NULL);
		} else {
			/*
			 * All ports have been polled.  Start the next
			 * iteration through the ports in one second.
			 */
			callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);
		}
	}
}

/**
 * Configure common hardware for all interfaces.
 */
static void cvm_oct_configure_common_hw(device_t bus)
{
	struct octebus_softc *sc;
	int pko_queues;
	int error;
	int rid;

	sc = device_get_softc(bus);

	/* Set up the FPA. */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL) {
		/*
		 * If the FPA uses different pools for output buffers and
		 * packets, size the output buffer pool based on the number
		 * of PKO queues.
		 */
		if (OCTEON_IS_MODEL(OCTEON_CN38XX))
			pko_queues = 128;
		else if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
			pko_queues = 32;
		else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
			pko_queues = 32;
		else
			pko_queues = 256;

		cvm_oct_num_output_buffers = 4 * pko_queues;
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE,
				     cvm_oct_num_output_buffers);
	}

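	/*
	 * Configure Random Early Discard.  As best understood from the cvmx
	 * helper interface, the two arguments are the RED pass and drop
	 * thresholds, set here to 1/4 and 1/8 of the packet buffer pool.
	 */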
	if (USE_RED)
		cvmx_helper_setup_red(num_packet_buffers/4,
				      num_packet_buffers/8);

	/* Enable the MII interface. */
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
		cvmx_write_csr(CVMX_SMI_EN, 1);

	/* Register an IRQ handler to receive POW interrupts. */
	rid = 0;
	sc->sc_rx_irq = bus_alloc_resource(bus, SYS_RES_IRQ, &rid,
					   OCTEON_IRQ_WORKQ0 + pow_receive_group,
					   OCTEON_IRQ_WORKQ0 + pow_receive_group,
					   1, RF_ACTIVE);
	if (sc->sc_rx_irq == NULL) {
		device_printf(bus, "could not allocate workq irq\n");
		return;
	}

	error = bus_setup_intr(bus, sc->sc_rx_irq, INTR_TYPE_NET | INTR_MPSAFE,
			       cvm_oct_do_interrupt, NULL, cvm_oct_device,
			       &sc->sc_rx_intr_cookie);
	if (error != 0) {
		device_printf(bus, "could not setup workq irq\n");
		return;
	}

#ifdef SMP
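	/*
	 * Enable the workq interrupt for our POW receive group on each of
	 * the other cores as well (the current core is skipped here and is
	 * presumably set up elsewhere), so packet work can be taken on any
	 * CPU.
	 */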
	{
		cvmx_ciu_intx0_t en;
		int core;

		CPU_FOREACH(core) {
			if (core == PCPU_GET(cpuid))
				continue;

			en.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(core*2));
			en.s.workq |= (1<<pow_receive_group);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(core*2), en.u64);
		}
	}
#endif
}

/**
 * Free a work queue entry received in an intercept callback.
 *
 * @param work_queue_entry
 *               Work queue entry to free
 * @return Zero on success, negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	cvmx_buf_ptr_t segment_ptr = work->packet_ptr;

	while (segments--) {
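		/*
		 * The pointer to the next segment is stored in the 8 bytes
		 * immediately before the current segment's data address,
		 * which is where it is fetched from below.
		 */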
		cvmx_buf_ptr_t next_ptr = *(cvmx_buf_ptr_t *)cvmx_phys_to_ptr(segment_ptr.s.addr-8);
		if (__predict_false(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr), segment_ptr.s.pool, DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE/128));
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

	return 0;
}

/**
 * Module/driver initialization.  Creates the network devices.
 *
 * @return Zero on success
 */
int cvm_oct_init_module(device_t bus)
{
	device_t dev;
	int ifnum;
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	cvm_oct_rx_initialize();
	cvm_oct_configure_common_hw(bus);

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled. */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = 0; port < num_ports; port++) {
			cvmx_pip_prt_tagx_t pip_prt_tagx;
			int pkind = cvmx_helper_get_ipd_port(interface, port);

			pip_prt_tagx.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(pkind));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(pkind), pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	cvm_oct_link_taskq = taskqueue_create("octe link", M_NOWAIT,
	    taskqueue_thread_enqueue, &cvm_oct_link_taskq);
	taskqueue_start_threads(&cvm_oct_link_taskq, 1, PI_NET,
	    "octe link taskq");

	/*
	 * Initialize the FAU used for counting packet buffers that need to
	 * be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	ifnum = 0;
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode = cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     ifnum++, port++) {
			cvm_oct_private_t *priv;
			struct ifnet *ifp;

			dev = BUS_ADD_CHILD(bus, 0, "octe", ifnum);
			if (dev != NULL)
				ifp = if_alloc(IFT_ETHER);
			if (dev == NULL || ifp == NULL) {
				printf("Failed to allocate ethernet device for interface %d port %d\n", interface, port);
				continue;
			}

			/* Initialize the device private structure. */
			device_probe(dev);
			priv = device_get_softc(dev);
			priv->dev = dev;
			priv->ifp = ifp;
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
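			/*
			 * Reserve one 32-bit FAU counter per PKO queue for
			 * this port.  Counters are handed out downward from
			 * FAU_NUM_PACKET_BUFFERS_TO_FREE; see the matching
			 * adjustment of "fau" after a successful init below.
			 */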
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < cvmx_pko_get_num_queues(port); qos++)
				cvmx_fau_atomic_write32(priv->fau+qos*4, 0);
			TASK_INIT(&priv->link_task, 0, cvm_oct_update_link, priv);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO. */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon NPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				priv->init = cvm_oct_xaui_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon XAUI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				priv->init = cvm_oct_common_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon LOOP Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->init = cvm_oct_sgmii_init;
				priv->uninit = cvm_oct_common_uninit;
				device_set_desc(dev, "Cavium Octeon SGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				priv->init = cvm_oct_spi_init;
				priv->uninit = cvm_oct_spi_uninit;
				device_set_desc(dev, "Cavium Octeon SPI Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon RGMII Ethernet");
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->init = cvm_oct_rgmii_init;
				priv->uninit = cvm_oct_rgmii_uninit;
				device_set_desc(dev, "Cavium Octeon GMII Ethernet");
				break;
			}

			ifp->if_softc = priv;

			if (!priv->init) {
				printf("octe%d: unsupported device type interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else if (priv->init(ifp) != 0) {
				printf("octe%d: failed to register device for interface %d, port %d\n",
				       ifnum, interface, priv->port);
				if_free(ifp);
			} else {
				cvm_oct_device[priv->port] = ifp;
				fau -= cvmx_pko_get_num_queues(priv->port) * sizeof(uint32_t);
			}
		}
	}

	if (INTERRUPT_LIMIT) {
		/*
		 * Set the POW timer rate to give an interrupt at most
		 * INTERRUPT_LIMIT times per second.
		 */
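		/*
		 * The divide converts the core clock rate into the periodic
		 * counter threshold written below; the exact scaling (the
		 * 16*256 divisor and the <<8 shift into the threshold field)
		 * follows the CVMX register layout, as best understood.
		 */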
		cvmx_write_csr(CVMX_POW_WQ_INT_PC, cvmx_clock_get_rate(CVMX_CLOCK_CORE)/(INTERRUPT_LIMIT*16*256)<<8);

		/*
		 * Enable the POW timer interrupt.  It will count when there
		 * are packets available.
		 */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1ful<<24);
	} else {
		/* Enable the POW interrupt when our port has at least one packet. */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);
	}

	callout_init(&cvm_oct_poll_timer, 1);
	callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);

	return 0;
}

/**
 * Module/driver shutdown.
 */
void cvm_oct_cleanup_module(device_t bus)
{
	int port;
	struct octebus_softc *sc = device_get_softc(bus);

	/* Disable the POW interrupt. */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	/* Free the interrupt handler. */
	bus_teardown_intr(bus, sc->sc_rx_irq, sc->sc_rx_intr_cookie);

	callout_stop(&cvm_oct_poll_timer);
	cvm_oct_rx_shutdown();

	cvmx_helper_shutdown_packet_io_global();

	/* Free the ethernet devices. */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			cvm_oct_tx_shutdown(cvm_oct_device[port]);
#if 0
			unregister_netdev(cvm_oct_device[port]);
			kfree(cvm_oct_device[port]);
#else
			panic("%s: need to detach and free interface.", __func__);
#endif
			cvm_oct_device[port] = NULL;
		}
	}
	/* Free the hardware pools. */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE, num_packet_buffers);

	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL, CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, cvm_oct_num_output_buffers);

	/*
	 * Disable the FPA now that all of its buffers are free; this is not
	 * done by the helper shutdown.
	 */
	cvmx_fpa_disable();
}