fmn.c revision 208165
/*-
 * Copyright (c) 2003-2009 RMI Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RMI_BSD */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/rmi/on_chip.c 208165 2010-05-16 19:43:48Z rrs $");
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/bus.h>

#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/unistd.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>

#include <machine/reg.h>
#include <machine/cpu.h>
#include <machine/hwfunc.h>
#include <machine/mips_opcode.h>

#include <machine/param.h>
#include <machine/intr_machdep.h>
#include <mips/rmi/interrupt.h>
#include <mips/rmi/msgring.h>
#include <mips/rmi/iomap.h>
#include <mips/rmi/debug.h>
#include <mips/rmi/pic.h>
#include <mips/rmi/board.h>

void disable_msgring_int(void *arg);
void enable_msgring_int(void *arg);

/* definitions */
struct tx_stn_handler {
	void (*action) (int, int, int, int, struct msgrng_msg *, void *);
	void *dev_id;
};

struct msgring_ithread {
	struct thread *i_thread;	/* kernel thread that drains the buckets */
	u_int i_pending;		/* set by the filter, cleared by the thread */
	u_int i_flags;
	int i_cpu;			/* CPU this thread is bound to */
};
struct msgring_ithread *msgring_ithreads[MAXCPU];

/* globals */
static struct tx_stn_handler tx_stn_handlers[MAX_TX_STNS];

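/*
 * Program the eight credit-counter entries of the MSGRNG_CC_<dest>_REG
 * register for destination 'dest' from the per-core credit configuration
 * table supplied by the board setup code.
 */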
#define MSGRNG_CC_INIT_CPU_DEST(dest, counter) \
do { \
	msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][0], 0); \
	msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][1], 1); \
	msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][2], 2); \
	msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][3], 3); \
	msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][4], 4); \
	msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][5], 5); \
	msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][6], 6); \
	msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][7], 7); \
} while(0)

/* make this a read/write spinlock */
static struct mtx msgrng_lock;
static int msgring_int_enabled;
struct mtx xlr_pic_lock;

static int msgring_pop_num_buckets;
static uint32_t msgring_pop_bucket_mask;
static int msgring_int_type;
static int msgring_watermark_count;
static uint32_t msgring_thread_mask;

uint32_t msgrng_msg_cycles = 0;

void xlr_msgring_handler(struct trapframe *);

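/*
 * Per-core message station setup, meant to run on hardware thread 0 of
 * each core (asserted below): program the sizes of the eight receive
 * buckets and the credit counters for all sixteen destinations from the
 * tables in xlr_board_info.
 */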
void
xlr_msgring_cpu_init(void)
{
	struct stn_cc *cc_config;
	struct bucket_size *bucket_sizes;
	int id;
	unsigned long flags;

	KASSERT(xlr_thr_id() == 0,
	    ("xlr_msgring_cpu_init from non-zero thread\n"));

	id = xlr_core_id();

	bucket_sizes = xlr_board_info.bucket_sizes;
	cc_config = xlr_board_info.credit_configs[id];

	msgrng_flags_save(flags);

	/*
	 * Message stations are shared among all threads in a CPU core.
	 * Assume that thread 0 of a core is always active whenever more
	 * than one thread in that core is active.
	 */
	msgrng_write_bucksize(0, bucket_sizes->bucket[id * 8 + 0]);
	msgrng_write_bucksize(1, bucket_sizes->bucket[id * 8 + 1]);
	msgrng_write_bucksize(2, bucket_sizes->bucket[id * 8 + 2]);
	msgrng_write_bucksize(3, bucket_sizes->bucket[id * 8 + 3]);
	msgrng_write_bucksize(4, bucket_sizes->bucket[id * 8 + 4]);
	msgrng_write_bucksize(5, bucket_sizes->bucket[id * 8 + 5]);
	msgrng_write_bucksize(6, bucket_sizes->bucket[id * 8 + 6]);
	msgrng_write_bucksize(7, bucket_sizes->bucket[id * 8 + 7]);

	MSGRNG_CC_INIT_CPU_DEST(0, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(1, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(2, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(3, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(4, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(5, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(6, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(7, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(8, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(9, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(10, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(11, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(12, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(13, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(14, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(15, cc_config->counters);

	msgrng_flags_restore(flags);
}

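/*
 * Default message ring settings: pop from all eight receive buckets, use
 * a watermark of one message and target hardware thread 0 of the core.
 * enable_msgring_int() packs these values into the message ring config
 * register along with IRQ_MSGRING.
 */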
void
xlr_msgring_config(void)
{
	msgring_int_type = 0x02;
	msgring_pop_num_buckets = 8;
	msgring_pop_bucket_mask = 0xff;

	msgring_watermark_count = 1;
	msgring_thread_mask = 0x01;
}

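/*
 * Message ring handler: read the bucket-empty bitmap from the status
 * register and pop messages from every non-empty bucket, dispatching each
 * message to the handler registered for its originating station, until
 * all buckets are empty.
 */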
void
xlr_msgring_handler(struct trapframe *tf)
{
	unsigned long mflags;
	int bucket = 0;
	int size = 0, code = 0, rx_stid = 0, tx_stid = 0;
	struct msgrng_msg msg;
	unsigned int bucket_empty_bm = 0;
	unsigned int status = 0;

	/* TODO: it should not be necessary to disable preemption here */
	msgrng_flags_save(mflags);

	/* First drain all the high priority messages */
	for (;;) {
		bucket_empty_bm = (msgrng_read_status() >> 24) & msgring_pop_bucket_mask;

		/* all buckets empty, break */
		if (bucket_empty_bm == msgring_pop_bucket_mask)
			break;

		for (bucket = 0; bucket < msgring_pop_num_buckets; bucket++) {
			if ((bucket_empty_bm & (1 << bucket)) /* empty */ )
				continue;

			status = message_receive(bucket, &size, &code, &rx_stid, &msg);
			if (status)
				continue;

			tx_stid = xlr_board_info.msgmap[rx_stid];

			if (!tx_stn_handlers[tx_stid].action) {
				printf("[%s]: No handler for message from stn_id=%d, bucket=%d, "
				    "size=%d, msg0=%jx, dropping message\n",
				    __FUNCTION__, tx_stid, bucket, size,
				    (uintmax_t)msg.msg0);
			} else {
				//printf("[%s]: rx_stid = %d\n", __FUNCTION__, rx_stid);
				msgrng_flags_restore(mflags);
				(*tx_stn_handlers[tx_stid].action) (bucket, size, code, rx_stid,
				    &msg, tx_stn_handlers[tx_stid].dev_id);
				msgrng_flags_save(mflags);
			}
		}
	}

	xlr_set_counter(MSGRNG_EXIT_STATUS, msgrng_read_status());

	msgrng_flags_restore(mflags);
}

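/*
 * As the shifts below suggest, the message ring config register packs the
 * watermark count at bit 24, the IRQ number at bit 16, the hardware
 * thread mask at bit 8 and the interrupt type in the low bits.
 * disable_msgring_int() masks delivery by clearing the two low
 * (interrupt type) bits.
 */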
void
enable_msgring_int(void *arg)
{
	unsigned long mflags = 0;

	msgrng_access_save(&msgrng_lock, mflags);
	/* enable the message ring interrupts */
	msgrng_write_config((msgring_watermark_count << 24) | (IRQ_MSGRING << 16)
	    | (msgring_thread_mask << 8) | msgring_int_type);
	msgrng_access_restore(&msgrng_lock, mflags);
}

void
disable_msgring_int(void *arg)
{
	unsigned long mflags = 0;
	uint32_t config;

	msgrng_access_save(&msgrng_lock, mflags);
	config = msgrng_read_config();
	config &= ~0x3;
	msgrng_write_config(config);
	msgrng_access_restore(&msgrng_lock, mflags);
}

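/*
 * Fast interrupt filter for IRQ_MSGRING: runs in interrupt context, masks
 * the message ring interrupt and wakes the interrupt thread bound to this
 * CPU, which does the actual message processing and re-enables the
 * interrupt when it is done.
 */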
static int
msgring_process_fast_intr(void *arg)
{
	int cpu = PCPU_GET(cpuid);
	volatile struct msgring_ithread *it;
	struct thread *td;

	/* wake up the interrupt thread for this CPU to process the messages */
	it = (volatile struct msgring_ithread *)msgring_ithreads[cpu];
	KASSERT(it != NULL, ("No interrupt thread on cpu %d", cpu));
	td = it->i_thread;

	/*
	 * The interrupt thread will re-enable the interrupt after it has
	 * processed all pending messages.
	 */
	disable_msgring_int(NULL);
	atomic_store_rel_int(&it->i_pending, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
	return (FILTER_HANDLED);
}

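/*
 * Per-CPU message ring interrupt thread: bind to the assigned CPU,
 * publish the ithread structure so the fast filter can find it, and then
 * loop forever draining messages whenever i_pending is set, going back to
 * an interrupt-wait sleep in between.
 */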
static void
msgring_process(void *arg)
{
	volatile struct msgring_ithread *ithd;
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (volatile struct msgring_ithread *)arg;
	KASSERT(ithd->i_thread == td,
	    ("%s:msg_ithread and proc linkage out of sync", __func__));

	/* First bind this thread to the right CPU */
	thread_lock(td);
	sched_bind(td, ithd->i_cpu);
	thread_unlock(td);

	atomic_store_rel_ptr((volatile uintptr_t *)&msgring_ithreads[ithd->i_cpu],
	    (uintptr_t)arg);
	enable_msgring_int(NULL);

	while (1) {
		while (ithd->i_pending) {
			/*
			 * This might need a full read and write barrier to
			 * make sure that this write posts before any of the
			 * memory or device accesses in the handlers.
			 */
			xlr_msgring_handler(NULL);
			atomic_store_rel_int(&ithd->i_pending, 0);
			enable_msgring_int(NULL);
		}
		if (!ithd->i_pending) {
			thread_lock(td);
			if (ithd->i_pending) {
				thread_unlock(td);
				continue;
			}
			sched_class(td, PRI_ITHD);
			TD_SET_IWAIT(td);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
		}
	}
}

static void
create_msgring_thread(int cpu)
{
	struct msgring_ithread *ithd;
	struct thread *td;
	struct proc *p;
	int error;

	/* Create a kernel thread for message ring interrupt processing. */
	/* Currently one thread is created for thread 0 of each core. */
	ithd = malloc(sizeof(struct msgring_ithread),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	error = kproc_create(msgring_process, (void *)ithd, &p,
	    RFSTOPPED | RFHIGHPID, 2, "msg_intr%d", cpu);

	if (error)
		panic("kproc_create() failed with %d", error);
	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */

	ithd->i_thread = td;
	ithd->i_pending = 0;
	ithd->i_cpu = cpu;

	thread_lock(td);
	sched_class(td, PRI_ITHD);
	sched_add(td, SRQ_INTR);
	thread_unlock(td);
	CTR2(KTR_INTR, "%s: created %s", __func__, td->td_name);
}

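/*
 * Register a handler for messages arriving from transmit station 'major'.
 * A hypothetical driver would use it roughly as follows (all names below
 * are illustrative only):
 *
 *	static void
 *	mydev_msg_handler(int bucket, int size, int code, int stid,
 *	    struct msgrng_msg *msg, void *dev_id)
 *	{
 *		struct mydev_softc *sc = dev_id;
 *		... consume msg->msg0 ...
 *	}
 *
 *	(void)register_msgring_handler(MY_TX_STN_ID, mydev_msg_handler, sc);
 *
 * The first registration also creates the CPU 0 interrupt thread and
 * hooks up the message ring hard interrupt.
 */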
int
register_msgring_handler(int major,
    void (*action) (int, int, int, int, struct msgrng_msg *, void *),
    void *dev_id)
{
	void *cookie;		/* FIXME - use? */

	if (major >= MAX_TX_STNS)
		return (1);

	//dbg_msg("major=%d, action=%p, dev_id=%p\n", major, action, dev_id);

	if (rmi_spin_mutex_safe)
		mtx_lock_spin(&msgrng_lock);
	tx_stn_handlers[major].action = action;
	tx_stn_handlers[major].dev_id = dev_id;
	if (rmi_spin_mutex_safe)
		mtx_unlock_spin(&msgrng_lock);

	if (xlr_test_and_set(&msgring_int_enabled)) {
		create_msgring_thread(0);
		cpu_establish_hardintr("msgring", (driver_filter_t *)msgring_process_fast_intr,
		    NULL, NULL, IRQ_MSGRING,
		    INTR_TYPE_NET | INTR_FAST, &cookie);
	}
	return (0);
}

static void
pic_init(void)
{
	xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET);
	int i = 0;
	int level;

	dbg_msg("Initializing PIC...\n");
	for (i = 0; i < PIC_NUM_IRTS; i++) {
		level = PIC_IRQ_IS_EDGE_TRIGGERED(i);

		/* Bind all PIC irqs to cpu 0 */
		xlr_write_reg(mmio, PIC_IRT_0_BASE + i, 0x01);

		/*
		 * Use local scheduling and high polarity for all IRTs.
		 * Invalidate all IRTs by default.
		 */
		xlr_write_reg(mmio, PIC_IRT_1_BASE + i, (level << 30) | (1 << 6) |
		    (PIC_IRQ_BASE + i));
	}
	dbg_msg("PIC init now done\n");
}

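/*
 * One-time on-chip initialization, called early in boot: set up the
 * message ring and PIC spin locks, discover the board configuration,
 * program the default message ring settings, initialize the PIC and set
 * up the boot CPU's message station.
 */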
void
on_chip_init(void)
{
	mtx_init(&msgrng_lock, "msgring", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&xlr_pic_lock, "pic", NULL, MTX_SPIN);

	xlr_board_info_setup();

	msgring_int_enabled = 0;

	xlr_msgring_config();
	pic_init();

	xlr_msgring_cpu_init();
}

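/*
 * The XLR has four hardware threads per core and the message station is
 * per-core, serviced by thread 0, so interrupt threads are created only
 * for CPU ids 4, 8, ... of the active CPUs; the thread for CPU 0 was
 * already created by the first register_msgring_handler() call.
 */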
static void
start_msgring_threads(void *arg)
{
	uint32_t cpu_mask;
	int cpu;

	cpu_mask = PCPU_GET(cpumask) | PCPU_GET(other_cpus);
	for (cpu = 4; cpu < MAXCPU; cpu += 4)
		if (cpu_mask & (1 << cpu))
			create_msgring_thread(cpu);
}

SYSINIT(start_msgring_threads, SI_SUB_SMP, SI_ORDER_MIDDLE, start_msgring_threads, NULL);