/*-
 * Copyright (c) 2003-2009 RMI Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RMI_BSD */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/rmi/fmn.c 211809 2010-08-25 09:53:00Z jchandra $");
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/bus.h>

#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/unistd.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>

#include <machine/reg.h>
#include <machine/cpu.h>
#include <machine/hwfunc.h>
#include <machine/mips_opcode.h>

#include <machine/param.h>
#include <machine/intr_machdep.h>
#include <mips/rmi/interrupt.h>
#include <mips/rmi/msgring.h>
#include <mips/rmi/pic.h>
#include <mips/rmi/board.h>

void disable_msgring_int(void *arg);
void enable_msgring_int(void *arg);

/* definitions */
struct tx_stn_handler {
	void (*action) (int, int, int, int, struct msgrng_msg *, void *);
	void *dev_id;
};

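/* State of the per-core message ring interrupt thread. */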
struct msgring_ithread {
	struct thread *i_thread;
	u_int i_pending;
	u_int i_flags;
	int i_cpu;
	int i_core;
};

struct msgring_ithread *msgring_ithreads[MAXCPU];

/* globals */
static struct tx_stn_handler tx_stn_handlers[MAX_TX_STNS];

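/*
 * Initialize the eight credit-counter entries for destination 'dest'
 * from the board's credit configuration table.
 */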
#define MSGRNG_CC_INIT_CPU_DEST(dest, counter) \
do { \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][0], 0 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][1], 1 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][2], 2 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][3], 3 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][4], 4 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][5], 5 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][6], 6 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][7], 7 ); \
} while(0)


/* make this a read/write spinlock */
static struct mtx msgrng_lock;
static int msgring_int_enabled;
static int msgring_pop_num_buckets;
static uint32_t msgring_pop_bucket_mask;
static int msgring_int_type;
static int msgring_watermark_count;
static uint32_t msgring_thread_mask;
uint32_t msgrng_msg_cycles = 0;

void xlr_msgring_handler(struct trapframe *);

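/*
 * Per-core FMN initialization: program the bucket sizes and the credit
 * counters for this core's message stations from the board configuration.
 * Must run on hardware thread 0 of each core, since the message stations
 * are shared by all threads of a core.
 */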
void
xlr_msgring_cpu_init(void)
{
	struct stn_cc *cc_config;
	struct bucket_size *bucket_sizes;
	int id;
	unsigned long flags;

	KASSERT(xlr_thr_id() == 0,
		("xlr_msgring_cpu_init from non-zero thread\n"));

	id = xlr_core_id();

	bucket_sizes = xlr_board_info.bucket_sizes;
	cc_config = xlr_board_info.credit_configs[id];

	msgrng_flags_save(flags);

	/*
	 * Message stations are shared among all threads in a CPU core.
	 * Assume thread 0 of each core is always active whenever more than
	 * one thread is active in that core.
	 */
	msgrng_write_bucksize(0, bucket_sizes->bucket[id * 8 + 0]);
	msgrng_write_bucksize(1, bucket_sizes->bucket[id * 8 + 1]);
	msgrng_write_bucksize(2, bucket_sizes->bucket[id * 8 + 2]);
	msgrng_write_bucksize(3, bucket_sizes->bucket[id * 8 + 3]);
	msgrng_write_bucksize(4, bucket_sizes->bucket[id * 8 + 4]);
	msgrng_write_bucksize(5, bucket_sizes->bucket[id * 8 + 5]);
	msgrng_write_bucksize(6, bucket_sizes->bucket[id * 8 + 6]);
	msgrng_write_bucksize(7, bucket_sizes->bucket[id * 8 + 7]);

	MSGRNG_CC_INIT_CPU_DEST(0, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(1, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(2, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(3, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(4, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(5, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(6, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(7, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(8, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(9, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(10, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(11, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(12, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(13, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(14, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(15, cc_config->counters);

	msgrng_flags_restore(flags);
}

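/*
 * One-time global configuration of the message ring code: initialize the
 * spin lock and set the defaults for the interrupt type, watermark count
 * and the bucket/thread masks used when popping messages.
 */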
void
xlr_msgring_config(void)
{
	mtx_init(&msgrng_lock, "msgring", NULL, MTX_SPIN | MTX_RECURSE);
	msgring_int_type = 0x02;
	msgring_pop_num_buckets = 8;
	msgring_pop_bucket_mask = 0xff;
	msgring_int_enabled = 0;
	msgring_watermark_count = 1;
	msgring_thread_mask = 0x01;
}

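/*
 * Drain the message ring: pop every non-empty bucket covered by the pop
 * bucket mask and hand each message to the handler registered for its
 * originating station.  The saved msgrng flags are restored around each
 * handler invocation so that the callbacks do not run with the ring
 * access flags held.
 */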
void
xlr_msgring_handler(struct trapframe *tf)
{
	unsigned long mflags;
	int bucket = 0;
	int size = 0, code = 0, rx_stid = 0, tx_stid = 0;
	struct msgrng_msg msg;
	unsigned int bucket_empty_bm = 0;
	unsigned int status = 0;

	/* TODO: not necessary to disable preemption */
	msgrng_flags_save(mflags);

	/* First drain all the high-priority messages */
	for (;;) {
		bucket_empty_bm = (msgrng_read_status() >> 24) & msgring_pop_bucket_mask;

		/* all buckets empty, break */
		if (bucket_empty_bm == msgring_pop_bucket_mask)
			break;

		for (bucket = 0; bucket < msgring_pop_num_buckets; bucket++) {
			if ((bucket_empty_bm & (1 << bucket)) /* empty */ )
				continue;

			status = message_receive(bucket, &size, &code, &rx_stid, &msg);
			if (status)
				continue;

			tx_stid = xlr_board_info.msgmap[rx_stid];

			if (!tx_stn_handlers[tx_stid].action) {
				printf("[%s]: No handler for message from stn_id=%d, bucket=%d, "
				    "size=%d, msg0=%jx, dropping message\n",
				    __FUNCTION__, tx_stid, bucket, size, (uintmax_t)msg.msg0);
			} else {
				//printf("[%s]: rx_stid = %d\n", __FUNCTION__, rx_stid);
				msgrng_flags_restore(mflags);
				(*tx_stn_handlers[tx_stid].action) (bucket, size, code, rx_stid,
				    &msg, tx_stn_handlers[tx_stid].dev_id);
				msgrng_flags_save(mflags);
			}
		}
	}
	msgrng_flags_restore(mflags);
}

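/*
 * Enable the message ring interrupt by writing the watermark count, the
 * IRQ number, the thread mask and the interrupt type into the message
 * ring configuration register.
 */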
void
enable_msgring_int(void *arg)
{
	unsigned long mflags = 0;

	msgrng_access_save(&msgrng_lock, mflags);
	/* enable the message ring interrupts */
	msgrng_write_config((msgring_watermark_count << 24) | (IRQ_MSGRING << 16)
	    | (msgring_thread_mask << 8) | msgring_int_type);
	msgrng_access_restore(&msgrng_lock, mflags);
}

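/*
 * Disable the message ring interrupt by clearing the interrupt type bits
 * in the message ring configuration register.
 */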
void
disable_msgring_int(void *arg)
{
	unsigned long mflags = 0;
	uint32_t config;

	msgrng_access_save(&msgrng_lock, mflags);
	config = msgrng_read_config();
	config &= ~0x3;
	msgrng_write_config(config);
	msgrng_access_restore(&msgrng_lock, mflags);
}

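/*
 * Fast interrupt filter for the message ring interrupt: disable the
 * interrupt, mark work pending and wake up the per-core message ring
 * kernel thread, which does the actual message processing.
 */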
static int
msgring_process_fast_intr(void *arg)
{
	int core = xlr_core_id();
	volatile struct msgring_ithread *it;
	struct thread *td;

	/* wake up the interrupt thread for this core to process the messages */
	it = (volatile struct msgring_ithread *)msgring_ithreads[core];
	KASSERT(it != NULL, ("No interrupt thread on cpu %d", core));
	td = it->i_thread;

	/*
	 * The interrupt thread will re-enable the interrupt after processing
	 * all messages.
	 */
	disable_msgring_int(NULL);
	atomic_store_rel_int(&it->i_pending, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
	return FILTER_HANDLED;
}

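/*
 * Main loop of the per-core message ring kernel thread: bind the thread
 * to its CPU, publish it in msgring_ithreads[] and then repeatedly drain
 * the message ring whenever the fast interrupt filter marks work pending,
 * sleeping in IWAIT state otherwise.
 */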
static void
msgring_process(void *arg)
{
	volatile struct msgring_ithread *ithd;
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (volatile struct msgring_ithread *)arg;
	KASSERT(ithd->i_thread == td,
	    ("%s: msg_ithread and proc linkage out of sync", __func__));

	/* First bind this thread to the right CPU */
	thread_lock(td);

	sched_bind(td, ithd->i_cpu);
	thread_unlock(td);

	atomic_store_rel_ptr((volatile uintptr_t *)&msgring_ithreads[ithd->i_core],
	     (uintptr_t)arg);
	enable_msgring_int(NULL);

	while (1) {
		while (ithd->i_pending) {
			/*
			 * This might need a full read and write barrier to
			 * make sure that this write posts before any of the
			 * memory or device accesses in the handlers.
			 */
			xlr_msgring_handler(NULL);
			atomic_store_rel_int(&ithd->i_pending, 0);
			enable_msgring_int(NULL);
		}
		if (!ithd->i_pending) {
			thread_lock(td);
			if (ithd->i_pending) {
				thread_unlock(td);
				continue;
			}
			sched_class(td, PRI_ITHD);
			TD_SET_IWAIT(td);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
		}
	}

}

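/*
 * Set up the message ring interrupt thread for one core: the thread is
 * created stopped, filled in with its core/cpu assignment and then put
 * on the run queue, where msgring_process() takes over.
 */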
static void
create_msgring_thread(int core, int cpu)
{
	struct msgring_ithread *ithd;
	struct thread *td;
	struct proc *p;
	int error;

	/* Create kernel thread for message ring interrupt processing */
	/* Currently create one task for thread 0 of each core */
	ithd = malloc(sizeof(struct msgring_ithread),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	error = kproc_create(msgring_process, (void *)ithd, &p,
	    RFSTOPPED | RFHIGHPID, 2, "msg_intr%d", cpu);

	if (error)
		panic("kproc_create() failed with %d", error);
	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */

	ithd->i_thread = td;
	ithd->i_pending = 0;
	ithd->i_cpu = cpu;
	ithd->i_core = core;

	thread_lock(td);
	sched_class(td, PRI_ITHD);
	sched_add(td, SRQ_INTR);
	thread_unlock(td);
	CTR2(KTR_INTR, "%s: created %s", __func__, td->td_name);
}

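/*
 * Register a message handler for the given transmit station id.  On the
 * first registration this also creates the message ring thread for core 0
 * and hooks up the fast interrupt handler for IRQ_MSGRING.
 *
 * A typical caller is a driver for one of the on-chip message ring agents,
 * e.g. (illustrative names only):
 *
 *	register_msgring_handler(my_tx_station, my_rx_handler, softc);
 *
 * where my_rx_handler has the signature
 *	void my_rx_handler(int bucket, int size, int code, int stid,
 *	    struct msgrng_msg *msg, void *arg);
 */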
int
register_msgring_handler(int major,
    void (*action) (int, int, int, int, struct msgrng_msg *, void *),
    void *dev_id)
{
	void *cookie;		/* FIXME - use? */

	if (major >= MAX_TX_STNS)
		return 1;

	//dbg_msg("major=%d, action=%p, dev_id=%p\n", major, action, dev_id);

	if (rmi_spin_mutex_safe)
		mtx_lock_spin(&msgrng_lock);
	tx_stn_handlers[major].action = action;
	tx_stn_handlers[major].dev_id = dev_id;
	if (rmi_spin_mutex_safe)
		mtx_unlock_spin(&msgrng_lock);

	if (xlr_test_and_set(&msgring_int_enabled)) {
		create_msgring_thread(0, 0);
		cpu_establish_hardintr("msgring", (driver_filter_t *) msgring_process_fast_intr,
			NULL, NULL, IRQ_MSGRING,
			INTR_TYPE_NET | INTR_FAST, &cookie);
	}
	return 0;
}

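/*
 * SYSINIT hook, run after SMP startup: create one message ring thread for
 * hardware thread 0 of every enabled core other than core 0 (core 0 gets
 * its thread from the first register_msgring_handler() call).
 */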
static void
start_msgring_threads(void *arg)
{
	int core, cpu;

	for (core = 1; core < XLR_MAX_CORES; core++) {
		if ((xlr_hw_thread_mask >> (4 * core)) & 0xf) {
			/* start one thread for an enabled core */
			cpu = xlr_hwtid_to_cpuid[4 * core];
			create_msgring_thread(core, cpu);
		}
	}
}

SYSINIT(start_msgring_threads, SI_SUB_SMP, SI_ORDER_MIDDLE, start_msgring_threads, NULL);