/*-
 * Copyright (c) 2003-2009 RMI Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RMI_BSD */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/rmi/fmn.c 213377 2010-10-03 04:33:58Z jchandra $");
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/bus.h>

#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/unistd.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>

#include <machine/reg.h>
#include <machine/cpu.h>
#include <machine/hwfunc.h>
#include <machine/mips_opcode.h>

#include <machine/param.h>
#include <machine/intr_machdep.h>
#include <mips/rmi/interrupt.h>
#include <mips/rmi/msgring.h>
#include <mips/rmi/pic.h>
#include <mips/rmi/board.h>
/*
 * Program the 8 credit counters this core holds for destination group
 * 'dest' (one counter per bucket in that group).  'dest' must be a
 * literal constant: it is token-pasted into the credit counter
 * register name MSGRNG_CC_<dest>_REG.
 */
#define MSGRNG_CC_INIT_CPU_DEST(dest, counter) \
do { \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][0], 0 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][1], 1 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][2], 2 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][3], 3 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][4], 4 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][5], 5 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][6], 6 ); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][7], 7 ); \
} while(0)


/*
 * Keep track of our message ring handler threads, each core has a
 * different message station. Ideally we will need to start a few
 * message handling threads every core, and wake them up depending on
 * load.
 */
struct msgring_thread {
	struct {
		struct thread	*thread; /* msgring handler threads */
		int	needed;		/* thread needs to wake up */
	} threads[XLR_NTHREADS];
	int	running;		/* number of threads running */
	int	nthreads;		/* number of threads started */
	struct mtx lock;		/* for changing running/active */
};
/* Per-core handler thread state, indexed by core id. */
static struct msgring_thread msgring_threads[XLR_MAX_CORES];
static struct proc *msgring_proc;	/* all threads are under a proc */

/*
 * The maximum number of software message handler threads to be started
 * per core. Default is 2 per core.  Settable via the loader tunable
 * "hw.fmn.maxthreads"; the value is sanitized in xlr_msgring_config().
 */
static int	msgring_maxthreads = 2;
TUNABLE_INT("hw.fmn.maxthreads", &msgring_maxthreads);
100
/*
 * The device drivers can register a handler for the messages sent
 * from a station (corresponding to the device).
 */
struct tx_stn_handler {
	msgring_handler action;	/* callback invoked for each message */
	void *arg;		/* opaque argument passed to 'action' */
};
/* Per-station dispatch table; writes are serialized by msgmap_lock. */
static struct tx_stn_handler msgmap[MSGRNG_NSTATIONS];
static struct mtx	msgmap_lock;
111
/*
 * Initialize the messaging subsystem.
 *
 * Message Stations are shared among all threads in a cpu core, this
 * has to be called once from every core which is online.
 *
 * Programs the per-core receive bucket sizes and the credit counters
 * this core holds for all possible destination buckets, using the
 * board-specific tables in xlr_board_info.
 */
void
xlr_msgring_cpu_init(void)
{
	struct stn_cc *cc_config;
	struct bucket_size *bucket_sizes;
	uint32_t flags;
	int id;

	/* Must run on hardware thread 0 of the core. */
	KASSERT(xlr_thr_id() == 0,
		("xlr_msgring_cpu_init from non-zero thread"));
	id = xlr_core_id();
	bucket_sizes = xlr_board_info.bucket_sizes;
	cc_config = xlr_board_info.credit_configs[id];

	/* Enable COP2 (message ring) access; save previous state. */
	flags = msgrng_access_enable();

	/*
	 * FMN messages are received in 8 buckets per core, set up
	 * the bucket sizes for each bucket
	 */
	msgrng_write_bucksize(0, bucket_sizes->bucket[id * 8 + 0]);
	msgrng_write_bucksize(1, bucket_sizes->bucket[id * 8 + 1]);
	msgrng_write_bucksize(2, bucket_sizes->bucket[id * 8 + 2]);
	msgrng_write_bucksize(3, bucket_sizes->bucket[id * 8 + 3]);
	msgrng_write_bucksize(4, bucket_sizes->bucket[id * 8 + 4]);
	msgrng_write_bucksize(5, bucket_sizes->bucket[id * 8 + 5]);
	msgrng_write_bucksize(6, bucket_sizes->bucket[id * 8 + 6]);
	msgrng_write_bucksize(7, bucket_sizes->bucket[id * 8 + 7]);

	/*
	 * For sending FMN messages, we need credits on the destination
	 * bucket.  Program the credits this core has on the 128 possible
	 * destination buckets.
	 * We cannot use a loop, because the first argument has to
	 * be a constant integer value (it is pasted into a register name).
	 */
	MSGRNG_CC_INIT_CPU_DEST(0,  cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(1,  cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(2,  cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(3,  cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(4,  cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(5,  cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(6,  cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(7,  cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(8,  cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(9,  cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(10, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(11, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(12, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(13, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(14, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(15, cc_config->counters);
	msgrng_restore(flags);
}
172
173/*
174 * Boot time init, called only once
175 */
176void
177xlr_msgring_config(void)
178{
179	mtx_init(&msgmap_lock, "msgring", NULL, MTX_SPIN);
180
181	/* check value */
182	if (msgring_maxthreads < 0 || msgring_maxthreads > XLR_NTHREADS)
183		msgring_maxthreads = XLR_NTHREADS;
184}
185
/*
 * Drain out max_messages for the buckets set in the bucket mask.
 * Use max_messages = 0 to drain out all messages.
 *
 * Returns the number of messages processed.  Registered handlers run
 * with message ring access temporarily dropped (it is re-enabled after
 * the callback returns), so handlers can execute normal kernel code.
 */
uint32_t
xlr_msgring_handler(uint8_t bucket_mask, uint32_t max_messages)
{
	int bucket = 0;
	int size = 0, code = 0, rx_stid = 0;
	struct msgrng_msg msg;
	struct tx_stn_handler *he;
	unsigned int status = 0;
	unsigned long mflags;
	uint32_t n_msgs;
	uint32_t msgbuckets;

	n_msgs = 0;
	mflags = msgrng_access_enable();
	for (;;) {
		/*
		 * Bits 31:24 of the status word appear to be per-bucket
		 * empty flags; invert to get "has messages" and restrict
		 * to the caller's mask.
		 */
		msgbuckets = (~msgrng_read_status() >> 24) & bucket_mask;

		/* all buckets empty, break */
		if (msgbuckets == 0)
			break;

		for (bucket = 0; bucket < 8; bucket++) {
			if ((msgbuckets & (1 << bucket)) == 0) /* empty */
				continue;

			status = message_receive(bucket, &size, &code,
			    &rx_stid, &msg);
			if (status != 0)	/* receive failed, next bucket */
				continue;
			n_msgs++;
			he = &msgmap[rx_stid];
			if (he->action == NULL) {
				printf("[%s]: No Handler for message from "
				    "stn_id=%d, bucket=%d, size=%d, msg0=%jx\n",
				    __func__, rx_stid, bucket, size,
				    (uintmax_t)msg.msg0);
			} else {
				/* drop FMN access around the callback */
				msgrng_restore(mflags);
				(*he->action)(bucket, size, code, rx_stid,
				    &msg, he->arg);
				mflags = msgrng_access_enable();
			}
			if (max_messages > 0 && n_msgs >= max_messages)
				goto done;
		}
	}

done:
	msgrng_restore(mflags);
	return (n_msgs);
}
241
242/*
243 * XLR COP2 supports watermark interrupts based on the number of
244 * messages pending in all the buckets in the core.  We increase
245 * the watermark until all the possible handler threads in the core
246 * are woken up.
247 */
248static void
249msgrng_setconfig(int running, int nthr)
250{
251	uint32_t config, mflags;
252	int watermark = 1;	/* non zero needed */
253	int wm_intr_value;
254
255	KASSERT(nthr >= 0 && nthr <= msgring_maxthreads,
256	    ("Bad value of nthr %d", nthr));
257	KASSERT(running <= nthr, ("Bad value of running %d", running));
258
259	if (running == nthr) {
260		wm_intr_value = 0;
261	} else {
262		switch (running) {
263		case 0: break;		/* keep default */
264		case 1:
265			watermark = 16; break;
266		case 2:
267			watermark = 32; break;
268		case 3:
269			watermark = 48; break;
270		}
271		wm_intr_value = 0x2;	/* set watermark enable interrupt */
272	}
273	mflags = msgrng_access_enable();
274	config = (watermark << 24) | (IRQ_MSGRING << 16) | (1 << 8) |
275		wm_intr_value;
276	/* clear any pending interrupts */
277	write_c0_eirr64(1ULL << IRQ_MSGRING);
278	msgrng_write_config(config);
279	msgrng_restore(mflags);
280}
281
/*
 * Fast interrupt handler for the message ring watermark interrupt.
 * Picks the next idle handler thread on this core (if any), writes the
 * interrupt-disabled default configuration, and wakes that thread.
 */
static int
msgring_process_fast_intr(void *arg)
{
	struct msgring_thread *mthd;
	struct thread	*td;
	uint32_t	mflags;
	int		core, nt;

	core = xlr_core_id();
	mthd = &msgring_threads[core];

	mtx_lock_spin(&mthd->lock);
	nt = mthd->running;
	if(nt >= mthd->nthreads) {
		/* every handler thread is already running; nothing to wake */
		mtx_unlock_spin(&mthd->lock);
		return (FILTER_HANDLED);
	}

	/* threads[running] is the first idle handler thread */
	td = mthd->threads[nt].thread;
	mflags = msgrng_access_enable();

	/* default value with interrupts disabled */
	msgrng_write_config((1 << 24) | (IRQ_MSGRING << 16) | (1 << 8));

	msgrng_restore(mflags);
	mtx_unlock_spin(&mthd->lock);

	/* wake up the target thread */
	mthd->threads[nt].needed = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
	return (FILTER_HANDLED);
}
319
/*
 * Main loop of a message handler kthread.  'arg' carries the hardware
 * thread id this kthread serves.  The thread binds itself to its CPU,
 * then loops: drain all buckets via xlr_msgring_handler(), keep the
 * watermark configuration in step with the running-thread count, and
 * sleep in IWAIT until msgring_process_fast_intr() wakes it.
 */
static void
msgring_process(void *arg)
{
	struct msgring_thread *mthd;
	struct thread	*td;
	int		hwtid, tid, core;
	int		nmsgs;

	hwtid = (intptr_t)arg;
	core = hwtid / 4;	/* 4 hardware threads per XLR core */
	tid = hwtid % 4;
	mthd = &msgring_threads[core];
	td = mthd->threads[tid].thread;
	KASSERT(curthread == td,
	    ("Incorrect thread core %d, thread %d", core, hwtid));

	/* First bind this thread to the right CPU */
	thread_lock(td);
	sched_bind(td, xlr_hwtid_to_cpuid[hwtid]);
	thread_unlock(td);

	/*
	 * Announce ourselves as a started thread, so the watermark
	 * config and the fast interrupt handler account for us.
	 */
	mtx_lock_spin(&mthd->lock);
	++mthd->nthreads;
	mtx_unlock_spin(&mthd->lock);

	/* start processing messages */
	for(;;) {
		mtx_lock_spin(&mthd->lock);
		++mthd->running;
		msgrng_setconfig(mthd->running, mthd->nthreads);
		mtx_unlock_spin(&mthd->lock);

		/*
		 * Clear the wakeup flag before draining so a wakeup that
		 * arrives while we process is noticed below and not lost.
		 */
		atomic_store_rel_int(&mthd->threads[tid].needed, 0);
		nmsgs = xlr_msgring_handler(0xff, 0);

		mtx_lock_spin(&mthd->lock);
		--mthd->running;
		msgrng_setconfig(mthd->running, mthd->nthreads);
		mtx_unlock_spin(&mthd->lock);

		/* sleep */
		thread_lock(td);
		if (mthd->threads[tid].needed) {
			/* woken again while draining; skip the sleep */
			thread_unlock(td);
			continue;
		}
		sched_class(td, PRI_ITHD);
		TD_SET_IWAIT(td);
		mi_switch(SW_VOL, NULL);
		thread_unlock(td);
	}
}
376
377static void
378create_msgring_thread(int hwtid)
379{
380	struct msgring_thread *mthd;
381	struct thread *td;
382	int	tid, core;
383	int	error;
384
385	core = hwtid / 4;
386	tid = hwtid % 4;
387
388	mthd = &msgring_threads[core];
389	if (tid == 0) {
390		mtx_init(&mthd->lock, "msgrngcore", NULL, MTX_SPIN);
391		mthd->running = mthd->nthreads = 0;
392	}
393	error = kproc_kthread_add(msgring_process, (void *)hwtid,
394	    &msgring_proc, &td, RFSTOPPED, 2, "msgrngproc",
395	    "msgthr%d", hwtid);
396	if (error)
397		panic("kproc_kthread_add() failed with %d", error);
398	mthd->threads[tid].thread = td;
399
400	thread_lock(td);
401	sched_class(td, PRI_ITHD);
402	sched_add(td, SRQ_INTR);
403	thread_unlock(td);
404	CTR2(KTR_INTR, "%s: created %s", __func__, td->td_name);
405}
406
407int
408register_msgring_handler(int startb, int endb, msgring_handler action,
409    void *arg)
410{
411	void	*cookie;
412	int	i;
413	static int msgring_int_enabled = 0;
414
415	printf("register handler %d-%d %p %p\n", startb, endb, action, arg);
416	KASSERT(startb >= 0 && startb <= endb && endb < MSGRNG_NSTATIONS,
417	    ("Invalid value for for bucket range %d,%d", startb, endb));
418
419	mtx_lock_spin(&msgmap_lock);
420	for (i = startb; i <= endb; i++) {
421		KASSERT(msgmap[i].action == NULL,
422		   ("Bucket %d already used [action %p]", i, msgmap[i].action));
423		msgmap[i].action = action;
424		msgmap[i].arg = arg;
425	}
426	mtx_unlock_spin(&msgmap_lock);
427
428	if (xlr_test_and_set(&msgring_int_enabled)) {
429		create_msgring_thread(0);
430		if (msgring_maxthreads > xlr_threads_per_core)
431			msgring_maxthreads = xlr_threads_per_core;
432		cpu_establish_hardintr("msgring", msgring_process_fast_intr,
433			NULL, NULL, IRQ_MSGRING,
434			INTR_TYPE_NET | INTR_FAST, &cookie);
435	}
436	return (0);
437}
438
439static void
440start_msgring_threads(void *arg)
441{
442	int	hwt, tid;
443
444	for (hwt = 1; hwt < XLR_MAX_CORES * XLR_NTHREADS; hwt++) {
445		if ((xlr_hw_thread_mask & (1 << hwt)) == 0)
446			continue;
447		tid = hwt % XLR_NTHREADS;
448		if (tid >= msgring_maxthreads)
449			continue;
450		create_msgring_thread(hwt);
451	}
452}
453
454SYSINIT(start_msgring_threads, SI_SUB_SMP, SI_ORDER_MIDDLE, start_msgring_threads, NULL);
455