fmn.c revision 212790
/*-
 * Copyright (c) 2003-2009 RMI Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of RMI Corporation, nor the names of its contributors,
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RMI_BSD */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/rmi/fmn.c 212790 2010-09-17 10:28:10Z jchandra $");
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/bus.h>

#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/unistd.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>

#include <machine/reg.h>
#include <machine/cpu.h>
#include <machine/hwfunc.h>
#include <machine/mips_opcode.h>

#include <machine/param.h>
#include <machine/intr_machdep.h>
#include <mips/rmi/interrupt.h>
#include <mips/rmi/msgring.h>
#include <mips/rmi/pic.h>
#include <mips/rmi/board.h>

/* definitions */
struct tx_stn_handler {
	void (*action) (int, int, int, int, struct msgrng_msg *, void *);
	void *dev_id;
};

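/*
 * Per-core message ring interrupt thread state.  One entry is created for
 * thread 0 of each enabled core and published in msgring_ithreads[],
 * indexed by core id.
 */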
struct msgring_ithread {
	struct thread *i_thread;
	u_int i_pending;
	u_int i_flags;
	int i_cpu;
	int i_core;
};

struct msgring_ithread *msgring_ithreads[MAXCPU];

/* globals */
static struct tx_stn_handler tx_stn_handlers[MAX_TX_STNS];

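/*
 * Initialize the eight credit counter entries that this core holds for
 * destination register MSGRNG_CC_<dest>_REG, using the per-core credit
 * configuration table passed in as 'counter' (see xlr_msgring_cpu_init()).
 */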
#define MSGRNG_CC_INIT_CPU_DEST(dest, counter) \
do { \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][0], 0); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][1], 1); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][2], 2); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][3], 3); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][4], 4); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][5], 5); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][6], 6); \
     msgrng_write_cc(MSGRNG_CC_##dest##_REG, counter[dest][7], 7); \
} while(0)

/* make this a read/write spinlock */
static struct mtx msgrng_lock;
static int msgring_int_enabled;
static int msgring_pop_num_buckets;
static uint8_t msgring_pop_bucket_mask;
static int msgring_int_type;
static int msgring_watermark_count;
static uint32_t msgring_thread_mask;
uint32_t msgrng_msg_cycles = 0;

void
xlr_msgring_cpu_init(void)
{
	struct stn_cc *cc_config;
	struct bucket_size *bucket_sizes;
	int id;
	uint32_t flags;

	KASSERT(xlr_thr_id() == 0,
		("xlr_msgring_cpu_init from non-zero thread\n"));

	id = xlr_core_id();

	bucket_sizes = xlr_board_info.bucket_sizes;
	cc_config = xlr_board_info.credit_configs[id];

	/*
	 * Message stations are shared among all threads in a CPU core.
	 * Assume that thread 0 of a core is always active whenever any
	 * other thread on that core is active.
	 */
	flags = msgrng_access_enable();

	msgrng_write_bucksize(0, bucket_sizes->bucket[id * 8 + 0]);
	msgrng_write_bucksize(1, bucket_sizes->bucket[id * 8 + 1]);
	msgrng_write_bucksize(2, bucket_sizes->bucket[id * 8 + 2]);
	msgrng_write_bucksize(3, bucket_sizes->bucket[id * 8 + 3]);
	msgrng_write_bucksize(4, bucket_sizes->bucket[id * 8 + 4]);
	msgrng_write_bucksize(5, bucket_sizes->bucket[id * 8 + 5]);
	msgrng_write_bucksize(6, bucket_sizes->bucket[id * 8 + 6]);
	msgrng_write_bucksize(7, bucket_sizes->bucket[id * 8 + 7]);

	MSGRNG_CC_INIT_CPU_DEST(0, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(1, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(2, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(3, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(4, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(5, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(6, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(7, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(8, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(9, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(10, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(11, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(12, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(13, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(14, cc_config->counters);
	MSGRNG_CC_INIT_CPU_DEST(15, cc_config->counters);

	msgrng_restore(flags);
}

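/*
 * Set up the software defaults used by the handler and by
 * enable_msgring_int(): service all eight buckets (mask 0xff), interrupt
 * type 0x02, a watermark of one message, and a thread mask of 0x01
 * (thread 0 of each core).
 */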
void
xlr_msgring_config(void)
{
	mtx_init(&msgrng_lock, "msgring", NULL, MTX_SPIN | MTX_RECURSE);
	msgring_int_type = 0x02;
	msgring_pop_num_buckets = 8;
	msgring_pop_bucket_mask = 0xff;
	msgring_int_enabled = 0;
	msgring_watermark_count = 1;
	msgring_thread_mask = 0x01;
}

/*
 * Drain up to max_messages messages from the buckets set in bucket_mask.
 * Use max_messages = 0 to drain all pending messages.  Returns the number
 * of messages popped from the ring.
 */
uint32_t
xlr_msgring_handler(uint8_t bucket_mask, uint32_t max_messages)
{
	int bucket = 0;
	int size = 0, code = 0, rx_stid = 0, tx_stid = 0;
	struct msgrng_msg msg;
	uint8_t bucket_empty_bm = 0;
	unsigned int status = 0;
	unsigned long mflags;
	uint32_t n_msgs;

	n_msgs = 0;
	mflags = msgrng_access_enable();
	/* Drain messages from the selected buckets until they are all empty. */
	for (;;) {
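		/*
		 * Bits 31:24 of the message ring status register form the
		 * per-bucket "empty" bitmap for this core's eight buckets.
		 */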
		bucket_empty_bm = (msgrng_read_status() >> 24) & bucket_mask;

		/* all buckets empty, break */
		if (bucket_empty_bm == bucket_mask)
			break;

		for (bucket = 0; bucket < msgring_pop_num_buckets; bucket++) {
			if (!((1 << bucket) & bucket_mask)        /* bucket not in mask */
			    || (bucket_empty_bm & (1 << bucket))) /* empty */
				continue;

			status = message_receive(bucket, &size, &code, &rx_stid, &msg);
			if (status)
				continue;

			tx_stid = xlr_board_info.msgmap[rx_stid];
			n_msgs++;

			if (!tx_stn_handlers[tx_stid].action) {
				printf("[%s]: No Handler for message from stn_id=%d, bucket=%d, "
				    "size=%d, msg0=%jx, dropping message\n",
				    __FUNCTION__, tx_stid, bucket, size, (uintmax_t)msg.msg0);
			} else {
				//printf("[%s]: rx_stid = %d\n", __FUNCTION__, rx_stid);
				msgrng_restore(mflags);
				(*tx_stn_handlers[tx_stid].action) (bucket, size, code, rx_stid,
				    &msg, tx_stn_handlers[tx_stid].dev_id);
				mflags = msgrng_access_enable();
			}
			if (max_messages > 0 && n_msgs >= max_messages)
				goto done;
		}
	}

done:
	msgrng_restore(mflags);

	return (n_msgs);
}

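/*
 * The message ring config register, as written below, packs the watermark
 * count into bits 31:24, the IRQ number into bits 23:16, the destination
 * thread mask into bits 15:8 and the interrupt type into the low bits;
 * disable_msgring_int() clears only the interrupt type field.
 */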
static void
enable_msgring_int(void)
{
	uint32_t config, mflags;

	config = (msgring_watermark_count << 24) | (IRQ_MSGRING << 16) |
	    (msgring_thread_mask << 8) | msgring_int_type;
	mflags = msgrng_access_enable();
	msgrng_write_config(config);
	msgrng_restore(mflags);
}

static void
disable_msgring_int(void)
{
	uint32_t config, mflags;

	mflags = msgrng_access_enable();
	config = msgrng_read_config() & ~0x3;
	msgrng_write_config(config);
	msgrng_restore(mflags);
}

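/*
 * Fast interrupt filter for IRQ_MSGRING: mask the message ring interrupt
 * and schedule this core's interrupt thread, which drains the buckets and
 * re-enables the interrupt when it is done.
 */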
static int
msgring_process_fast_intr(void *arg)
{
	int core = xlr_core_id();
	volatile struct msgring_ithread *it;
	struct thread *td;

	/* Wake up this core's interrupt thread to process the messages. */
	it = (volatile struct msgring_ithread *)msgring_ithreads[core];
	KASSERT(it != NULL, ("No interrupt thread on core %d", core));
	td = it->i_thread;

	/*
	 * The interrupt thread will re-enable the interrupt after it has
	 * processed all pending messages.
	 */
	disable_msgring_int();
	atomic_store_rel_int(&it->i_pending, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
	return FILTER_HANDLED;
}

static void
msgring_process(void *arg)
{
	volatile struct msgring_ithread *ithd;
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (volatile struct msgring_ithread *)arg;
	KASSERT(ithd->i_thread == td,
	    ("%s:msg_ithread and proc linkage out of sync", __func__));

	/* First bind this thread to the right CPU */
	thread_lock(td);
	sched_bind(td, ithd->i_cpu);
	thread_unlock(td);

	atomic_store_rel_ptr((volatile uintptr_t *)&msgring_ithreads[ithd->i_core],
	    (uintptr_t)arg);
	enable_msgring_int();

	while (1) {
		while (ithd->i_pending) {
			/*
			 * This might need a full read and write barrier to
			 * make sure that this write posts before any of the
			 * memory or device accesses in the handlers.
			 */
			xlr_msgring_handler(msgring_pop_bucket_mask, 0);
			atomic_store_rel_int(&ithd->i_pending, 0);
			enable_msgring_int();
		}
		if (!ithd->i_pending) {
			thread_lock(td);
			if (ithd->i_pending) {
				thread_unlock(td);
				continue;
			}
			sched_class(td, PRI_ITHD);
			TD_SET_IWAIT(td);
			mi_switch(SW_VOL, NULL);
			thread_unlock(td);
		}
	}
}

static void
create_msgring_thread(int core, int cpu)
{
	struct msgring_ithread *ithd;
	struct thread *td;
	struct proc *p;
	int error;

	/*
	 * Create a kernel thread for message ring interrupt processing.
	 * Currently one thread is created for thread 0 of each core.
	 */
	ithd = malloc(sizeof(struct msgring_ithread),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	error = kproc_create(msgring_process, (void *)ithd, &p,
	    RFSTOPPED | RFHIGHPID, 2, "msg_intr%d", cpu);

	if (error)
		panic("kproc_create() failed with %d", error);
	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */

	ithd->i_thread = td;
	ithd->i_pending = 0;
	ithd->i_cpu = cpu;
	ithd->i_core = core;

	thread_lock(td);
	sched_class(td, PRI_ITHD);
	sched_add(td, SRQ_INTR);
	thread_unlock(td);
	CTR2(KTR_INTR, "%s: created %s", __func__, td->td_name);
}

int
register_msgring_handler(int major,
    void (*action) (int, int, int, int, struct msgrng_msg *, void *),
    void *dev_id)
{
	void *cookie;		/* FIXME - use? */

	if (major >= MAX_TX_STNS)
		return 1;

	mtx_lock_spin(&msgrng_lock);
	tx_stn_handlers[major].action = action;
	tx_stn_handlers[major].dev_id = dev_id;
	mtx_unlock_spin(&msgrng_lock);

	if (xlr_test_and_set(&msgring_int_enabled)) {
		create_msgring_thread(0, 0);
		cpu_establish_hardintr("msgring", (driver_filter_t *)msgring_process_fast_intr,
		    NULL, NULL, IRQ_MSGRING,
		    INTR_TYPE_NET | INTR_FAST, &cookie);
	}
	return 0;
}
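/*
 * Example usage (an illustrative sketch only; the handler name, station id
 * and softc below are hypothetical, not definitions from this file):
 *
 *	static void
 *	my_stn_handler(int bucket, int size, int code, int stid,
 *	    struct msgrng_msg *msg, void *dev_id)
 *	{
 *		... consume the message data in msg->msg0 ...
 *	}
 *
 *	(void)register_msgring_handler(MY_TX_STN_ID, my_stn_handler, softc);
 *
 * The first registration also creates the core 0 interrupt thread and
 * installs msgring_process_fast_intr() on IRQ_MSGRING.
 */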

static void
start_msgring_threads(void *arg)
{
	int core, cpu;

	for (core = 1; core < XLR_MAX_CORES; core++) {
		if ((xlr_hw_thread_mask >> (4 * core)) & 0xf) {
			/* start one thread for an enabled core */
			cpu = xlr_hwtid_to_cpuid[4 * core];
			create_msgring_thread(core, cpu);
		}
	}
}

SYSINIT(start_msgring_threads, SI_SUB_SMP, SI_ORDER_MIDDLE, start_msgring_threads, NULL);