xlr_machdep.c (201845) -> xlr_machdep.c (201881)
1/*-
2 * Copyright (c) 2006-2009 RMI Corporation
3 * Copyright (c) 2002-2004 Juli Mallett <jmallett@FreeBSD.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 */
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD$");
30
31#include "opt_ddb.h"
32
33#include <sys/param.h>
34#include <sys/bus.h>
35#include <sys/conf.h>
36#include <sys/rtprio.h>
37#include <sys/systm.h>
38#include <sys/interrupt.h>
39#include <sys/kernel.h>
40#include <sys/kthread.h>
41#include <sys/ktr.h>
42#include <sys/limits.h>
43#include <sys/lock.h>
44#include <sys/malloc.h>
45#include <sys/mutex.h>
46#include <sys/proc.h>
47#include <sys/random.h>
48#include <sys/resourcevar.h>
49#include <sys/sched.h>
50#include <sys/sysctl.h>
51#include <sys/unistd.h>
52
 53#include <sys/cons.h> /* cninit() */
54#include <sys/kdb.h>
55#include <sys/reboot.h>
56#include <sys/queue.h>
57#include <sys/smp.h>
58#include <sys/timetc.h>
59
60#include <vm/vm.h>
61#include <vm/vm_page.h>
62
63#include <machine/cpu.h>
64#include <machine/cpufunc.h>
65#include <machine/cpuinfo.h>
66#include <machine/cpuregs.h>
67#include <machine/frame.h>
68#include <machine/hwfunc.h>
69#include <machine/md_var.h>
70#include <machine/asm.h>
71#include <machine/pmap.h>
72#include <machine/trap.h>
73#include <machine/clock.h>
74#include <machine/fls64.h>
75#include <machine/intr_machdep.h>
76#include <machine/smp.h>
77#include <mips/rmi/rmi_mips_exts.h>
78
79#include <mips/rmi/iomap.h>
80#include <mips/rmi/clock.h>
81#include <mips/rmi/msgring.h>
82#include <mips/rmi/xlrconfig.h>
83#include <mips/rmi/interrupt.h>
84#include <mips/rmi/pic.h>
85
86#ifdef XLR_PERFMON
87#include <mips/rmi/perfmon.h>
88#endif
89
90
91
92void platform_prep_smp_launch(void);
93
94unsigned long xlr_io_base = (unsigned long)(DEFAULT_XLR_IO_BASE);
95
 96/* 4KB static data area to keep a copy of the boot loader env until
 97 the dynamic kenv is set up */
98char boot1_env[4096];
99extern unsigned long _gp;
 100int rmi_spin_mutex_safe = 0;
101/*
102 * Parameters from boot loader
103 */
104struct boot1_info xlr_boot1_info;
105struct xlr_loader_info xlr_loader_info; /* FIXME : Unused */
106int xlr_run_mode;
107int xlr_argc;
108char **xlr_argv, **xlr_envp;
109uint64_t cpu_mask_info;
110uint32_t xlr_online_cpumask;
111
112#ifdef SMP
113static unsigned long xlr_secondary_gp[MAXCPU];
114static unsigned long xlr_secondary_sp[MAXCPU];
115
116#endif
117extern int mips_cpu_online_mask;
118extern int mips_cpu_logical_mask;
119uint32_t cpu_ltop_map[MAXCPU];
120uint32_t cpu_ptol_map[MAXCPU];
121uint32_t xlr_core_cpu_mask = 0x1; /* Core 0 thread 0 is always there */
122
123void
124platform_reset(void)
125{
126 /* FIXME : use proper define */
127 u_int32_t *mmio = (u_int32_t *) 0xbef18000;
128
129 printf("Rebooting the system now\n");
130 mmio[8] = 0x1;
131}
132
133void
134platform_secondary_init(void)
135{
136#ifdef SMP
137 xlr_msgring_cpu_init();
138
139 /* Setup interrupts for secondary CPUs here */
140 mips_mask_hard_irq(IPI_SMP_CALL_FUNCTION);
141 mips_mask_hard_irq(IPI_STOP);
142 mips_mask_hard_irq(IPI_RENDEZVOUS);
143 mips_mask_hard_irq(IPI_AST);
144 mips_mask_hard_irq(IRQ_TIMER);
145#ifdef XLR_PERFMON
146 mips_mask_hard_irq(IPI_PERFMON);
147#endif
148
149 return;
150#endif
151}
152
153
 154int xlr_asid_pcpu = 256; /* This is the default */
155int xlr_shtlb_enabled = 0;
156
 157/* This function sets up the number of TLB entries available
 158 to the kernel based on the number of threads brought up.
 159 The ASID range also gets divided similarly.
 160 THE NUMBER OF THREADS BROUGHT UP IN EACH CORE MUST BE THE SAME.
 161NOTE: This function will mark all 64 TLB entries as available
 162to the threads brought up in the core. If the kernel is brought up with, say,
 163mask 0x33333333, no TLB entries will be left for the threads in each core
 164that were not brought up. */
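/*
 * Illustrative summary of the split done below (taken from the switch in
 * setup_tlb_resource()): 1 thread/core keeps 256 ASIDs per CPU (MMU setup
 * field 0x0), 2 threads/core get 128 ASIDs each (field 0x2), and 3-4
 * threads/core get 64 ASIDs each (field 0x3); bit 0 of the same control
 * register turns on the shared ("global") TLB mode.
 */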
165static void
166setup_tlb_resource(void)
167{
168 int mmu_setup;
169 int value = 0;
170 uint32_t cpu_map = xlr_boot1_info.cpu_online_map;
171 uint32_t thr_mask = cpu_map >> (xlr_cpu_id() << 2);
172 uint8_t core0 = xlr_boot1_info.cpu_online_map & 0xf;
173 uint8_t core_thr_mask;
174 int i = 0, count = 0;
175
176 /* If CPU0 did not enable shared TLB, other cores need to follow */
177 if ((xlr_cpu_id() != 0) && (xlr_shtlb_enabled == 0))
178 return;
179 /* First check if each core is brought up with the same mask */
180 for (i = 1; i < 8; i++) {
181 core_thr_mask = cpu_map >> (i << 2);
182 core_thr_mask &= 0xf;
183 if (core_thr_mask && core_thr_mask != core0) {
 184 printf
 185 ("Each core must be brought up with the same cpu mask\n");
 186 printf("Cannot enable shared TLB. ");
187 printf("Falling back to split TLB mode\n");
188 return;
189 }
190 }
191
192 xlr_shtlb_enabled = 1;
193 for (i = 0; i < 4; i++)
194 if (thr_mask & (1 << i))
195 count++;
196 switch (count) {
197 case 1:
198 xlr_asid_pcpu = 256;
199 break;
200 case 2:
201 xlr_asid_pcpu = 128;
202 value = 0x2;
203 break;
204 default:
205 xlr_asid_pcpu = 64;
206 value = 0x3;
207 break;
208 }
209
210 mmu_setup = read_32bit_phnx_ctrl_reg(4, 0);
211 mmu_setup = mmu_setup & ~0x06;
212 mmu_setup |= (value << 1);
213
214 /* turn on global mode */
215 mmu_setup |= 0x01;
216
217 write_32bit_phnx_ctrl_reg(4, 0, mmu_setup);
218
219}
220
221/*
 222 * Platform-specific register setup for CPUs.
 223 * XLR has control registers accessible with MFCR/MTCR instructions; this
 224 * code initializes them from the environment variable xlr.cr, which has the
 225 * form xlr.cr=reg:val[,reg:val]*, with all values in hex.
 226 * To enable the shared TLB option, use xlr.shtlb=1.
227 */
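/*
 * Hypothetical example (register numbers and values are illustrative only,
 * not taken from XLR documentation): booting with
 *     xlr.cr=300:1f,301:0
 * makes the parser below call xlr_mtcr(0x300, 0x1f) and xlr_mtcr(0x301, 0x0),
 * while xlr.shtlb=1 lets thread 0 of each core run setup_tlb_resource().
 */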
228void
229platform_cpu_init()
230{
231 char *hw_env;
232 char *start, *end;
233 uint32_t reg, val;
234 int thr_id = xlr_thr_id();
235
236 if (thr_id == 0) {
237 if ((hw_env = getenv("xlr.shtlb")) != NULL) {
238 start = hw_env;
239 reg = strtoul(start, &end, 16);
240 if (start != end && reg != 0)
241 setup_tlb_resource();
242 } else {
243 /* By default TLB entries are shared in a core */
244 setup_tlb_resource();
245 }
246 }
247 if ((hw_env = getenv("xlr.cr")) == NULL)
248 return;
249
250 start = hw_env;
251 while (*start != '\0') {
252 reg = strtoul(start, &end, 16);
253 if (start == end) {
 254 printf("Invalid value in xlr.cr %s, cannot read a hex value at pos %d\n",
255 hw_env, start - hw_env);
256 goto err_return;
257 }
258 if (*end != ':') {
259 printf("Invalid format in xlr.cr %s, ':' expected at pos %d\n",
260 hw_env, end - hw_env);
261 goto err_return;
262 }
263 start = end + 1;/* step over ':' */
264 val = strtoul(start, &end, 16);
265 if (start == end) {
266 printf("Invalid value in xlr.cr %s, cannot read a hex value at pos %d\n",
267 hw_env, start - hw_env);
268 goto err_return;
269 }
270 if (*end != ',' && *end != '\0') {
271 printf("Invalid format in xlr.cr %s, ',' expected at pos %d\n",
272 hw_env, end - hw_env);
273 goto err_return;
274 }
275 xlr_mtcr(reg, val);
276 if (*end == ',')
277 start = end + 1; /* skip over ',' */
278 else
279 start = end;
280 }
281 freeenv(hw_env);
282 return;
283
284err_return:
285 panic("Invalid xlr.cr setting!");
286 return;
287}
288
289
290#ifdef SMP
291extern void xlr_secondary_start(unsigned long, unsigned long, unsigned long);
292static void
293xlr_secondary_entry(void *data)
294{
295 unsigned long sp, gp;
296 unsigned int cpu = (xlr_cpu_id() << 2) + xlr_thr_id();
297
298 sp = xlr_secondary_sp[cpu];
299 gp = xlr_secondary_gp[cpu];
300
301 xlr_secondary_start((unsigned long)mips_secondary_wait, sp, gp);
302}
303
304#endif
305
306static void
307xlr_set_boot_flags(void)
308{
309 char *p;
310
311 for (p = getenv("boot_flags"); p && *p != '\0'; p++) {
312 switch (*p) {
313 case 'd':
314 case 'D':
315 boothowto |= RB_KDB;
316 break;
317 case 'g':
318 case 'G':
319 boothowto |= RB_GDB;
320 break;
321 case 'v':
322 case 'V':
323 boothowto |= RB_VERBOSE;
324 break;
325
326 case 's': /* single-user (default, supported for sanity) */
327 case 'S':
328 boothowto |= RB_SINGLE;
329 break;
330
331 default:
332 printf("Unrecognized boot flag '%c'.\n", *p);
333 break;
334 }
335 }
336
337 if (p)
338 freeenv(p);
339
340 return;
341}
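/*
 * Hypothetical example: setting the kernel environment variable boot_flags
 * to "dv" makes the loop above set RB_KDB and RB_VERBOSE.  Flags are single
 * characters with no '-' prefix, so a value like "-v" would hit the
 * "Unrecognized boot flag" warning for the '-' character.
 */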
342extern uint32_t _end;
343
344
345static void
346mips_init(void)
347{
348 init_param1();
349 init_param2(physmem);
350
351 /* XXX: Catch 22. Something touches the tlb. */
352
353 mips_cpu_init();
354 pmap_bootstrap();
355
356 mips_proc0_init();
357 write_c0_register32(MIPS_COP_0_OSSCRATCH, 7, pcpup->pc_curthread);
358
359 mutex_init();
360
361 PMAP_LOCK_INIT(kernel_pmap);
362
363#ifdef DDB
364#ifdef SMP
365 setup_nmi();
366#endif /* SMP */
367 kdb_init();
368 if (boothowto & RB_KDB) {
369 kdb_enter("Boot flags requested debugger", NULL);
370 }
371#endif
372}
373
374void
375platform_start(__register_t a0 __unused,
376 __register_t a1 __unused,
377 __register_t a2 __unused,
378 __register_t a3 __unused)
379{
380 vm_size_t physsz = 0;
381 int i, j;
382 struct xlr_boot1_mem_map *boot_map;
383#ifdef SMP
384 uint32_t tmp;
385 void (*wakeup) (void *, void *, unsigned int);
386
387#endif
388 /* XXX no zeroing of BSS? */
389
390 /* Initialize pcpu stuff */
-391	mips_pcpu_init();
+391	mips_pcpu0_init();
392
393 /* XXX FIXME the code below is not 64 bit clean */
394 /* Save boot loader and other stuff from scratch regs */
395 xlr_boot1_info = *(struct boot1_info *)read_c0_register32(MIPS_COP_0_OSSCRATCH, 0);
396 cpu_mask_info = read_c0_register64(MIPS_COP_0_OSSCRATCH, 1);
397 xlr_online_cpumask = read_c0_register32(MIPS_COP_0_OSSCRATCH, 2);
398 xlr_run_mode = read_c0_register32(MIPS_COP_0_OSSCRATCH, 3);
399 xlr_argc = read_c0_register32(MIPS_COP_0_OSSCRATCH, 4);
400 xlr_argv = (char **)read_c0_register32(MIPS_COP_0_OSSCRATCH, 5);
401 xlr_envp = (char **)read_c0_register32(MIPS_COP_0_OSSCRATCH, 6);
402
403 /* TODO: Verify the magic number here */
404 /* FIXMELATER: xlr_boot1_info.magic_number */
405
406 /* initialize console so that we have printf */
407 boothowto |= (RB_SERIAL | RB_MULTIPLE); /* Use multiple consoles */
408
409 /* clockrate used by delay, so initialize it here */
410 cpu_clock = xlr_boot1_info.cpu_frequency / 1000000;
411
412 /*
 413 * Note that the time counter on CPU0 does not run at system clock speed,
 414 * but at the PIC time counter speed (which is returned by
 415 * platform_get_frequency()). Thus we do not use
 416 * xlr_boot1_info.cpu_frequency here.
417 */
418 mips_timer_early_init(platform_get_frequency());
419
 420 /* Init the time counter in the PIC and local putc routine */
421 rmi_early_counter_init();
422
423 /* Init console please */
424 cninit();
425 init_static_kenv(boot1_env, sizeof(boot1_env));
426 printf("Environment (from %d args):\n", xlr_argc - 1);
427 if (xlr_argc == 1)
428 printf("\tNone\n");
429 for (i = 1; i < xlr_argc; i++) {
430 char *n;
431
432 printf("\t%s\n", xlr_argv[i]);
433 n = strsep(&xlr_argv[i], "=");
434 if (xlr_argv[i] == NULL)
435 setenv(n, "1");
436 else
437 setenv(n, xlr_argv[i]);
438 }
439
440 xlr_set_boot_flags();
441
442 /* get physical memory info from boot loader */
443 boot_map = (struct xlr_boot1_mem_map *)
444 (unsigned long)xlr_boot1_info.psb_mem_map;
445 for (i = 0, j = 0; i < boot_map->num_entries; i++, j += 2) {
446 if (boot_map->physmem_map[i].type == BOOT1_MEM_RAM) {
447 if (j == 14) {
448 printf("*** ERROR *** memory map too large ***\n");
449 break;
450 }
451 if (j == 0) {
452 /* TODO FIXME */
453 /* start after kernel end */
454 phys_avail[0] = (vm_paddr_t)
455 MIPS_KSEG0_TO_PHYS(&_end) + 0x20000;
456 /* boot loader start */
 457 /* HACK to use the boot loader's memory region */
458 /* TODO FIXME */
459 if (boot_map->physmem_map[0].size == 0x0c000000) {
460 boot_map->physmem_map[0].size = 0x0ff00000;
461 }
462 phys_avail[1] = boot_map->physmem_map[0].addr +
463 boot_map->physmem_map[0].size;
464
465 } else {
466/*
 467 * Can't use this code yet, because most of the fixed allocations happen from
 468 * the biggest physical area. If we have more than 512M of memory, the kernel
 469 * will try to map from the second area, which is not in KSEG0 and not mapped.
470 */
471 phys_avail[j] = (vm_paddr_t)
472 boot_map->physmem_map[i].addr;
473 phys_avail[j + 1] = phys_avail[j] +
474 boot_map->physmem_map[i].size;
 475#if 0 /* FIXME TODO */
476 phys_avail[j] = phys_avail[j + 1] = 0;
477#endif
478 }
479 physsz += boot_map->physmem_map[i].size;
480 }
481 }
482
483 /* FIXME XLR TODO */
484 phys_avail[j] = phys_avail[j + 1] = 0;
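	/*
	 * Hypothetical example of the result (map values are illustrative only):
	 * with two BOOT1_MEM_RAM entries {addr 0x0, size 0x0c000000} and
	 * {addr 0x20000000, size 0x10000000}, the loop above produces
	 *   phys_avail[0] = end of kernel + 128KB,
	 *   phys_avail[1] = 0x0ff00000 (after the boot loader region hack),
	 *   phys_avail[2] = 0x20000000, phys_avail[3] = 0x30000000,
	 * and the pair written just above terminates the list.
	 */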
485 realmem = physmem = btoc(physsz);
486
487 /* Store pcpu in scratch 5 */
488 write_c0_register32(MIPS_COP_0_OSSCRATCH, 5, pcpup);
489
490 /* Set up hz, among others. */
491 mips_init();
492
493#ifdef SMP
494 /*
 495 * If thread 0 of any core is not available, then mark the whole core
 496 * as not available.
497 */
498 tmp = xlr_boot1_info.cpu_online_map;
499 for (i = 4; i < MAXCPU; i += 4) {
500 if ((tmp & (0xf << i)) && !(tmp & (0x1 << i))) {
501 /*
 502 * Oops: thread 0 is not available. Disable the whole
 503 * core.
504 */
505 tmp = tmp & ~(0xf << i);
506 printf("WARNING: Core %d is disabled because thread 0"
507 " of this core is not enabled.\n", i / 4);
508 }
509 }
510 xlr_boot1_info.cpu_online_map = tmp;
511
 512 /* Wake up the other CPUs and put them in the BSD park code. */
513 for (i = 1, j = 1; i < 32; i++) {
 514 /* Allocate stacks for all other CPUs from FreeBSD KSEG0 memory. */
515 if ((1U << i) & xlr_boot1_info.cpu_online_map) {
516 xlr_secondary_gp[i] =
517 pmap_steal_memory(PAGE_SIZE);
518 if (!xlr_secondary_gp[i])
519 panic("Allocation failed for secondary cpu stacks");
520 xlr_secondary_sp[i] =
521 xlr_secondary_gp[i] + PAGE_SIZE - CALLFRAME_SIZ;
522 xlr_secondary_gp[i] = (unsigned long)&_gp;
523 /* Build ltop and ptol cpu map. */
524 cpu_ltop_map[j] = i;
525 cpu_ptol_map[i] = j;
526 if ((i & 0x3) == 0) /* store thread0 of each core */
527 xlr_core_cpu_mask |= (1 << j);
528 mips_cpu_logical_mask |= (1 << j);
529 j++;
530 }
531 }
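	/*
	 * Hypothetical example of the mapping built above: with
	 * cpu_online_map = 0x33 (threads 0-1 of cores 0 and 1), the loop yields
	 * cpu_ltop_map[1] = 1, cpu_ltop_map[2] = 4, cpu_ltop_map[3] = 5,
	 * the inverse entries in cpu_ptol_map, and bit 2 set in
	 * xlr_core_cpu_mask for core 1's thread 0 (physical cpu 4).
	 */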
532
533 mips_cpu_online_mask |= xlr_boot1_info.cpu_online_map;
534 wakeup = ((void (*) (void *, void *, unsigned int))
535 (unsigned long)(xlr_boot1_info.wakeup));
536 printf("Waking up CPUs 0x%llx.\n", xlr_boot1_info.cpu_online_map & ~(0x1U));
537 if (xlr_boot1_info.cpu_online_map & ~(0x1U))
538 wakeup(xlr_secondary_entry, 0,
539 (unsigned int)xlr_boot1_info.cpu_online_map);
540#endif
541
 542 /* XLR-specific post initialization */
 543 /*
 544 * The expectation is that mutex_init() is already done in
 545 * mips_init(). XXX NOTE: We may need to move this to SMP-based init
 546 * code for each CPU later.
547 */
548 rmi_spin_mutex_safe = 1;
549 on_chip_init();
550 mips_timer_init_params(platform_get_frequency(), 0);
 551 printf("Platform-specific startup now complete\n");
552}
553
554void
555platform_identify(void)
556{
557 printf("Board [%d:%d], processor 0x%08x\n", (int)xlr_boot1_info.board_major_version,
558 (int)xlr_boot1_info.board_minor_version, mips_rd_prid());
559
560
561}
562
563/*
564 * XXX Maybe return the state of the watchdog in enter, and pass it to
565 * exit? Like spl().
566 */
567void
568platform_trap_enter(void)
569{
570}
571
572void
573platform_trap_exit(void)
574{
575}
576
577
578/*
579 void
580 platform_update_intrmask(int intr)
581 {
582 write_c0_eimr64(read_c0_eimr64() | (1ULL<<intr));
583 }
584*/
585
586void
587disable_msgring_int(void *arg);
588void
589enable_msgring_int(void *arg);
590void xlr_msgring_handler(struct trapframe *tf);
591void msgring_process_fast_intr(void *arg);
592
593struct msgring_ithread {
594 struct thread *i_thread;
595 u_int i_pending;
596 u_int i_flags;
597 int i_cpu;
598};
599struct msgring_ithread msgring_ithreads[MAXCPU];
600char ithd_name[MAXCPU][32];
601
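/*
 * Flow, as implemented below: msgring_process_fast_intr() runs in interrupt
 * context; it masks the message ring interrupt, marks i_pending and wakes the
 * per-CPU kthread.  msgring_process() is that kthread's body: it binds itself
 * to its CPU, drains messages via xlr_msgring_handler(), then re-enables the
 * interrupt and goes back to IWAIT until the next wakeup.
 */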
602void
603msgring_process_fast_intr(void *arg)
604{
605 int cpu = PCPU_GET(cpuid);
606 volatile struct msgring_ithread *it;
607 struct proc *p;
608 struct thread *td;
609
 610 /* wake up an appropriate intr thread to process this interrupt */
611 it = (volatile struct msgring_ithread *)&msgring_ithreads[cpu];
612 td = it->i_thread;
613 p = td->td_proc;
614
615 /*
616 * Interrupt thread will enable the interrupts after processing all
617 * messages
618 */
619 disable_msgring_int(NULL);
620 it->i_pending = 1;
621 if (TD_AWAITING_INTR(td)) {
622 thread_lock(td);
623 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
624 p->p_comm);
625 TD_CLR_IWAIT(td);
626 sched_add(td, SRQ_INTR);
627 thread_unlock(td);
628 } else {
629 CTR4(KTR_INTR, "%s: pid %d (%s): state %d",
630 __func__, p->p_pid, p->p_comm, td->td_state);
631 }
632
633}
634
635#define MIT_DEAD 4
636static void
637msgring_process(void *arg)
638{
639 volatile struct msgring_ithread *ithd;
640 struct thread *td;
641 struct proc *p;
642
643 td = curthread;
644 p = td->td_proc;
645 ithd = (volatile struct msgring_ithread *)arg;
646 KASSERT(ithd->i_thread == td,
647 ("%s:msg_ithread and proc linkage out of sync", __func__));
648
649 /* First bind this thread to the right CPU */
650 thread_lock(td);
651 sched_bind(td, ithd->i_cpu);
652 thread_unlock(td);
653
654 //printf("Started %s on CPU %d\n", __FUNCTION__, ithd->i_cpu);
655
656 while (1) {
657 if (ithd->i_flags & MIT_DEAD) {
658 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
659 p->p_pid, p->p_comm);
660 kthread_exit();
661 }
662 while (ithd->i_pending) {
663 /*
664 * This might need a full read and write barrier to
665 * make sure that this write posts before any of the
666 * memory or device accesses in the handlers.
667 */
668 atomic_store_rel_int(&ithd->i_pending, 0);
669 xlr_msgring_handler(NULL);
670 }
671 if (!ithd->i_pending && !(ithd->i_flags & MIT_DEAD)) {
672 thread_lock(td);
673 sched_class(td, PRI_ITHD);
674 TD_SET_IWAIT(td);
675 thread_unlock(td);
676 enable_msgring_int(NULL);
677 mi_switch(SW_VOL, NULL);
678 }
679 }
680
681}
682void
683platform_prep_smp_launch(void)
684{
685 int cpu;
686 uint32_t cpu_mask;
687 struct msgring_ithread *ithd;
688 struct thread *td;
689 struct proc *p;
690 int error;
691
692 cpu_mask = PCPU_GET(cpumask) | PCPU_GET(other_cpus);
693
694 /* Create kernel threads for message ring interrupt processing */
695 /* Currently create one task for thread 0 of each core */
696 for (cpu = 0; cpu < MAXCPU; cpu += 1) {
697
698 if (!((1 << cpu) & cpu_mask))
699 continue;
700
701 if ((cpu_ltop_map[cpu] % 4) != 0)
702 continue;
703
704 ithd = &msgring_ithreads[cpu];
705 sprintf(ithd_name[cpu], "msg_intr%d", cpu);
706 error = kproc_create(msgring_process,
707 (void *)ithd,
708 &p,
709 (RFSTOPPED | RFHIGHPID),
710 2,
711 ithd_name[cpu]);
712
713 if (error)
714 panic("kproc_create() failed with %d", error);
715 td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */
716
717 thread_lock(td);
718 sched_class(td, PRI_ITHD);
719 TD_SET_IWAIT(td);
720 thread_unlock(td);
721 ithd->i_thread = td;
722 ithd->i_pending = 0;
723 ithd->i_cpu = cpu;
724 CTR2(KTR_INTR, "%s: created %s", __func__, ithd_name[cpu]);
725 }
726}