// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/sparc64/kernel/setup.c
 *
 *  Copyright (C) 1995,1996  David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997       Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
#include <linux/memblock.h>
#include <uapi/linux/mount.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
#include <asm/mmu_context.h>
#include <asm/timer.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mmu.h>
#include <asm/ns87303.h>
#include <asm/btext.h>
#include <asm/elf.h>
#include <asm/mdesc.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/irq.h>

#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif

#include "entry.h"
#include "kernel.h"

/* Used to synchronize accesses to the NatSemi SUPER I/O chip
 * configuration operations in asm/ns87303.h.
 */
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);

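/* Early console write hook: pass the buffer straight to the PROM. */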
static void
prom_console_write(struct console *con, const char *s, unsigned int n)
{
	prom_write(s, n);
}

/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size = 0;

static struct console prom_early_console = {
	.name =		"earlyprom",
	.write =	prom_console_write,
	.flags =	CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
	.index =	-1,
};

/*
 * Process kernel command line switches that are specific to the
 * SPARC or that require special low-level processing.
 */
static void __init process_switch(char c)
{
	switch (c) {
	case 'd':
	case 's':
		break;
	case 'h':
		prom_printf("boot_flags_init: Halt!\n");
		prom_halt();
		break;
	case 'p':
		prom_early_console.flags &= ~CON_BOOT;
		break;
	case 'P':
		/* Force UltraSPARC-III P-Cache on. */
		if (tlb_type != cheetah) {
			printk("BOOT: Ignoring P-Cache force option.\n");
			break;
		}
		cheetah_pcache_forced_on = 1;
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
		cheetah_enable_pcache();
		break;

	default:
		printk("Unknown boot switch (-%c)\n", c);
		break;
	}
}

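/* Scan the boot command line for "-<switch>" groups handled by
 * process_switch() and for a "mem=<size>" memory size override.
 */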
static void __init boot_flags_init(char *commands)
{
	while (*commands) {
		/* Move to the start of the next "argument". */
		while (*commands == ' ')
			commands++;

		/* Process any command switches, otherwise skip it. */
		if (*commands == '\0')
			break;
		if (*commands == '-') {
			commands++;
			while (*commands && *commands != ' ')
				process_switch(*commands++);
			continue;
		}
		if (!strncmp(commands, "mem=", 4))
			cmdline_memory_size = memparse(commands + 4, &commands);

		while (*commands && *commands != ' ')
			commands++;
	}
}

extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

extern int root_mountflags;

char reboot_command[COMMAND_LINE_SIZE];

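/* Rewrite every __cpuid_patch site with the four-instruction
 * variant matching this cpu type (starfire, cheetah safari/jbus
 * or sun4v), flushing the I-cache after each patched word.
 */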
static void __init per_cpu_patch(void)
{
	struct cpuid_patch_entry *p;
	unsigned long ver;
	int is_jbus;

	if (tlb_type == spitfire && !this_is_starfire)
		return;

	is_jbus = 0;
	if (tlb_type != hypervisor) {
		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
			   (ver >> 32UL) == __SERRANO_ID);
	}

	p = &__cpuid_patch;
	while (p < &__cpuid_patch_end) {
		unsigned long addr = p->addr;
		unsigned int *insns;

		switch (tlb_type) {
		case spitfire:
			insns = &p->starfire[0];
			break;
		case cheetah:
		case cheetah_plus:
			if (is_jbus)
				insns = &p->cheetah_jbus[0];
			else
				insns = &p->cheetah_safari[0];
			break;
		case hypervisor:
			insns = &p->sun4v[0];
			break;
		default:
			prom_printf("Unknown cpu type, halting.\n");
			prom_halt();
		}

		*(unsigned int *) (addr +  0) = insns[0];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		*(unsigned int *) (addr +  4) = insns[1];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));

		*(unsigned int *) (addr +  8) = insns[2];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  8));

		*(unsigned int *) (addr + 12) = insns[3];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr + 12));

		p++;
	}
}

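/* Rewrite one- and two-instruction patch sites with their sun4v
 * (hypervisor) replacements.
 */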
void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
			     struct sun4v_1insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr +  0) = start->insn;
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		start++;
	}
}

void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
			     struct sun4v_2insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr +  0) = start->insns[0];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		*(unsigned int *) (addr +  4) = start->insns[1];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));

		start++;
	}
}

void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
			     struct sun4v_2insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr +  0) = start->insns[0];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));

		*(unsigned int *) (addr +  4) = start->insns[1];
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  4));

		start++;
	}
}

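/* On sun4v, apply the generic hypervisor patches, the M7/M8/SN
 * specific patches and the fast window control patches where the
 * chip type calls for them, then initialize the hypervisor API.
 */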
static void __init sun4v_patch(void)
{
	extern void sun4v_hvapi_init(void);

	if (tlb_type != hypervisor)
		return;

	sun4v_patch_1insn_range(&__sun4v_1insn_patch,
				&__sun4v_1insn_patch_end);

	sun4v_patch_2insn_range(&__sun4v_2insn_patch,
				&__sun4v_2insn_patch_end);

	switch (sun4v_chip_type) {
	case SUN4V_CHIP_SPARC_M7:
	case SUN4V_CHIP_SPARC_M8:
	case SUN4V_CHIP_SPARC_SN:
		sun4v_patch_1insn_range(&__sun_m7_1insn_patch,
					&__sun_m7_1insn_patch_end);
		sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
					 &__sun_m7_2insn_patch_end);
		break;
	default:
		break;
	}

	if (sun4v_chip_type != SUN4V_CHIP_NIAGARA1) {
		sun4v_patch_1insn_range(&__fast_win_ctrl_1insn_patch,
					&__fast_win_ctrl_1insn_patch_end);
	}

	sun4v_hvapi_init();
}

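/* Rewrite the three- and six-instruction population count
 * sequences with their hardware popc based replacements.
 */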
static void __init popc_patch(void)
{
	struct popc_3insn_patch_entry *p3;
	struct popc_6insn_patch_entry *p6;

	p3 = &__popc_3insn_patch;
	while (p3 < &__popc_3insn_patch_end) {
		unsigned long i, addr = p3->addr;

		for (i = 0; i < 3; i++) {
			*(unsigned int *) (addr +  (i * 4)) = p3->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr +  (i * 4)));
		}

		p3++;
	}

	p6 = &__popc_6insn_patch;
	while (p6 < &__popc_6insn_patch_end) {
		unsigned long i, addr = p6->addr;

		for (i = 0; i < 6; i++) {
			*(unsigned int *) (addr +  (i * 4)) = p6->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr +  (i * 4)));
		}

		p6++;
	}
}

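/* Rewrite each __pause_3insn_patch site with its 'pause'
 * instruction based replacement.
 */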
static void __init pause_patch(void)
{
	struct pause_patch_entry *p;

	p = &__pause_3insn_patch;
	while (p < &__pause_3insn_patch_end) {
		unsigned long i, addr = p->addr;

		for (i = 0; i < 3; i++) {
			*(unsigned int *) (addr +  (i * 4)) = p->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr +  (i * 4)));
		}

		p++;
	}
}

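/* First C code run on the boot cpu: apply instruction patches,
 * sanity check the boot cpu id, set up early timekeeping and
 * hand control to start_kernel().
 */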
void __init start_early_boot(void)
{
	int cpu;

	check_if_starfire();
	per_cpu_patch();
	sun4v_patch();
	smp_init_cpu_poke();

	cpu = hard_smp_processor_id();
	if (cpu >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
			    cpu, NR_CPUS);
		prom_halt();
	}
	current_thread_info()->cpu = cpu;

	time_init_early();
	prom_init_report();
	start_kernel();
}

/* On Ultra, we support all of the v8 capabilities. */
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
				   HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
				   HWCAP_SPARC_V9);
EXPORT_SYMBOL(sparc64_elf_hwcap);

static const char *hwcaps[] = {
	"flush", "stbar", "swap", "muldiv", "v9",
	"ultra3", "blkinit", "n2",

	/* These strings are as they appear in the machine description
	 * 'hwcap-list' property for cpu nodes.
	 */
	"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
	"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
	"ima", "cspare", "pause", "cbcond", NULL /* reserved for crypto */,
	"adp",
};

static const char *crypto_hwcaps[] = {
	"aes", "des", "kasumi", "camellia", "md5", "sha1", "sha256",
	"sha512", "mpmul", "montmul", "montsqr", "crc32c",
};

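/* Emit the "cpucaps" line listing the enabled cpu capabilities. */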
void cpucap_info(struct seq_file *m)
{
	unsigned long caps = sparc64_elf_hwcap;
	int i, printed = 0;

	seq_puts(m, "cpucaps\t\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (hwcaps[i] && (caps & bit)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", hwcaps[i]);
			printed++;
		}
	}
	if (caps & HWCAP_SPARC_CRYPTO) {
		unsigned long cfr;

		__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
		for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
			unsigned long bit = 1UL << i;
			if (cfr & bit) {
				seq_printf(m, "%s%s",
					   printed ? "," : "", crypto_hwcaps[i]);
				printed++;
			}
		}
	}
	seq_putc(m, '\n');
}

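/* Boot time "CPU CAPS" report, printed in bracketed groups of
 * eight capabilities per line.
 */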
static void __init report_one_hwcap(int *printed, const char *name)
{
	if ((*printed) == 0)
		printk(KERN_INFO "CPU CAPS: [");
	printk(KERN_CONT "%s%s",
	       (*printed) ? "," : "", name);
	if (++(*printed) == 8) {
		printk(KERN_CONT "]\n");
		*printed = 0;
	}
}

static void __init report_crypto_hwcaps(int *printed)
{
	unsigned long cfr;
	int i;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));

	for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (cfr & bit)
			report_one_hwcap(printed, crypto_hwcaps[i]);
	}
}

static void __init report_hwcaps(unsigned long caps)
{
	int i, printed = 0;

	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (hwcaps[i] && (caps & bit))
			report_one_hwcap(&printed, hwcaps[i]);
	}
	if (caps & HWCAP_SPARC_CRYPTO)
		report_crypto_hwcaps(&printed);
	if (printed != 0)
		printk(KERN_CONT "]\n");
}

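/* Build a hwcap mask from the 'hwcap-list' property of the first
 * cpu node in the machine description, if one is present.
 */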
static unsigned long __init mdesc_cpu_hwcap_list(void)
{
	struct mdesc_handle *hp;
	unsigned long caps = 0;
	const char *prop;
	int len;
	u64 pn;

	hp = mdesc_grab();
	if (!hp)
		return 0;

	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
	if (pn == MDESC_NODE_NULL)
		goto out;

	prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
	if (!prop)
		goto out;

	while (len) {
		int i, plen;

		for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
			unsigned long bit = 1UL << i;

			if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
				caps |= bit;
				break;
			}
		}
		for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
			if (!strcmp(prop, crypto_hwcaps[i]))
				caps |= HWCAP_SPARC_CRYPTO;
		}

		plen = strlen(prop) + 1;
		prop += plen;
		len -= plen;
	}

out:
	mdesc_release(hp);
	return caps;
}

/* This yields a mask that user programs can use to figure out what
 * instruction set this cpu supports.
 */
static void __init init_sparc64_elf_hwcap(void)
{
	unsigned long cap = sparc64_elf_hwcap;
	unsigned long mdesc_caps;

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cap |= HWCAP_SPARC_ULTRA3;
	else if (tlb_type == hypervisor) {
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
			cap |= HWCAP_SPARC_BLKINIT;
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
			cap |= HWCAP_SPARC_N2;
	}

	cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);

	mdesc_caps = mdesc_cpu_hwcap_list();
	if (!mdesc_caps) {
		if (tlb_type == spitfire)
			cap |= AV_SPARC_VIS;
		if (tlb_type == cheetah || tlb_type == cheetah_plus)
			cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
		if (tlb_type == cheetah_plus) {
			unsigned long impl, ver;

			__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
			impl = ((ver >> 32) & 0xffff);
			if (impl == PANTHER_IMPL)
				cap |= AV_SPARC_POPC;
		}
		if (tlb_type == hypervisor) {
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
				cap |= AV_SPARC_ASI_BLK_INIT;
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
				cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
					AV_SPARC_ASI_BLK_INIT |
					AV_SPARC_POPC);
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
				cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
					AV_SPARC_FMAF);
		}
	}
	sparc64_elf_hwcap = cap | mdesc_caps;

	report_hwcaps(sparc64_elf_hwcap);

	if (sparc64_elf_hwcap & AV_SPARC_POPC)
		popc_patch();
	if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
		pause_patch();
}

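/* Allocate the per-cpu hard and soft IRQ stacks from memblock,
 * node-local to each cpu.
 */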
void __init alloc_irqstack_bootmem(void)
{
	unsigned int i, node;

	for_each_possible_cpu(i) {
		node = cpu_to_node(i);

		softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
						       THREAD_SIZE, node);
		if (!softirq_stack[i])
			panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
			      __func__, THREAD_SIZE, THREAD_SIZE, node);
		hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
						       THREAD_SIZE, node);
		if (!hardirq_stack[i])
			panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
			      __func__, THREAD_SIZE, THREAD_SIZE, node);
	}
}

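/* Architecture specific boot time setup: command line, early
 * console, root device and netboot parameters, boot cpu trap
 * block, paging, hwcaps and IRQ stacks.
 */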
void __init setup_arch(char **cmdline_p)
{
	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
	parse_early_param();

	boot_flags_init(*cmdline_p);
#ifdef CONFIG_EARLYFB
	if (btext_find_display())
#endif
		register_console(&prom_early_console);

	if (tlb_type == hypervisor)
		pr_info("ARCH: SUN4V\n");
	else
		pr_info("ARCH: SUN4U\n");

	idprom_init();

	if (!root_flags)
		root_mountflags &= ~MS_RDONLY;
	ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
#endif

#ifdef CONFIG_IP_PNP
	if (!ic_set_manually) {
		phandle chosen = prom_finddevice("/chosen");
		u32 cl, sv, gw;

		cl = prom_getintdefault(chosen, "client-ip", 0);
		sv = prom_getintdefault(chosen, "server-ip", 0);
		gw = prom_getintdefault(chosen, "gateway-ip", 0);
		if (cl && sv) {
			ic_myaddr = cl;
			ic_servaddr = sv;
			if (gw)
				ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
			ic_proto_enabled = 0;
#endif
		}
	}
#endif

	/* Get boot processor trap_block[] setup.  */
	init_cur_cpu_trap(current_thread_info());

	paging_init();
	init_sparc64_elf_hwcap();
	smp_fill_in_cpu_possible_map();
	/*
	 * Once the OF device tree and MDESC have been setup and nr_cpus has
	 * been parsed, we know the list of possible cpus.  Therefore we can
	 * allocate the IRQ stacks.
	 */
	alloc_irqstack_bootmem();
}

extern int stop_a_enabled;

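/* Drop into the PROM command prompt (Stop-A behaviour) unless
 * stop_a_enabled has been cleared.
 */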
void sun_do_break(void)
{
	if (!stop_a_enabled)
		return;

	prom_printf("\n");
	flush_user_windows();

	prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);

int stop_a_enabled = 1;
EXPORT_SYMBOL(stop_a_enabled);