1/*
2 *  arch/s390/kernel/early.c
3 *
4 *    Copyright IBM Corp. 2007
5 *    Author(s): Hongjie Yang <hongjie@us.ibm.com>,
6 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
7 */
8
9#include <linux/init.h>
10#include <linux/errno.h>
11#include <linux/string.h>
12#include <linux/ctype.h>
13#include <linux/lockdep.h>
14#include <linux/module.h>
15#include <linux/pfn.h>
16#include <linux/uaccess.h>
17#include <asm/ipl.h>
18#include <asm/lowcore.h>
19#include <asm/processor.h>
20#include <asm/sections.h>
21#include <asm/setup.h>
22#include <asm/cpcmd.h>
23#include <asm/sclp.h>
24
25/*
26 * Create a Kernel NSS if the SAVESYS= parameter is defined
27 */
28#define DEFSYS_CMD_SIZE		96
29#define SAVESYS_CMD_SIZE	32
30
31char kernel_nss_name[NSS_NAME_SIZE + 1];
32
33#ifdef CONFIG_SHARED_KERNEL
34static noinline __init void create_kernel_nss(void)
35{
36	unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
37#ifdef CONFIG_BLK_DEV_INITRD
38	unsigned int sinitrd_pfn, einitrd_pfn;
39#endif
40	int response;
41	char *savesys_ptr;
42	char upper_command_line[COMMAND_LINE_SIZE];
43	char defsys_cmd[DEFSYS_CMD_SIZE];
44	char savesys_cmd[SAVESYS_CMD_SIZE];
45
46	/* Do nothing if we are not running under VM */
47	if (!MACHINE_IS_VM)
48		return;
49
50	/* Convert COMMAND_LINE to upper case */
51	for (i = 0; i < strlen(COMMAND_LINE); i++)
52		upper_command_line[i] = toupper(COMMAND_LINE[i]);
53
54	savesys_ptr = strstr(upper_command_line, "SAVESYS=");
55
56	if (!savesys_ptr)
57		return;
58
59	savesys_ptr += 8;    /* Point to the beginning of the NSS name */
60	for (i = 0; i < NSS_NAME_SIZE; i++) {
61		if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
62			break;
63		kernel_nss_name[i] = savesys_ptr[i];
64	}
65
66	stext_pfn = PFN_DOWN(__pa(&_stext));
67	eshared_pfn = PFN_DOWN(__pa(&_eshared));
68	end_pfn = PFN_UP(__pa(&_end));
69	min_size = end_pfn << 2;
70
71	sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
72		kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
73		eshared_pfn, end_pfn);
74
75#ifdef CONFIG_BLK_DEV_INITRD
76	if (INITRD_START && INITRD_SIZE) {
77		sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
78		einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
79		min_size = einitrd_pfn << 2;
80		sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
81		sinitrd_pfn, einitrd_pfn);
82	}
83#endif
84
85	sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
86	sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
87		kernel_nss_name, kernel_nss_name);
88
89	__cpcmd(defsys_cmd, NULL, 0, &response);
90
91	if (response != 0)
92		return;
93
94	__cpcmd(savesys_cmd, NULL, 0, &response);
95
96	if (response != strlen(savesys_cmd))
97		return;
98
99	ipl_flags = IPL_NSS_VALID;
100}
101
102#else /* CONFIG_SHARED_KERNEL */
103
/* Without CONFIG_SHARED_KERNEL there is no NSS support: nothing to do. */
static inline void create_kernel_nss(void) { }
105
106#endif /* CONFIG_SHARED_KERNEL */
107
108/*
109 * Clear bss memory
110 */
111static noinline __init void clear_bss_section(void)
112{
113	memset(__bss_start, 0, __bss_stop - __bss_start);
114}
115
116/*
117 * Initialize storage key for kernel pages
118 */
119static noinline __init void init_kernel_storage_key(void)
120{
121	unsigned long end_pfn, init_pfn;
122
123	end_pfn = PFN_UP(__pa(&_end));
124
125	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
126		page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
127}
128
129static noinline __init void detect_machine_type(void)
130{
131	struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;
132
133	get_cpu_id(&S390_lowcore.cpu_data.cpu_id);
134
135	/* Running under z/VM ? */
136	if (cpuinfo->cpu_id.version == 0xff)
137		machine_flags |= 1;
138
139	/* Running on a P/390 ? */
140	if (cpuinfo->cpu_id.machine == 0x7490)
141		machine_flags |= 4;
142}
143
144#ifdef CONFIG_64BIT
/*
 * Try to detect the memory size with a single diagnose 0x260 call
 * instead of the page-by-page tprot scan.  Returns 0 and fills in
 * memory_chunk[0] on success, -ENOSYS otherwise.
 */
static noinline __init int memory_fast_detect(void)
{
	unsigned long val0 = 0;
	unsigned long val1 = 0xc;
	int ret = -ENOSYS;

	/*
	 * Skip the fast path when IPL'ed from an NSS; the NSS defines
	 * its own memory layout.
	 */
	if (ipl_flags & IPL_NSS_VALID)
		return -ENOSYS;

	/*
	 * The EX_TABLE entry catches the program check raised on machines
	 * that do not implement this diagnose: the fixup jumps from 0 to
	 * 1, skipping "lhi %0,0", so ret keeps its -ENOSYS preset.  Only
	 * a successful diagnose clears ret.
	 */
	asm volatile(
		"	diag	%1,%2,0x260\n"
		"0:	lhi	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");

	/*
	 * NOTE(review): on success both registers presumably hold the
	 * highest addressable byte; a mismatch means the answer is not
	 * usable - confirm against the diagnose 0x260 documentation.
	 */
	if (ret || val0 != val1)
		return -ENOSYS;

	memory_chunk[0].size = val0 + 1;	/* val0 is the last byte address */
	return 0;
}
167#else
/*
 * Diagnose 0x260 based detection is 64 bit only; report "not available"
 * so the caller falls back to the tprot scan.
 */
static inline int memory_fast_detect(void)
{
	return -ENOSYS;
}
172#endif
173
174#define ADDR2G	(1UL << 31)
175
176static noinline __init unsigned long sclp_memory_detect(void)
177{
178	struct sclp_readinfo_sccb *sccb;
179	unsigned long long memsize;
180
181	sccb = &s390_readinfo_sccb;
182
183	if (sccb->header.response_code != 0x10)
184		return 0;
185
186	if (sccb->rnsize)
187		memsize = sccb->rnsize << 20;
188	else
189		memsize = sccb->rnsize2 << 20;
190	if (sccb->rnmax)
191		memsize *= sccb->rnmax;
192	else
193		memsize *= sccb->rnmax2;
194#ifndef CONFIG_64BIT
195	/*
196	 * Can't deal with more than 2G in 31 bit addressing mode, so
197	 * limit the value in order to avoid strange side effects.
198	 */
199	if (memsize > ADDR2G)
200		memsize = ADDR2G;
201#endif
202	return (unsigned long) memsize;
203}
204
/*
 * Test protection of one address: returns the TPROT condition code
 * (interpreted by the caller via CHUNK_READ_WRITE/CHUNK_READ_ONLY) or
 * -1UL when the access faulted, i.e. no storage exists at @addr.  cc is
 * preset to -1 and only overwritten by the ipm/srl pair when tprot
 * completed; on a fault the EX_TABLE fixup jumps past them to label 1.
 */
static inline __init unsigned long __tprot(unsigned long addr)
{
	int cc = -1;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (cc) : "a" (addr) : "cc");
	return (unsigned long)cc;
}
218
219/* Checking memory in 128KB increments. */
220#define CHUNK_INCR	(1UL << 17)
221
/*
 * Probe memory in CHUNK_INCR steps with __tprot() and record each run
 * of addresses with identical accessibility into memory_chunk[].  A
 * non-zero @memsize (from SCLP) bounds the scan; if it is zero the scan
 * stops at the first hole instead.
 */
static noinline __init void find_memory_chunks(unsigned long memsize)
{
	unsigned long addr = 0, old_addr = 0;
	unsigned long old_cc = CHUNK_READ_WRITE;
	unsigned long cc;
	int chunk = 0;

	while (chunk < MEMORY_CHUNKS) {
		/* Advance until the protection state changes. */
		cc = __tprot(addr);
		while (cc == old_cc) {
			addr += CHUNK_INCR;
			cc = __tprot(addr);
#ifndef CONFIG_64BIT
			/* 31 bit mode: stop before wrapping at 2GB */
			if (addr == ADDR2G)
				break;
#endif
		}

		/*
		 * Record the finished run, but only for accessible
		 * memory; runs with cc == -1UL are holes and skipped.
		 */
		if (old_addr != addr &&
		    (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
			memory_chunk[chunk].addr = old_addr;
			memory_chunk[chunk].size = addr - old_addr;
			memory_chunk[chunk].type = old_cc;
			chunk++;
		}

		old_addr = addr;
		old_cc = cc;

#ifndef CONFIG_64BIT
		if (addr == ADDR2G)
			break;
#endif
		/*
		 * Finish memory detection at the first hole
		 * if storage size is unknown.
		 */
		if (cc == -1UL && !memsize)
			break;
		if (memsize && addr >= memsize)
			break;
	}
}
265
/*
 * Early program check handler: rewrite the old PSW to the exception
 * table fixup for the faulting instruction, so EX_TABLE-protected
 * probes (tprot, diag) can be used before the real trap handlers are
 * installed.  A fault without a fixup entry is fatal.
 */
static __init void early_pgm_check_handler(void)
{
	unsigned long addr;
	const struct exception_table_entry *fixup;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr & PSW_ADDR_INSN);
	if (!fixup)
		disabled_wait(0);	/* presumably never returns - confirm */
	S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}
277
278static noinline __init void setup_lowcore_early(void)
279{
280	psw_t psw;
281
282	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
283	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
284	S390_lowcore.external_new_psw = psw;
285	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
286	S390_lowcore.program_new_psw = psw;
287	s390_base_pgm_handler_fn = early_pgm_check_handler;
288}
289
290/*
291 * Save ipl parameters, clear bss memory, initialize storage keys
292 * and create a kernel NSS at startup if the SAVESYS= parm is defined
293 */
/*
 * Save ipl parameters, clear bss memory, initialize storage keys
 * and create a kernel NSS at startup if the SAVESYS= parm is defined
 */
void __init startup_init(void)
{
	unsigned long memsize;

	ipl_save_parameters();
	clear_bss_section();	/* must precede any use of BSS variables */
	init_kernel_storage_key();
	lockdep_init();
	lockdep_off();		/* lockdep cannot run this early */
	detect_machine_type();
	create_kernel_nss();	/* needs machine_flags set by detect_machine_type() */
	sort_main_extable();	/* early_pgm_check_handler() needs a sorted table */
	setup_lowcore_early();
	sclp_readinfo_early();	/* fills s390_readinfo_sccb for the call below */
	memsize = sclp_memory_detect();
	/* Fall back to the tprot scan if the fast diagnose is unavailable. */
	if (memory_fast_detect() < 0)
		find_memory_chunks(memsize);
	lockdep_on();
}
313