/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>

#include "cpumap.h"

int sparc64_multi_core __read_mostly;

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

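/* Set to 1 by a freshly booted secondary cpu in smp_callin(); the boot
 * cpu polls this in smp_boot_one_cpu() to see that the new cpu is alive.
 */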
static volatile unsigned long callin_flag = 0;

void __cpuinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	ipi_call_lock_irq();
	cpu_set(cpuid, cpu_online_map);
	ipi_call_unlock_irq();

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */

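/* go[MASTER] and go[SLAVE] are the master/slave handshake words used by
 * the tick synchronization code below; SLAVE is sized so the two words
 * are spaced a cache line apart.
 */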
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

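/* Run NUM_ITERS handshakes with the master, keep the sample with the
 * smallest round trip, and return the difference between the midpoint
 * of that round trip and the master's tick sample (the slave's
 * estimated offset).  *rt gets the best round trip time and *master
 * the master's tick relative to the start of that round trip.
 */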
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
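/* Translate a kernel image virtual address into the real address the
 * hypervisor expects.
 */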
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}

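/* Start cpu 'cpu' via the sun4v hypervisor: build an hvtramp_descr
 * describing the locked kernel image mappings and the cpu's fault info
 * area, then hand it to sun4v_cpu_start() together with the real
 * address of the hypervisor trampoline.
 */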
static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	void *descr = NULL;
	int timeout, ret;

	p = fork_idle(cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}

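/* Dispatch one mondo (data0/data1/data2) to a single cpu using the
 * Spitfire UDB interrupt dispatch registers, then poll the dispatch
 * status register, retrying the dispatch if the target NACKs it.
 */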
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

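/* Spitfire can only dispatch to one cpu at a time, so deliver the
 * mondo to each cpu in the list in turn.
 */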
static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

/* Multi-cpu list version.  */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int retries, this_cpu, prev_sent, i, saw_cpu_error;
	unsigned long status;
	u16 *cpu_list;

	this_cpu = smp_processor_id();

	cpu_list = __va(tb->cpu_list_pa);

	saw_cpu_error = 0;
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err == HV_CPU_STATE_ERROR) {
					saw_cpu_error = (cpu + 1);
					cpu_list[i] = 0xffff;
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	if (unlikely(saw_cpu_error))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "(including %d) were in error state\n",
	       this_cpu, saw_cpu_error - 1);
	return;

fatal_mondo_timeout:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in MASK
 * except self.  Really, there are only two cases currently,
 * "&cpu_online_map" and "&mm->cpu_vm_mask".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
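	/* data0 carries the mmu context in its upper 32 bits and the low
	 * 32 bits of the handler's address in the rest.
	 */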
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      &cpumask_of_cpu(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      &cpumask_of_cpu(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_interrupt();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	generic_smp_call_function_single_interrupt();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

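/* Flush the D-cache for one page on behalf of cpu 'cpu': locally if
 * that is the calling cpu, otherwise via a cross-call to that cpu.
 * No-op when running under the hypervisor (sun4v).
 */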
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, &cpumask_of_cpu(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

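/* Flush the page's D-cache lines on all online cpus via a cross-call,
 * then do the local flush.  No-op under the hypervisor (sun4v).
 */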
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	int this_cpu;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, &cpu_online_map);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	put_cpu();
}

void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 *    The performance gain from "optimizing" away the cross call for threads is
 *    questionable (in theory the big win for threads is the massive sharing of
 *    address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm_cpumask(mm));

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

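/* Round up all other cpus and park them in smp_penguin_jailcell()
 * until smp_release() lets them go.  Nested capture/release pairs
 * are handled via smp_capture_depth.
 */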
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
}

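/* Choose the mondo delivery implementation for this cpu type:
 * Spitfire UDB dispatch, Cheetah pipelined dispatch, or the sun4v
 * hypervisor cpu-list API.
 */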
void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

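/* Build cpu_core_map[] from matching core_id values and the per-cpu
 * cpu_sibling_map from matching proc_id values.  Cpus without topology
 * information (core_id 0, proc_id -1) are only grouped with themselves.
 */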
void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpu_set(i, cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpu_set(j, cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpu_set(i, per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpu_set(j, per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
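/* Final resting place of an offlined cpu: unconfigure its sun4v mondo
 * queues (if applicable), drop out of smp_commenced_mask, then spin
 * with interrupts disabled; on LDOM configurations __cpu_die() will
 * stop the cpu via the hypervisor.
 */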
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpu_clear(cpu, smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);
	cpus_clear(cpu_core_map[cpu]);

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu.  */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	ipi_call_lock();
	cpu_clear(cpu, cpu_online_map);
	ipi_call_unlock();

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpu_isset(cpu, smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpu_isset(cpu, smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				cpu_clear(cpu, cpu_present_map);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
		      &cpumask_of_cpu(cpu));
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}

static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

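/* Make sure the kernel page tables cover 'addr', allocating any missing
 * pmd/pte tables from bootmem.  Used by the page-based percpu first
 * chunk allocator below.
 */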
static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}

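/* Set up the per-cpu areas: try the embedding first-chunk allocator
 * with 4MB units first, fall back to page-sized mappings, then point
 * each cpu's __per_cpu_offset (and the boot cpu's %g5) at its area.
 */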
void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}