/*
 * arch/ubicom32/kernel/smp.c
 *   SMP implementation for Ubicom32 processors.
 *
 * (C) Copyright 2009, Ubicom, Inc.
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port.  If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 *   arch/m68knommu
 *   arch/blackfin
 *   arch/parisc
 */

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/profile.h>
#include <linux/io.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/irq.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/tlbflush.h>
#include <asm/timex.h>
#include <asm/cpu.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <asm/thread.h>
#include <asm/sections.h>
#include <asm/ip5000.h>

/*
 * Mask the debug printouts for IPI because they are too verbose
 * for regular debugging.
 */

/* #define DEBUG_SMP 1 */
#if !defined(DEBUG_SMP)
#define smp_debug(lvl, ...)
#else
static unsigned int smp_debug_lvl = 50;
#define smp_debug(lvl, printargs...)		\
	do {					\
		if ((lvl) >= smp_debug_lvl) {	\
			printk(printargs);	\
		}				\
	} while (0)
#endif

#if !defined(DEBUG_SMP)
#define DEBUG_ASSERT(cond)
#else
#define DEBUG_ASSERT(cond)			\
	do {					\
		if (!(cond)) {			\
			THREAD_STALL;		\
		}				\
	} while (0)
#endif
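
/*
 * Note: with DEBUG_SMP defined, a message is printed only when its level
 * is >= smp_debug_lvl, so raising smp_debug_lvl silences the lower-level
 * messages first.
 */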

/*
 * List of IPI Commands (more than one can be set at a time).
 */
enum ipi_message_type {
	IPI_NOP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
	IPI_CPU_TIMER,
};
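
/*
 * Each command is used as a bit position in a cpu's ipi_pending word
 * (see ipi_send() and ipi_interrupt()), which is how several commands
 * can be outstanding for the same cpu at once.
 */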

/*
 * We maintain a hardware-thread-oriented view of online threads
 * and of those involved in, or needing, an IPI.
 */
static volatile unsigned long smp_online_threads = 0;
static volatile unsigned long smp_needs_ipi = 0;
static volatile unsigned long smp_inside_ipi = 0;
static unsigned long smp_irq_affinity[NR_IRQS];

/*
 * Per cpu/thread state.
 */
DEFINE_PER_CPU(struct cpuinfo_ubicom32, cpu_data);

/*
 * Each thread's cpuinfo IPI information is guarded by a lock
 * that is kept local to this file.
 */
DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;

/*
 * The IPI(s) are based on a software IRQ through the LDSR.
 */
unsigned int smp_ipi_irq;

/*
 * Define a spinlock so that only one cpu is able to modify
 * smp_needs_ipi and to set/clear the IRQ at a time.
 */
DEFINE_SPINLOCK(smp_ipi_lock);
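
/*
 * Note: the per-cpu ipi_lock and the global smp_ipi_lock are never held
 * at the same time; ipi_send() drops the per-cpu lock before taking
 * smp_ipi_lock, so there is no ordering requirement between the two.
 */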

/*
 * smp_halt_processor()
 *	Halt this hardware thread.
 */
static void smp_halt_processor(void)
{
	int cpuid = thread_get_self();
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	printk(KERN_EMERG "cpu[%d] has halted. It is not OK to turn off power "
	       "until all cpus are off.\n", cpuid);
	for (;;) {
		thread_suspend();
	}
}

/*
 * ipi_interrupt()
 *	Handle an Interprocessor Interrupt.
 */
static irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	int cpuid = smp_processor_id();
	struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpuid);
	unsigned long ops;

	/*
	 * Count this now; we may make a call that never returns.
	 */
	p->ipi_count++;

	/*
	 * We are about to process all ops.  If another cpu has stated
	 * that we need an IPI, we will have already processed it.  By
	 * clearing our smp_needs_ipi bit and processing all ops, we
	 * reduce the number of IPI interrupts.  However, this introduces
	 * the possibility that smp_needs_ipi will be clear and the soft irq
	 * will have gone off; so we need to make the get_affinity() path
	 * tolerant of spurious interrupts.
	 */
	spin_lock(&smp_ipi_lock);
	smp_needs_ipi &= ~(1 << p->tid);
	spin_unlock(&smp_ipi_lock);

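	/*
	 * Keep draining ipi_pending until it stays empty: new commands can
	 * be posted by other cpus while we are still processing this batch.
	 */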
	for (;;) {
		/*
		 * Read the set of IPI commands we should handle.
		 */
		spinlock_t *lock = &per_cpu(ipi_lock, cpuid);
		spin_lock(lock);
		ops = p->ipi_pending;
		p->ipi_pending = 0;
		spin_unlock(lock);

		/*
		 * If we have no IPI commands to execute, break out.
		 */
		if (!ops) {
			break;
		}

		/*
		 * Execute the set of commands in the ops word, one command
		 * at a time in no particular order.  Strip off each command
		 * as we execute it.
		 */
		while (ops) {
			unsigned long which = ffz(~ops);
			ops &= ~(1 << which);

			BUG_ON(!irqs_disabled());
			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_NOP\n", cpuid);
				break;

			case IPI_RESCHEDULE:
				/*
				 * Reschedule callback.  Everything to be
				 * done is done by the interrupt return path.
				 */
				smp_debug(200, KERN_INFO "cpu[%d]: "
					  "IPI_RESCHEDULE\n", cpuid);
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CALL_FUNC\n", cpuid);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CALL_FUNC_SINGLE\n", cpuid);
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CPU_STOP\n", cpuid);
				smp_halt_processor();
				break;

#if !defined(CONFIG_LOCAL_TIMERS)
			case IPI_CPU_TIMER:
				smp_debug(100, KERN_INFO "cpu[%d]: "
					  "IPI_CPU_TIMER\n", cpuid);
#if defined(CONFIG_GENERIC_CLOCKEVENTS)
				local_timer_interrupt();
#else
				update_process_times(user_mode(get_irq_regs()));
				profile_tick(CPU_PROFILING);
#endif
				break;
#endif

			default:
				printk(KERN_CRIT "cpu[%d]: "
					  "Unknown IPI: %lu\n", cpuid, which);

				return IRQ_NONE;
			}

			/*
			 * Let in any pending interrupts.
			 */
			BUG_ON(!irqs_disabled());
			local_irq_enable();
			local_irq_disable();
		}
	}
	return IRQ_HANDLED;
}

/*
 * ipi_send()
 *	Send an Interprocessor Interrupt.
 */
static void ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpu);
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	/*
	 * We protect the setting of the ipi_pending field and ensure
	 * that the ipi delivery mechanism and interrupt are atomically
	 * handled.
	 */
	spin_lock_irqsave(lock, flags);
	p->ipi_pending |= 1 << op;
	spin_unlock_irqrestore(lock, flags);

	spin_lock_irqsave(&smp_ipi_lock, flags);
	smp_needs_ipi |= (1 << p->tid);
	ubicom32_set_interrupt(smp_ipi_irq);
	spin_unlock_irqrestore(&smp_ipi_lock, flags);
	smp_debug(100, KERN_INFO "cpu[%d]: send: %d\n", cpu, op);
}

/*
 * ipi_send_mask()
 *	Send an IPI to each cpu in mask.
 */
static inline void ipi_send_mask(unsigned int op, const struct cpumask mask)
{
	int cpu;
	for_each_cpu_mask(cpu, mask) {
		ipi_send(cpu, op);
	}
}

/*
 * ipi_send_allbutself()
 *	Send an IPI to all threads but ourselves.
 */
static inline void ipi_send_allbutself(unsigned int op)
{
	int self = smp_processor_id();
	struct cpumask result;
	cpumask_copy(&result, &cpu_online_map);
	cpu_clear(self, result);
	ipi_send_mask(op, result);
}

/*
 * smp_enable_vector()
 */
static void smp_enable_vector(unsigned int irq)
{
	ubicom32_clear_interrupt(smp_ipi_irq);
	ldsr_enable_vector(irq);
}

/*
 * smp_disable_vector()
 *	Disable the interrupt by clearing the appropriate bit in the
 *	LDSR Mask Register.
 */
static void smp_disable_vector(unsigned int irq)
{
	ldsr_disable_vector(irq);
}

/*
 * smp_mask_vector()
 */
static void smp_mask_vector(unsigned int irq)
{
	ldsr_mask_vector(irq);
}

/*
 * smp_unmask_vector()
 */
static void smp_unmask_vector(unsigned int irq)
{
	ldsr_unmask_vector(irq);
}

/*
 * smp_end_vector()
 *	Called once an interrupt is completed (reset the LDSR mask).
 */
static void smp_end_vector(unsigned int irq)
{
	struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, smp_processor_id());
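
	/*
	 * Only unmask the IPI vector once the last thread that took this
	 * round of IPIs (tracked in smp_inside_ipi by smp_get_affinity())
	 * has finished; until then, leave the vector masked.
	 */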
	spin_lock(&smp_ipi_lock);
	smp_inside_ipi &= ~(1 << p->tid);
	if (smp_inside_ipi) {
		spin_unlock(&smp_ipi_lock);
		return;
	}
	spin_unlock(&smp_ipi_lock);
	ldsr_unmask_vector(irq);
	smp_debug(100, KERN_INFO "cpu[%d]: unmask vector\n", smp_processor_id());
}

/*
 * Special handler functions for SMP.
 */
static struct irq_chip ubicom32_smp_chip = {
	.name		= "UbicoIPI",
	.startup	= NULL,
	.shutdown	= NULL,
	.enable		= smp_enable_vector,
	.disable	= smp_disable_vector,
	.ack		= NULL,
	.mask		= smp_mask_vector,
	.unmask		= smp_unmask_vector,
	.end		= smp_end_vector,
};

/*
 * smp_reset_ipi()
 *	None of these cpu(s) got their IPI; turn it back on.
 *
 * Note: This is called by the LDSR, which is not a full
 * Linux cpu.  Thus you must use the raw form of locks
 * because lock debugging will not work on the partial
 * cpu nature of the LDSR.
 */
void smp_reset_ipi(unsigned long mask)
{
	__raw_spin_lock(&smp_ipi_lock.raw_lock);
	smp_needs_ipi |= mask;
	smp_inside_ipi &= ~mask;
	ubicom32_set_interrupt(smp_ipi_irq);
	__raw_spin_unlock(&smp_ipi_lock.raw_lock);
	smp_debug(100, KERN_INFO "smp: reset IPIs for: 0x%lx\n", mask);
}

/*
 * smp_get_affinity()
 *	Choose the thread affinity for this interrupt.
 *
 * Note: This is called by the LDSR, which is not a full
 * Linux cpu.  Thus you must use the raw form of locks
 * because lock debugging will not work on the partial
 * cpu nature of the LDSR.
 */
unsigned long smp_get_affinity(unsigned int irq, int *all)
{
	unsigned long mask = 0;

	/*
	 * Most IRQ(s) are delivered in a round robin fashion.
	 */
	if (irq != smp_ipi_irq) {
		unsigned long result = smp_irq_affinity[irq] & smp_online_threads;
		DEBUG_ASSERT(result);
		*all = 0;
		return result;
	}

	/*
	 * This is an IPI request.  Return all cpu(s) scheduled for an IPI.
	 * We also track those cpu(s) that are going to be "receiving" the IPI
	 * this round.  Once all of those cpu(s) have called smp_end_vector(),
	 * we will unmask the IPI interrupt.
	 */
	__raw_spin_lock(&smp_ipi_lock.raw_lock);
	ubicom32_clear_interrupt(smp_ipi_irq);
	if (smp_needs_ipi) {
		mask = smp_needs_ipi;
		smp_inside_ipi |= smp_needs_ipi;
		smp_needs_ipi = 0;
	}
	__raw_spin_unlock(&smp_ipi_lock.raw_lock);
	*all = 1;
	return mask;
}

/*
 * smp_set_affinity()
 *	Set the affinity for this irq, storing it as a mask of hardware
 *	thread id(s).
 */
void smp_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpuid;
	unsigned long *paffinity = &smp_irq_affinity[irq];

	/*
	 * If none specified, all cpus are allowed.
	 */
	if (cpus_empty(*dest)) {
		*paffinity = 0xffffffff;
		return;
	}

	/*
	 * Make sure to clear the old value before setting up the
	 * list.
	 */
	*paffinity = 0;
	for_each_cpu_mask(cpuid, *dest) {
		struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpuid);
		*paffinity |= (1 << p->tid);
	}
}

/*
 * smp_send_stop()
 *	Send a stop request to all cpus but this one.
 */
void smp_send_stop(void)
{
	ipi_send_allbutself(IPI_CPU_STOP);
}

/*
 * smp_send_timer_all()
 *	Send all cpus but this one a request to update times.
 */
void smp_send_timer_all(void)
{
	ipi_send_allbutself(IPI_CPU_TIMER);
}

/*
 * smp_timer_broadcast()
 *	Use an IPI to broadcast a timer message.
 */
void smp_timer_broadcast(const struct cpumask *mask)
{
	ipi_send_mask(IPI_CPU_TIMER, *mask);
}

/*
 * smp_send_reschedule()
 *	Send a reschedule request to the specified cpu.
 */
void smp_send_reschedule(int cpu)
{
	ipi_send(cpu, IPI_RESCHEDULE);
}

/*
 * arch_send_call_function_ipi_mask()
 *	Cause each cpu in the mask to call the generic function handler.
 */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;
	for_each_cpu_mask(cpu, *mask) {
		ipi_send(cpu, IPI_CALL_FUNC);
	}
}

/*
 * arch_send_call_function_single_ipi()
 *	Cause the specified cpu to call the generic function handler.
 */
void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send(cpu, IPI_CALL_FUNC_SINGLE);
}

/*
 * setup_profiling_timer()
 *	Dummy function created to keep Oprofile happy in the SMP case.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

/*
 * smp_mainline_start()
 *	Start a slave thread executing a mainline Linux context.
 */
static void __init smp_mainline_start(void *arg)
{
	int cpuid = smp_processor_id();
	struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpuid);

	BUG_ON(p->tid != thread_get_self());

	/*
	 * Guard against being started twice (the 2.4 Linux scheme).
	 */
	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk(KERN_CRIT "cpu[%d]: already initialized!\n", cpuid);
		smp_halt_processor();
		return;
	}

	/*
	 * Initialise the idle task for this CPU.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm) {
		printk(KERN_CRIT "cpu[%d]: idle task already has memory "
		       "management\n", cpuid);
		smp_halt_processor();
		return;
	}

	/*
	 * TODO: x86 does this prior to calling notify; understand why.
	 */
	preempt_disable();

#if defined(CONFIG_GENERIC_CLOCKEVENTS)
	/*
	 * Setup a local timer event so that this cpu will get timer interrupts.
	 */
	if (local_timer_setup(cpuid) == -1) {
		printk(KERN_CRIT "cpu[%d]: timer alloc failed\n", cpuid);
		smp_halt_processor();
		return;
	}
#endif

	/*
	 * Notify those interested that we are up and alive.  This must
	 * be done before interrupts are enabled.  It must also be completed
	 * before the bootstrap cpu returns from __cpu_up() (see the comment
	 * above cpu_set() of the cpu_online_map).
	 */
	notify_cpu_starting(cpuid);

	/*
	 * Indicate that this thread is now online and present.  Setting
	 * cpu_online_map has the side effect of allowing the bootstrap
	 * cpu to continue along; so anything that MUST be done prior to the
	 * bootstrap cpu returning from __cpu_up() needs to go above here.
	 */
	cpu_set(cpuid, cpu_online_map);
	cpu_set(cpuid, cpu_present_map);

	/*
	 * Maintain a thread mapping in addition to the cpu mapping.
	 */
	smp_online_threads |= (1 << p->tid);

	/*
	 * Enable interrupts for this thread.
	 */
	local_irq_enable();

	/*
	 * Enter the idle loop and wait for a timer to schedule some work.
	 */
	printk(KERN_INFO "cpu[%d]: entering cpu_idle()\n", cpuid);
	cpu_idle();

	/* Not Reached */
}

/*
 * smp_cpus_done()
 *	Called once kernel_init() has brought up all cpu(s).
 */
void smp_cpus_done(unsigned int cpu_max)
{
	/* Do Nothing */
}

/*
 * __cpu_up()
 *	Called to start up a specific cpu.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	unsigned int *stack;
	long timeout;
	struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, cpu);

	/*
	 * Create an idle task for this CPU.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		panic("cpu[%d]: fork failed\n", cpu);
		return -ENOSYS;
	}
	task_thread_info(idle)->cpu = cpu;

	/*
	 * Setup sw_ksp[] to point to this new task's stack.
	 */
	sw_ksp[p->tid] = (unsigned int)idle->stack;
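	/*
	 * Start the initial stack pointer near the top of the idle task's
	 * stack area (8 bytes below the end of the page).
	 */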
	stack = (unsigned int *)(sw_ksp[p->tid] + PAGE_SIZE - 8);

	/*
	 * Cause the specified thread to execute our smp_mainline_start
	 * function as a TYPE_NORMAL thread.
	 */
	printk(KERN_INFO "cpu[%d]: launching mainline Linux thread\n", cpu);
	if (thread_start(p->tid, smp_mainline_start, (void *)NULL, stack,
			 THREAD_TYPE_NORMAL) == -1) {
		printk(KERN_WARNING "cpu[%d]: failed thread_start\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Wait for the thread to start up.  The thread will set
	 * the online bit when it is running.  Our caller expects the
	 * cpu to be online if we return 0.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpu)) {
			break;
		}

		udelay(100);
		barrier();
	}

	if (!cpu_online(cpu)) {
		printk(KERN_CRIT "cpu[%d]: failed to live after %ld us\n",
		       cpu, timeout * 100);
		return -ENOSYS;
	}

	printk(KERN_INFO "cpu[%d]: came alive after %ld us\n",
	       cpu, timeout * 100);
	return 0;
}

/*
 * Data used by setup_irq() for the IPI.
 */
static struct irqaction ipi_irq = {
	.name	 = "ipi",
	.flags	 = IRQF_DISABLED | IRQF_PERCPU,
	.handler = ipi_interrupt,
};

/*
 * smp_prepare_cpus()
 *	Mark threads that are available to Linux as present cpu(s).
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * We will need a software IRQ to send IPI(s).  We will use
	 * a single software IRQ for all IPI(s).
	 */
	if (irq_soft_alloc(&smp_ipi_irq) < 0) {
		panic("no software IRQ is available\n");
		return;
	}

	/*
	 * For the IPI interrupt, we want to use our own chip definition.
	 * This allows us to define what happens in SMP IPI without affecting
	 * the performance of the other interrupts.
	 *
	 * Next, register the IPI interrupt function against the soft IRQ.
	 */
	set_irq_chip(smp_ipi_irq, &ubicom32_smp_chip);
	setup_irq(smp_ipi_irq, &ipi_irq);

	/*
	 * We use the device tree node to determine how many
	 * free cpus we will have (up to NR_CPUS) and we indicate
	 * that those cpus are present.
	 *
	 * We need to do this very early in the SMP case
	 * because the Linux init code uses the cpu_present_map.
	 */
	for_each_possible_cpu(i) {
		thread_t tid;
		struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, i);

		/*
		 * Skip the bootstrap cpu.
		 */
		if (i == 0) {
			continue;
		}

		/*
		 * If we have a free thread left in the mask,
		 * indicate that the cpu is present.
		 */
		tid = thread_alloc();
		if (tid == (thread_t)-1) {
			break;
		}

		/*
		 * Save the hardware thread id for this cpu.
		 */
		p->tid = tid;
		cpu_set(i, cpu_present_map);
		printk(KERN_INFO "cpu[%d]: added to cpu_present_map - tid: %d\n", i, tid);
	}
}

/*
 * smp_prepare_boot_cpu()
 *	Mark the bootstrap cpu's hardware thread as online.
 *
 * The code in boot_cpu_init() has already set the boot cpu's
 * state in the possible, present, and online maps.
 */
void __devinit smp_prepare_boot_cpu(void)
{
	struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, 0);

	smp_online_threads |= (1 << p->tid);
	printk(KERN_INFO "cpu[%d]: bootstrap CPU online - tid: %ld\n",
			current_thread_info()->cpu, p->tid);
}

/*
 * smp_setup_processor_id()
 *	Set the current_thread_info() structure cpu value.
 *
 * The bootstrap cpu is always Linux cpu 0, whichever hardware thread we
 * are actually running on.
 * NOTE: this function overrides the weak alias function in main.c
 */
void __init smp_setup_processor_id(void)
{
	struct cpuinfo_ubicom32 *p = &per_cpu(cpu_data, 0);
	int i;
	for_each_cpu_mask(i, CPU_MASK_ALL)
		set_cpu_possible(i, true);

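	/*
	 * Record the true hardware thread id in the bootstrap cpu's cpuinfo.
	 */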
	current_thread_info()->cpu = 0;
	p->tid = thread_get_self();
}