/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed the modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif
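
/* DBGS() takes double parentheses so that a complete printk() argument
   list can travel through a single macro parameter, e.g.

	DBGS(("smp_callin: commencing CPU %d\n", cpuid));

   With DEBUG_SMP left at 0, the whole call compiles away.  */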

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
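
/* How these fit together: the sender sets a message's bit in the target's
   ipi_data[].bits and raises an interprocessor interrupt via wripir()
   (see send_ipi_message() below); the receiver atomically swaps the word
   to zero and dispatches each set bit in handle_ipi().  The cacheline
   alignment keeps CPUs working on different entries from sharing (and
   bouncing) a cache line.  */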

/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __devinitdata = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}
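
/* prof_counter and prof_multiplier are consumed by
   smp_percpu_timer_interrupt() below: the counter is decremented on every
   local tick and reloaded from the multiplier when it reaches zero, so the
   1/1 setting here makes update_process_times() run on every tick.
   setup_profiling_timer() below rejects any other multiplier.  */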

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __cpuinit
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_online(cpuid)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}
	set_cpu_online(cpuid, true);

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Call the platform-specific callin, if there is one.  */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Inform the notifiers about the new cpu.  */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}
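
/* The boot handshake, for reference: smp_boot_one_cpu() sets
   smp_secondary_alive to -1 before issuing START; the secondary spins in
   wait_boot_cpu_to_stop() until the boot cpu drops it to 0, then runs
   calibrate_delay() and sets it to 1 to announce success.  */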

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __devinit
wait_for_txrdy(unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __cpuinit
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}
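
/* The message is staged in the target cpu's ipc_buffer (length first,
   then the text), and setting this cpu's bit in hwrpb->rxrdy tells the
   console that mail is waiting.  Traffic in the other direction arrives
   via hwrpb->txrdy and is drained by recv_secondary_console_msg() below.  */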

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		  ((char*)hwrpb
		   + hwrpb->processor_offset
		   + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __cpuinit
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Set up the HWRPB fields that SRM uses to activate the secondary CPU.  */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum.  */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
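
/* Once the console honors the START command, the secondary enters the
   kernel at HWRPB.CPU_restart, i.e. at __smp_callin, the assembly entry
   point that eventually lands in smp_callin() above.  */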

/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				set_cpu_possible(i, true);
				set_cpu_present(i, true);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		init_cpu_possible(cpumask_of(boot_cpuid));
		init_cpu_present(cpumask_of(boot_cpuid));
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __cpuinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}
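
/* loops_per_jiffy counts delay-loop iterations per timer tick, so a
   single cpu's BogoMIPS figure is loops_per_jiffy * HZ / 500000.  The
   arithmetic above prints the system-wide sum with two decimal digits,
   the +2500 acting as a rounding term; e.g. with HZ == 1024, a
   loops_per_jiffy near 488000 is roughly 1000 BogoMIPS per cpu.  */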


void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}

int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}


static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu(i, to_whom)
		wripir(i);
}
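
/* The first mb() makes the caller's data visible before the request bit
   can be observed; the second orders the bit set against wripir(), which
   raises the interprocessor interrupt on each target.  handle_ipi()
   mirrors this with barriers on the receive side.  */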

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			/* Reschedule callback.  Everything to be done
			   is done by the interrupt return path.  */
			break;

		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;

		case IPI_CALL_FUNC_SINGLE:
			generic_smp_call_function_single_interrupt();
			break;

		case IPI_CPU_STOP:
			halt();

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}
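
/* Dispatch detail: ops & -ops isolates the lowest set bit (two's
   complement), and __ffs() converts it to the message number.  E.g. with
   ops == 0b0101, IPI_RESCHEDULE (bit 0) is handled first, then
   IPI_CALL_FUNC_SINGLE (bit 2).  */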

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(&to_whom, IPI_CPU_STOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for the other processors to flush their icaches
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
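
/* An optimization shared by the flush routines here: when we are the only
   user of the mm (mm_users <= 1), no other cpu can currently be running
   it, so rather than cross-calling we simply zero mm->context[] for the
   other cpus, which forces them to allocate a fresh ASN if they ever pick
   the mm up again.  */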

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct *mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}