/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
**	This program is free software; you can redistribute it and/or modify
**	it under the terms of the GNU General Public License as published by
**	the Free Software Foundation; either version 2 of the License, or
**	(at your option) any later version.
*/
#define __KERNEL_SYSCALLS__
#undef ENTRY_SYS_CPUS	/* syscall support for iCOD-like functionality */

#include <linux/autoconf.h>

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>	/* for flush_tlb_all() proto/macro */

#include <asm/io.h>
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>

#define kDEBUG 0

spinlock_t pa_dbit_lock = SPIN_LOCK_UNLOCKED;

spinlock_t smp_lock = SPIN_LOCK_UNLOCKED;

volatile struct task_struct *smp_init_current_idle_task;
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;

static volatile int smp_commenced = 0;   /* Set when the idlers are all forked */
static volatile int cpu_now_booting = 0;      /* track which CPU is booting */
volatile unsigned long cpu_online_map = 0;   /* Bitmap of online CPUs */
#define IS_LOGGED_IN(cpunum) (test_bit(cpunum, (atomic_t *)&cpu_online_map))

int smp_num_cpus = 1;
int smp_threads_ready = 0;
static int max_cpus = -1;			     /* Command line */
struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};
static volatile struct smp_call_struct *smp_call_function_data;

enum ipi_message_type {
	IPI_NOP=0,
	IPI_RESCHEDULE=1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST
};

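/*
 * The message types above double as bit positions in each CPU's
 * pending_ipi word: ipi_send() below sets bit (1 << op) under the
 * per-CPU lock and pokes the target's HPA, and ipi_interrupt()
 * drains the word one bit at a time with ffz(~ops).  A minimal,
 * uncompiled sketch of that encoding (illustration only):
 */
#if 0
static void example_ipi_encoding(void)
{
	unsigned long pending = 0;

	/* sender side: queue two messages for the target CPU */
	pending |= 1 << IPI_RESCHEDULE;
	pending |= 1 << IPI_CPU_TEST;

	/* receiver side: service the lowest pending bit each pass */
	while (pending) {
		unsigned long which = ffz(~pending);	/* lowest set bit */

		pending &= ~(1UL << which);
		/* ...dispatch on 'which' as ipi_interrupt() does... */
	}
}
#endif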

/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
static void
ipi_init(int cpuid)
{

	/* If CPU is present ... */
#ifdef ENTRY_SYS_CPUS
	/* *and* running (not stopped) ... */
#error iCOD support wants state checked here.
#endif

#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (IS_LOGGED_IN(cpuid))
	{
		switch_to_idle_task(current);
	}

	return;
}
#endif


/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
#ifdef ENTRY_SYS_CPUS
#error halt_processor() needs rework
/*
** o migrate I/O interrupts off this CPU.
** o leave IPI enabled - __cli() will disable IPI.
** o leave CPU in online map - just change the state
*/
	cpu_data[this_cpu].state = STATE_STOPPED;
	mark_bh(IPI_BH);
#else
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	clear_bit(smp_processor_id(), (void *)&cpu_online_map);
	__cli();
	for (;;)
		;
#endif
}


void
ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &cpu_data[this_cpu];
	unsigned long ops;
	unsigned long flags;

	/* Count this now; we may make a call that never returns. */
	p->ipi_count++;

	mb();	/* Order interrupt and bit testing. */

	for (;;) {
		spin_lock_irqsave(&(p->lock),flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(&(p->lock),flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			switch (which) {
			case IPI_RESCHEDULE:
#if (kDEBUG>=100)
				printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_RESCHEDULE);
				/*
				 * Reschedule callback.  Everything to be
				 * done is done by the interrupt return path.
				 */
				break;

			case IPI_CALL_FUNC:
#if (kDEBUG>=100)
				printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_CALL_FUNC);
				{
					volatile struct smp_call_struct *data;
					void (*func)(void *info);
					void *info;
					int wait;

					data = smp_call_function_data;
					func = data->func;
					info = data->info;
					wait = data->wait;

					mb();
					atomic_dec ((atomic_t *)&data->unstarted_count);

					/* At this point, *data can't
					 * be relied upon.
					 */

					(*func)(info);

					/* Notify the sending CPU that the
					 * task is done.
					 */
					mb();
					if (wait)
						atomic_dec ((atomic_t *)&data->unfinished_count);
				}
				break;

			case IPI_CPU_START:
#if (kDEBUG>=100)
				printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_CPU_START);
#ifdef ENTRY_SYS_CPUS
				p->state = STATE_RUNNING;
#endif
				break;

			case IPI_CPU_STOP:
#if (kDEBUG>=100)
				printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_CPU_STOP);
#ifdef ENTRY_SYS_CPUS
#else
				halt_processor();
#endif
				break;

			case IPI_CPU_TEST:
#if (kDEBUG>=100)
				printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_CPU_TEST);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				ops &= ~(1 << which);
				return;
			} /* Switch */
		} /* while (ops) */
	}
	return;
}


static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &cpu_data[cpu];
	unsigned long flags;

	spin_lock_irqsave(&(p->lock),flags);
	p->pending_ipi |= 1 << op;
	__raw_writel(IRQ_OFFSET(IPI_IRQ), cpu_data[cpu].hpa);
	spin_unlock_irqrestore(&(p->lock),flags);
}


static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	if (dest_cpu == NO_PROC_ID) {
		BUG();
		return;
	}

	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

static inline void
smp_send_start(void)	{ send_IPI_allbutself(IPI_CPU_START); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }


/**
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have executed.
 */

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	struct smp_call_struct data;
	long timeout;
	static spinlock_t lock = SPIN_LOCK_UNLOCKED;

	data.func = func;
	data.info = info;
	data.wait = wait;
	atomic_set(&data.unstarted_count, smp_num_cpus - 1);
	atomic_set(&data.unfinished_count, smp_num_cpus - 1);

	if (retry) {
		spin_lock (&lock);
		while (smp_call_function_data != 0)
			barrier();
	}
	else {
		spin_lock (&lock);
		if (smp_call_function_data) {
			spin_unlock (&lock);
			return -EBUSY;
		}
	}

	smp_call_function_data = &data;
	spin_unlock (&lock);

	/*  Send a message to all other CPUs and wait for them to respond  */
	send_IPI_allbutself(IPI_CALL_FUNC);

	/*  Wait for response  */
	timeout = jiffies + HZ;
	while ( (atomic_read (&data.unstarted_count) > 0) &&
		time_before (jiffies, timeout) )
		barrier ();

	/* Either every CPU picked up the call or we timed out.  Clear
	 * smp_call_function_data so the next caller can proceed. */

	mb();
	smp_call_function_data = NULL;
	if (atomic_read (&data.unstarted_count) > 0) {
		printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d)\n",
		      smp_processor_id());
		return -ETIMEDOUT;
	}

	while (wait && atomic_read (&data.unfinished_count) > 0)
		barrier ();

	return 0;
}
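
/*
 * Usage sketch for smp_call_function() (illustration only, kept out of
 * the build with #if 0): the callback runs in interrupt context on
 * every other CPU, so it must be fast and must not sleep.
 * smp_flush_tlb_all() below is the real in-tree user; a hypothetical
 * caller would look roughly like this:
 */
#if 0
static void example_remote_work(void *info)
{
	/* short, non-blocking per-CPU work goes here */
}

static void example_run_everywhere(void)
{
	/* retry=1: spin until any previous call has drained;
	 * wait=1:  do not return until all CPUs have finished. */
	if (smp_call_function(example_remote_work, NULL, 1, 1) != 0)
		printk(KERN_WARNING "example: remote CPUs did not respond\n");

	example_remote_work(NULL);	/* ...and do the same work locally */
}
#endif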



/*
 *	Setup routine for controlling SMP activation
 *
 *	Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 *	activation entirely (the MPS table probe still happens, though).
 *
 *	Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 *	greater than 0, limits the maximum number of CPUs activated in
 *	SMP mode to <NUM>.
 */

static int __init nosmp(char *str)
{
	max_cpus = 0;
	return 1;
}

__setup("nosmp", nosmp);

static int __init maxcpus(char *str)
{
	get_option(&str, &max_cpus);
	return 1;
}

__setup("maxcpus=", maxcpus);
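
/*
 * Boot-line usage example (illustration only; the exact syntax depends
 * on the boot loader and firmware setup):
 *
 *	... root=/dev/sda1 maxcpus=2	-> activate at most two CPUs
 *	... root=/dev/sda1 nosmp	-> bootstrap processor only
 */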

/*
 * Flush all other CPUs' TLBs and then our own.  Do this with
 * smp_call_function() as we want to ensure all TLBs are flushed
 * before proceeding.
 */

extern void flush_tlb_all_local(void);

void
smp_flush_tlb_all(void)
{
	smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
	flush_tlb_all_local();
}


void
smp_do_timer(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct cpuinfo_parisc *data = &cpu_data[cpu];

	if (!--data->prof_counter) {
		data->prof_counter = data->prof_multiplier;
		update_process_times(user_mode(regs));
	}
}

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern int init_per_cpu(int);  /* arch/parisc/kernel/setup.c */
	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */

	/* Set modes and enable the floating point coprocessor */
	(void) init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Support the 2.4 Linux scheme as well. */
	if (test_and_set_bit(cpunum, (unsigned long *) (&cpu_online_map)))
	{
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if(current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current, cpunum);

	init_IRQ();   /* make sure no IRQs are enabled or pending */
}


/*
 * Slaves start using C here.  Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for the bootstrap processor (aka monarch).
 */
void __init smp_callin(void)
{
	extern void cpu_idle(void);	/* arch/parisc/kernel/process.c */
	int slave_id = cpu_now_booting;

	smp_cpu_init(slave_id);

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local();

	local_irq_enable();  /* Interrupts have been off until now */

	/* Slaves wait here until Big Poppa daddy says "jump" */
	mb();	/* PARANOID */
	while (!smp_commenced) ;
	mb();	/* PARANOID */

	cpu_idle();      /* Wait for timer to schedule some work */

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Create the idle task for a new Slave CPU.  DO NOT use kernel_thread()
 * because that could end up calling schedule(). If it did, the new idle
 * task could get scheduled before we had a chance to remove it from the
 * run-queue...
 */
static int fork_by_hand(void)
{
	struct pt_regs regs;

	/*
	 * don't care about the regs settings since
	 * we'll never reschedule the forked task.
	 */
	return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
}


/*
 * Bring one cpu online.
 */
static int smp_boot_one_cpu(int cpuid, int cpunum)
{
	struct task_struct *idle;
	long timeout;

	/*
	 * Create an idle task for this CPU.  Note the address we give
	 * to kernel_thread is irrelevant -- it's going to start
	 * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
	 * this gets all the other task-y sort of data structures set
	 * up like we wish.   We need to pull the just created idle task
	 * off the run queue and stuff it into the init_tasks[] array.
	 * Sheesh . . .
	 */

	if (fork_by_hand() < 0)
		panic("SMP: fork failed for CPU:%d", cpuid);

	idle = init_task.prev_task;
	if (!idle)
		panic("SMP: No idle process for CPU:%d", cpuid);

	task_set_cpu(idle, cpunum);	/* manually schedule idle task */
	del_from_runqueue(idle);
	unhash_process(idle);
	init_tasks[cpunum] = idle;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[], cpu_data[])
	*/
	cpu_now_booting = cpunum;

	/*
	** bootstrap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	/*
	** This gets PDC to release the CPU from a very tight loop.
	** See MEM_RENDEZ comments in head.S.
	*/
	__raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpunum].hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init() and then
	 * wait for smp_commenced to be 1.
	 * Once we see the bit change, we can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if(IS_LOGGED_IN(cpunum)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}

	init_tasks[cpunum] = NULL;
	free_task_struct(idle);

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
#if (kDEBUG>=100)
	printk(KERN_DEBUG "SMP: CPU:%d (num %d) came alive after %ld us\n",
		cpuid,  cpunum, timeout * 100);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
	cpu_data[cpunum].state = STATE_RUNNING;
#endif
	return 0;
}




/*
** inventory.c:do_inventory() has already 'discovered' the additional CPUs.
** We are ready to wrest them from PDC's control now.
** Called by smp_init to bring all the secondaries online and hold them.
**
** o Setup of the IPI irq handler is done in irq.c.
** o MEM_RENDEZ is initialized in head.S:stext()
**
*/
void __init smp_boot_cpus(void)
{
	int i, cpu_count = 1;
	unsigned long bogosum = loops_per_jiffy; /* Count Monarch */

	/* REVISIT - assumes first CPU reported by PAT PDC is BSP */
	int bootstrap_processor = cpu_data[0].cpuid;	/* CPU ID of BSP */

	/* Setup BSP mappings */
	printk(KERN_DEBUG "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
	init_task.processor = bootstrap_processor;
	current->processor = bootstrap_processor;
	cpu_online_map = 1 << bootstrap_processor; /* Mark Bootstrap processor as present */
	current->active_mm = &init_mm;

#ifdef ENTRY_SYS_CPUS
	cpu_data[0].state = STATE_RUNNING;
#endif

	/* Nothing to do when told not to.  */
	if (max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	if (max_cpus != -1)
		printk(KERN_INFO "Limiting CPUs to %d\n", max_cpus);

	/* We found more than one CPU.... */
	if (boot_cpu_data.cpu_count > 1) {

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_data[i].cpuid == NO_PROC_ID ||
			    cpu_data[i].cpuid == bootstrap_processor)
				continue;

			if (smp_boot_one_cpu(cpu_data[i].cpuid, cpu_count) < 0)
				continue;

			bogosum += loops_per_jiffy;
			cpu_count++; /* Count good CPUs only... */

			/* Bail when we've started as many CPUs as told to */
			if (cpu_count == max_cpus)
				break;
		}
	}
	if (cpu_count == 1) {
		printk(KERN_INFO "SMP: Bootstrap processor only.\n");
	}

	printk(KERN_INFO "SMP: Total %d of %d processors activated "
	       "(%lu.%02lu BogoMIPS noticed).\n",
	       cpu_count, boot_cpu_data.cpu_count, (bogosum + 25) / 5000,
	       ((bogosum + 25) / 50) % 100);

	smp_num_cpus = cpu_count;
#ifdef PER_CPU_IRQ_REGION
	ipi_init();
#endif
	return;
}

/*
 * Called from main.c by Monarch Processor.
 * After this, any CPU can schedule any task.
 */
void smp_commence(void)
{
	smp_commenced = 1;
	mb();
	return;
}

#ifdef ENTRY_SYS_CPUS
/* Code goes along with:
**    entry.s:        ENTRY_NAME(sys_cpus)   / * 215, for cpu stat * /
*/
int sys_cpus(int argc, char **argv)
{
	int i,j=0;
	extern int current_pid(int cpu);

	if( argc > 2 ) {
		printk("sys_cpus:Only one argument supported\n");
		return (-1);
	}
	if ( argc == 1 ){

#ifdef DUMP_MORE_STATE
		for(i=0; i<NR_CPUS; i++) {
			int cpus_per_line = 4;
			if(IS_LOGGED_IN(i)) {
				if (j++ % cpus_per_line)
					printk(" %3d",i);
				else
					printk("\n %3d",i);
			}
		}
		printk("\n");
#else
		printk("\n 0\n");
#endif
	} else if((argc==2) && !(strcmp(argv[1],"-l"))) {
		printk("\nCPUSTATE  TASK CPUNUM CPUID HARDCPU(HPA)\n");
#ifdef DUMP_MORE_STATE
		for(i=0;i<NR_CPUS;i++) {
			if (!IS_LOGGED_IN(i))
				continue;
			if (cpu_data[i].cpuid != NO_PROC_ID) {
				switch(cpu_data[i].state) {
					case STATE_RENDEZVOUS:
						printk("RENDEZVS ");
						break;
					case STATE_RUNNING:
						printk((current_pid(i)!=0) ? "RUNNING  " : "IDLING   ");
						break;
					case STATE_STOPPED:
						printk("STOPPED  ");
						break;
					case STATE_HALTED:
						printk("HALTED   ");
						break;
					default:
						printk("%08x?", cpu_data[i].state);
						break;
				}
				if(IS_LOGGED_IN(i)) {
					printk(" %4d",current_pid(i));
				}
				printk(" %6d",cpu_number_map(i));
				printk(" %5d",i);
				printk(" 0x%lx\n",cpu_data[i].hpa);
			}
		}
#else
		printk("\n%s  %4d      0     0 --------",
			(current->pid)?"RUNNING ": "IDLING  ",current->pid);
#endif
	} else if ((argc==2) && !(strcmp(argv[1],"-s"))) {
#ifdef DUMP_MORE_STATE
		printk("\nCPUSTATE   CPUID\n");
		for (i=0;i<NR_CPUS;i++) {
			if (!IS_LOGGED_IN(i))
				continue;
			if (cpu_data[i].cpuid != NO_PROC_ID) {
				switch(cpu_data[i].state) {
					case STATE_RENDEZVOUS:
						printk("RENDEZVS");break;
					case STATE_RUNNING:
						printk((current_pid(i)!=0) ? "RUNNING " : "IDLING  ");
						break;
					case STATE_STOPPED:
						printk("STOPPED ");break;
					case STATE_HALTED:
						printk("HALTED  ");break;
					default:
						break;
				}
				printk("  %5d\n",i);
			}
		}
#else
		printk("\n%s    CPU0",(current->pid==0)?"RUNNING ":"IDLING  ");
#endif
	} else {
		printk("sys_cpus:Unknown request\n");
		return (-1);
	}
	return 0;
}
#endif /* ENTRY_SYS_CPUS */

#ifdef CONFIG_PROC_FS
int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#endif