/* smp.h: Sparc specific SMP stuff.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_SMP_H
#define _SPARC_SMP_H

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/head.h>
#include <asm/btfixup.h>

#ifndef __ASSEMBLY__
/* PROM provided per-processor information we need
 * to start them all up.
 */

struct prom_cpuinfo {
	int prom_node;		/* PROM device tree node for this cpu */
	int mid;		/* hardware module ID of this cpu */
};
extern int linux_num_cpus;	/* number of CPUs probed */

#endif /* !(__ASSEMBLY__) */

#ifdef CONFIG_SMP

#ifndef __ASSEMBLY__

#include <asm/ptrace.h>
#include <asm/asi.h>

/* Per-cpu PROM information gathered during the boot-time probe. */
extern struct prom_cpuinfo linux_cpus[NR_CPUS];

/* Per processor Sparc parameters we need. */

struct cpuinfo_sparc {
	unsigned long udelay_val; /* that's it */
	unsigned short next;
	unsigned short mid;
};

extern struct cpuinfo_sparc cpu_data[NR_CPUS];
extern unsigned long cpu_offset[NR_CPUS];

/*
 *	Private routines/data
 */

extern int smp_found_cpus;
extern unsigned char boot_cpu_id;
extern unsigned long cpu_present_map;
/* On sparc32 every present cpu is treated as online, so the maps coincide. */
#define cpu_online_map cpu_present_map

/* Handler signature for cross-calls: up to five word-sized arguments. */
typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
		       unsigned long, unsigned long);

/*
 *	General functions that each host system must provide.
 */

void sun4m_init_smp(void);
void sun4d_init_smp(void);

void smp_callin(void);
void smp_boot_cpus(void);
void smp_store_cpu_info(int);

struct seq_file;
void smp_bogo_info(struct seq_file *);
void smp_info(struct seq_file *);

/* Boot-time fixup slots: resolved to the sun4m or sun4d implementation by
 * the btfixup machinery (see asm/btfixup.h and btfixupprep.c).
 */
BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, smp_message_pass, int, int, unsigned long, int)
BTFIXUPDEF_CALL(int, __smp_processor_id, void)
BTFIXUPDEF_BLACKBOX(smp_processor_id)
BTFIXUPDEF_BLACKBOX(load_current)

#define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
#define smp_message_pass(target,msg,data,wait) BTFIXUP_CALL(smp_message_pass)(target,msg,data,wait)

/* xcN(): issue a cross-call with N arguments; the unused argument slots
 * are padded with zeroes.
 */
extern __inline__ void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
extern __inline__ void xc1(smpfunc_t func, unsigned long arg1)
{ smp_cross_call(func, arg1, 0, 0, 0, 0); }
extern __inline__ void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
{ smp_cross_call(func, arg1, arg2, 0, 0, 0); }
extern __inline__ void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
			   unsigned long arg3)
{ smp_cross_call(func, arg1, arg2, arg3, 0, 0); }
extern __inline__ void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
			   unsigned long arg3, unsigned long arg4)
{ smp_cross_call(func, arg1, arg2, arg3, arg4, 0); }
extern __inline__ void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
			   unsigned long arg3, unsigned long arg4, unsigned long arg5)
{ smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }

/* Generic cross-call entry point.  NOTE(review): the nonatomic and wait
 * flags are ignored here — func is simply forwarded as a one-argument
 * cross-call and 0 is returned unconditionally.
 */
extern __inline__ int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
{
	xc1((smpfunc_t)func, (unsigned long)info);
	return 0;
}

extern __volatile__ int __cpu_number_map[NR_CPUS];
extern __volatile__ int __cpu_logical_map[NR_CPUS];
extern unsigned long smp_proc_in_lock[NR_CPUS];

/* Translate between logical cpu numbers and the hardware numbering.
 * NOTE(review): which direction each array maps is not visible here —
 * confirm the index conventions against arch/sparc/kernel/smp.c.
 */
extern __inline__ int cpu_logical_map(int cpu)
{
	return __cpu_logical_map[cpu];
}
extern __inline__ int cpu_number_map(int cpu)
{
	return __cpu_number_map[cpu];
}

/* sun4m: the cpu id is held in bits 13:12 of the Trap Base Register. */
extern __inline__ int hard_smp4m_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("rd %%tbr, %0\n\t"
			     "srl %0, 12, %0\n\t"
			     "and %0, 3, %0\n\t" :
			     "=&r" (cpuid));
	return cpuid;
}

/* sun4d: the cpu id is read from a per-cpu Viking MMU scratch register
 * via the ASI_M_VIKING_TMP1 address space identifier.
 */
extern __inline__ int hard_smp4d_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
			     "=&r" (cpuid) : "i" (ASI_M_VIKING_TMP1));
	return cpuid;
}

#ifndef MODULE
extern __inline__ int hard_smp_processor_id(void)
{
	int cpuid;

	/* Black box - sun4m
		__asm__ __volatile__("rd %%tbr, %0\n\t"
				     "srl %0, 12, %0\n\t"
				     "and %0, 3, %0\n\t" :
				     "=&r" (cpuid));
	             - sun4d
		__asm__ __volatile__("lda [%g0] ASI_M_VIKING_TMP1, %0\n\t"
				     "nop; nop" :
				     "=&r" (cpuid));
	   See btfixup.h and btfixupprep.c to understand how a blackbox works.
	 */
	/* The first sethi marks the blackbox site that btfixup rewrites at
	 * boot into one of the machine-specific sequences above; until it is
	 * patched, the fallback below just loads boot_cpu_id.
	 */
	__asm__ __volatile__("sethi %%hi(___b_smp_processor_id), %0\n\t"
			     "sethi %%hi(boot_cpu_id), %0\n\t"
			     "ldub [%0 + %%lo(boot_cpu_id)], %0\n\t" :
			     "=&r" (cpuid));
	return cpuid;
}
#else
/* Modules cannot be btfixup-patched, so call the out-of-line helper
 * instead; it returns the cpu id in %g2 (%g1 and %g2 are clobbered).
 */
extern __inline__ int hard_smp_processor_id(void)
{
	int cpuid;

	__asm__ __volatile__("mov %%o7, %%g1\n\t"
			     "call ___f___smp_processor_id\n\t"
			     " nop\n\t"
			     "mov %%g2, %0\n\t" : "=r"(cpuid) : : "g1", "g2");
	return cpuid;
}
#endif

#define smp_processor_id() hard_smp_processor_id()

/* Both are no-ops on sparc32. */
extern __inline__ void smp_send_reschedule(int cpu) { }
extern __inline__ void smp_send_stop(void) { }

#endif /* !(__ASSEMBLY__) */

/* Sparc specific messages. */
#define MSG_CROSS_CALL		0x0005	/* run func on cpus */

/* Empirical PROM processor mailbox constants.  If the per-cpu mailbox
 * contains something other than one of these then the ipi is from
 * Linux's active_kernel_processor.  This facility exists so that
 * the boot monitor can capture all the other cpus when one catches
 * a watchdog reset or the user enters the monitor using L1-A keys.
 */
#define MBOX_STOPCPU		0xFB
#define MBOX_IDLECPU		0xFC
#define MBOX_IDLECPU2		0xFD
#define MBOX_STOPCPU2		0xFE

#define PROC_CHANGE_PENALTY	15

#endif /* !(CONFIG_SMP) */

#define NO_PROC_ID		0xFF

#endif /* !(_SPARC_SMP_H) */