/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 * arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/system.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/pSeries_reconfig.h>
#include "xics.h"
#include "plpar_wrappers.h"
#include "offline_states.h"

/* This version can't take the spinlock, because it never returns */
static struct rtas_args rtas_stop_self_args = {
	.token = RTAS_UNKNOWN_SERVICE,
	.nargs = 0,
	.nret = 1,
	.rets = &rtas_stop_self_args.args[0],
};

static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
							CPU_STATE_OFFLINE;
static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;

static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;

static int cede_offline_enabled __read_mostly = 1;

/*
 * Enable/disable cede_offline when available.
 */
static int __init setup_cede_offline(char *str)
{
	if (!strcmp(str, "off"))
		cede_offline_enabled = 0;
	else if (!strcmp(str, "on"))
		cede_offline_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("cede_offline=", setup_cede_offline);

enum cpu_state_vals get_cpu_current_state(int cpu)
{
	return per_cpu(current_state, cpu);
}

void set_cpu_current_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(current_state, cpu) = state;
}

enum cpu_state_vals get_preferred_offline_state(int cpu)
{
	return per_cpu(preferred_offline_state, cpu);
}

void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
{
	per_cpu(preferred_offline_state, cpu) = state;
}

void set_default_offline_state(int cpu)
{
	per_cpu(preferred_offline_state, cpu) = default_offline_state;
}

static void rtas_stop_self(void)
{
	struct rtas_args *args = &rtas_stop_self_args;

	local_irq_disable();

	BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);

	printk("cpu %u (hwid %u) Ready to die...\n",
	       smp_processor_id(), hard_smp_processor_id());
	enter_rtas(__pa(args));

	panic("Alas, I survived.\n");
}

static void pseries_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int hwcpu = hard_smp_processor_id();
	u8 cede_latency_hint = 0;

	local_irq_disable();
	idle_task_exit();
	xics_teardown_cpu();

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
		if (ppc_md.suspend_disable_cpu)
			ppc_md.suspend_disable_cpu();

		cede_latency_hint = 2;

		get_lppaca()->idle = 1;
		if (!get_lppaca()->shared_proc)
			get_lppaca()->donate_dedicated_cpu = 1;

		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
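			/*
			 * Cede this thread to the hypervisor.  The cede
			 * call can return on a spurious wakeup (e.g. an
			 * H_PROD from another cpu), so keep ceding until
			 * the preferred state moves away from
			 * CPU_STATE_INACTIVE.
			 */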
			extended_cede_processor(cede_latency_hint);
		}

		if (!get_lppaca()->shared_proc)
			get_lppaca()->donate_dedicated_cpu = 0;
		get_lppaca()->idle = 0;

		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
			unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));

			/*
			 * Call to start_secondary_resume() will not return.
			 * Kernel stack will be reset and start_secondary()
			 * will be called to continue the online operation.
			 */
			start_secondary_resume();
		}
	}

	/* Requested state is CPU_STATE_OFFLINE at this point */
	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);

	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
	unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for (;;);
}

static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);
	vdso_data->processorCount--;

	/* Fix boot_cpuid here */
	if (cpu == boot_cpuid)
		boot_cpuid = cpumask_any(cpu_online_mask);

	xics_migrate_irqs_away();
	return 0;
}

/*
 * pseries_cpu_die: Wait for the cpu to die.
 * @cpu: logical processor id of the CPU whose death we're awaiting.
 *
 * This function is called from the context of the thread which is performing
 * the cpu-offline. Here we wait for long enough to allow the cpu in question
 * to self-destroy so that the cpu-offline thread can send the CPU_DEAD
 * notifications.
 *
 * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
 * self-destruct.
 */
static void pseries_cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status = 1;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		cpu_status = 1;
		for (tries = 0; tries < 5000; tries++) {
			if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
				cpu_status = 0;
				break;
			}
			msleep(1);
		}
	} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {

		for (tries = 0; tries < 25; tries++) {
			cpu_status = smp_query_cpu_stopped(pcpu);
			if (cpu_status == QCSS_STOPPED ||
			    cpu_status == QCSS_HARDWARE_ERROR)
				break;
			cpu_relax();
		}
	}

	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu.  If they were not they would be
	 * done here.  Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	paca[cpu].cpu_start = 0;
}

/*
 * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case.  We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
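 * For example, with two threads per core, sibling threads occupy
 * logical ids {0,1}, {2,3}, and so on; the slot search below preserves
 * this by shifting its candidate mask in whole multiples of nthreads.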
 */
static int pseries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_var_t candidate_mask, tmp;
	int err = -ENOSPC, len, nthreads, i;
	const u32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
	zalloc_cpumask_var(&tmp, GFP_KERNEL);

	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpumask_set_cpu(i, tmp);

	cpu_maps_update_begin();

	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));

	/* Get a bitmap of unoccupied slots. */
	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
	if (cpumask_empty(candidate_mask)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %s; this system configuration"
		       " supports %d logical cpus.\n", np->full_name,
		       cpumask_weight(cpu_possible_mask));
		goto out_unlock;
	}

	while (!cpumask_empty(tmp))
		if (cpumask_subset(tmp, candidate_mask))
			/* Found a range where we can insert the new cpu(s) */
			break;
		else
			cpumask_shift_left(tmp, tmp, nthreads);

	if (cpumask_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
		       " processor %s with %d thread(s)\n", np->name,
		       nthreads);
		goto out_unlock;
	}

	for_each_cpu(cpu, tmp) {
		BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
		set_cpu_present(cpu, true);
		set_hard_smp_processor_id(cpu, *intserv++);
	}
	err = 0;
out_unlock:
	cpu_maps_update_done();
	free_cpumask_var(candidate_mask);
	free_cpumask_var(tmp);
	return err;
}

/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
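 * The logical cpu ids stay in cpu_possible_mask, so a later add
 * operation can find and reuse the vacated slots.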
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const u32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != intserv[i])
				continue;
			BUG_ON(cpu_online(cpu));
			set_cpu_present(cpu, false);
			set_hard_smp_processor_id(cpu, -1);
			break;
		}
		if (cpu >= nr_cpu_ids)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", intserv[i]);
	}
	cpu_maps_update_done();
}

static int pseries_smp_notifier(struct notifier_block *nb,
				unsigned long action, void *node)
{
	int err = NOTIFY_OK;

	switch (action) {
	case PSERIES_RECONFIG_ADD:
		if (pseries_add_processor(node))
			err = NOTIFY_BAD;
		break;
	case PSERIES_RECONFIG_REMOVE:
		pseries_remove_processor(node);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block pseries_smp_nb = {
	.notifier_call = pseries_smp_notifier,
};

#define MAX_CEDE_LATENCY_LEVELS		4
#define CEDE_LATENCY_PARAM_LENGTH	10
#define CEDE_LATENCY_PARAM_MAX_LENGTH	\
	(MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
#define CEDE_LATENCY_TOKEN		45

static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];

static int parse_cede_parameters(void)
{
	memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
	return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
			 NULL,
			 CEDE_LATENCY_TOKEN,
			 __pa(cede_parameters),
			 CEDE_LATENCY_PARAM_MAX_LENGTH);
}

static int __init pseries_cpu_hotplug_init(void)
{
	struct device_node *np;
	const char *typep;
	int cpu;
	int qcss_tok;

	for_each_node_by_name(np, "interrupt-controller") {
		typep = of_get_property(np, "compatible", NULL);
		/* Guard against nodes with no "compatible" property. */
		if (typep && strstr(typep, "open-pic")) {
			of_node_put(np);

			printk(KERN_INFO "CPU Hotplug not supported on "
			       "systems using MPIC\n");
			return 0;
		}
	}

	rtas_stop_self_args.token = rtas_token("stop-self");
	qcss_tok = rtas_token("query-cpu-stopped-state");

	if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
	    qcss_tok == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_INFO "CPU Hotplug not supported by firmware "
		       "- disabling.\n");
		return 0;
	}

	ppc_md.cpu_die = pseries_mach_cpu_die;
	smp_ops->cpu_disable = pseries_cpu_disable;
	smp_ops->cpu_die = pseries_cpu_die;

	/* Processors can be added/removed only on LPAR */
	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		pSeries_reconfig_notifier_register(&pseries_smp_nb);
		cpu_maps_update_begin();
		if (cede_offline_enabled && parse_cede_parameters() == 0) {
			default_offline_state = CPU_STATE_INACTIVE;
			for_each_online_cpu(cpu)
				set_default_offline_state(cpu);
		}
		cpu_maps_update_done();
	}

	return 0;
}
arch_initcall(pseries_cpu_hotplug_init);