Lines matching refs:pr (uses of the struct acpi_processor pointer pr)

187 static int acpi_processor_hotadd_init(struct acpi_processor *pr)
193 if (invalid_phys_cpuid(pr->phys_id))
196 status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
203 ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id);
207 ret = arch_register_cpu(pr->id);
209 acpi_unmap_cpu(pr->id);
218 pr_info("CPU%d has been hot-added\n", pr->id);
219 pr->flags.need_hotplug_init = 1;
227 static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
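
Read together, the matches at kernel lines 187-219 outline the CPU hot-add path: validate the physical id, confirm the processor is actually present via _STA, map it to a logical CPU, register it with the architecture code (unmapping again if that fails), and mark it for deferred initialization. The condensed sketch below is reconstructed from those matching lines only; the locking and the exact error handling between them are assumptions, and the inline variant at line 227 is presumably the stub used when CPU hotplug support is compiled out.

/*
 * Condensed reconstruction of the hot-add path suggested by the
 * matches above (kernel lines 187-219). The error handling between
 * the matched lines is an assumption, not verbatim kernel code.
 */
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
	unsigned long long sta;
	acpi_status status;
	int ret;

	if (invalid_phys_cpuid(pr->phys_id))
		return -ENODEV;

	/* The processor must report itself present and functioning. */
	status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
		return -ENODEV;

	/* Bind the ACPI id to a logical CPU number in pr->id. */
	ret = acpi_map_cpu(pr->handle, pr->phys_id, pr->acpi_id, &pr->id);
	if (ret)
		return ret;

	ret = arch_register_cpu(pr->id);
	if (ret) {
		acpi_unmap_cpu(pr->id);	/* undo the mapping on failure */
		return ret;
	}

	pr_info("CPU%d has been hot-added\n", pr->id);
	pr->flags.need_hotplug_init = 1;	/* finish init when the CPU comes online */
	return 0;
}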
237 struct acpi_processor *pr = acpi_driver_data(device);
250 pr->flags.bm_control = 1;
257 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
265 pr->acpi_id = object.processor.proc_id;
270 status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
279 pr->acpi_id = value;
282 if (acpi_duplicate_processor_id(pr->acpi_id)) {
283 if (pr->acpi_id == 0xff)
289 pr->acpi_id);
293 pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
294 pr->acpi_id);
295 if (invalid_phys_cpuid(pr->phys_id))
298 pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
305 if (!acpi_has_cpu_in_madt() && invalid_logical_cpuid(pr->id) &&
307 pr->id = 0;
313 if (acpi_has_method(pr->handle, "_PCT"))
325 if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id)) {
326 int ret = acpi_processor_hotadd_init(pr);
341 sprintf(acpi_device_bid(device), "CPU%X", pr->id);
342 dev_dbg(&device->dev, "Processor [%d:%d]\n", pr->id, pr->acpi_id);
350 pr->throttling.address = object.processor.pblk_address;
351 pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
352 pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
354 pr->pblk = object.processor.pblk_address;
362 status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
364 arch_fix_phys_package_id(pr->id, value);
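
The matches at kernel lines 237-364 come from the routine that fills in the processor's identifiers: pr->acpi_id is taken either from the legacy Processor object's proc_id or from _UID on a Device() declaration, rejected if firmware duplicated it, and then translated into a physical id and a logical CPU number, with absent CPUs handed to acpi_processor_hotadd_init(); the remaining matches (341-364) record the device BID, the P_BLK address and throttling duty fields, and the _SUN slot number. The helper below is purely illustrative (the name example_resolve_cpu_ids and the glue around the calls are assumptions) and condenses only the id-translation chain.

/*
 * Illustrative helper (not a real kernel function) condensing the
 * id-translation chain from the matches above (kernel lines 265-326):
 * ACPI id -> physical (hardware) id -> logical CPU number.
 */
static int example_resolve_cpu_ids(struct acpi_processor *pr,
				   int device_declaration)
{
	if (acpi_duplicate_processor_id(pr->acpi_id))
		return -ENODEV;	/* firmware reused a processor _UID */

	/* Hardware id, e.g. the APIC id on x86. */
	pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
				       pr->acpi_id);
	if (invalid_phys_cpuid(pr->phys_id))
		return -ENODEV;

	/* Logical CPU number used by the rest of the kernel. */
	pr->id = acpi_map_cpuid(pr->phys_id, pr->acpi_id);

	/* Not enumerated or not present yet: try the hot-add path. */
	if (invalid_logical_cpuid(pr->id) || !cpu_present(pr->id))
		return acpi_processor_hotadd_init(pr);

	return 0;
}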
380 struct acpi_processor *pr;
387 pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
388 if (!pr)
391 if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
396 pr->handle = device->handle;
399 device->driver_data = pr;
405 BUG_ON(pr->id >= nr_cpu_ids);
412 if (per_cpu(processor_device_array, pr->id) != NULL &&
413 per_cpu(processor_device_array, pr->id) != device) {
416 pr->id);
424 per_cpu(processor_device_array, pr->id) = device;
425 per_cpu(processors, pr->id) = pr;
427 dev = get_cpu_device(pr->id);
437 pr->dev = dev;
447 free_cpumask_var(pr->throttling.shared_cpu_map);
449 per_cpu(processors, pr->id) = NULL;
451 kfree(pr);
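
The matches at kernel lines 380-451 show the bookkeeping done when a processor device is added: allocate the struct acpi_processor and its throttling cpumask, attach it to the ACPI device as driver data, sanity-check the logical id, refuse a second ACPI object claiming the same CPU, publish the object through the processor_device_array and processors per-CPU variables, and remember the CPU's struct device. The sketch below condenses that sequence; the function name, labels, and return values are assumptions, while the cleanup order mirrors the matches at 447-451.

/*
 * Condensed sketch of the add path above (kernel lines 380-451).
 * Label names, return values and the duplicate-device handling are
 * assumptions; the cleanup order follows the listing (447-451).
 */
static int example_processor_add(struct acpi_device *device)
{
	struct acpi_processor *pr;
	struct device *dev;
	int result;

	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
	if (!pr)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free_pr;
	}

	pr->handle = device->handle;
	device->driver_data = pr;	/* retrieved later via acpi_driver_data() */

	/* ... identifiers resolved here, filling pr->id ... */

	BUG_ON(pr->id >= nr_cpu_ids);

	/* Two ACPI objects must never claim the same logical CPU. */
	if (per_cpu(processor_device_array, pr->id) != NULL &&
	    per_cpu(processor_device_array, pr->id) != device) {
		result = -EEXIST;	/* assumed; the listing only shows the check */
		goto err;
	}

	/* Publish the processor object for the rest of the kernel. */
	per_cpu(processor_device_array, pr->id) = device;
	per_cpu(processors, pr->id) = pr;

	dev = get_cpu_device(pr->id);
	if (!dev) {
		result = -ENODEV;
		goto err;
	}
	pr->dev = dev;	/* remembered for teardown in the remove path */

	return 0;

 err:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	per_cpu(processors, pr->id) = NULL;
 err_free_pr:
	kfree(pr);
	return result;
}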
459 struct acpi_processor *pr;
464 pr = acpi_driver_data(device);
465 if (pr->id >= nr_cpu_ids)
476 device_release_driver(pr->dev);
477 acpi_unbind_one(pr->dev);
480 per_cpu(processor_device_array, pr->id) = NULL;
481 per_cpu(processors, pr->id) = NULL;
487 arch_unregister_cpu(pr->id);
488 acpi_unmap_cpu(pr->id);
493 try_offline_node(cpu_to_node(pr->id));
496 free_cpumask_var(pr->throttling.shared_cpu_map);
497 kfree(pr);
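
Finally, the matches at kernel lines 459-497 undo the add path in reverse: release the driver and unbind the CPU device, clear both per-CPU pointers, unregister and unmap the CPU, try to offline its now possibly empty NUMA node, and free the cpumask and the structure itself. A condensed sketch follows; the function name is illustrative and the placement of the hot-removal-only steps is inferred from the listing rather than copied verbatim.

/*
 * Condensed sketch of the removal path above (kernel lines 459-497).
 * The hot-removal portion (unregister/unmap/offline node) is inferred
 * from the listing, not copied verbatim.
 */
static void example_processor_remove(struct acpi_device *device)
{
	struct acpi_processor *pr = acpi_driver_data(device);

	if (pr->id >= nr_cpu_ids)
		goto out;

	/* Detach any processor driver and break the ACPI<->CPU link. */
	device_release_driver(pr->dev);
	acpi_unbind_one(pr->dev);

	/* Clear the per-CPU pointers published in the add path. */
	per_cpu(processor_device_array, pr->id) = NULL;
	per_cpu(processors, pr->id) = NULL;

	/* Hot-removal: drop the CPU from the arch and the ACPI mapping. */
	arch_unregister_cpu(pr->id);
	acpi_unmap_cpu(pr->id);

	/* The node may now be empty and eligible for offlining. */
	try_offline_node(cpu_to_node(pr->id));

 out:
	free_cpumask_var(pr->throttling.shared_cpu_map);
	kfree(pr);
}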