// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 */

#define pr_fmt(fmt)	"dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
#include <asm/rtas-work-area.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
	struct work_struct work;
	struct pseries_hp_errorlog *errlog;
};

struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};

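/* Free a property created by dlpar_parse_cc_property() */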
void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

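/*
 * Build a struct property from the name and value data that firmware
 * placed in the configure-connector work area.
 */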
static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

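/*
 * Allocate a device_node named after the string in the work area and
 * mark it OF_DYNAMIC so it can later be detached and freed.
 */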
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
	struct device_node *dn;
	const char *name;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (const char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kstrdup(name, GFP_KERNEL);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

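/* Recursively free a subtree built by dlpar_configure_connector() */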
void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

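/* Status values returned by the ibm,configure-connector RTAS call */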
#define COMPLETE	0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define ERR_CFG_USE     -9003

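/**
 * dlpar_configure_connector - build the device subtree for a DRC index
 * @drc_index: big-endian DRC index of the connector to configure
 * @parent: device node the new subtree will be parented to
 *
 * Repeatedly calls the ibm,configure-connector RTAS function and
 * assembles the nodes and properties it describes into an unattached
 * device_node tree.  Returns the topmost new node, or NULL on failure.
 * Callers free the result with dlpar_free_cc_nodes().
 */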
struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	struct rtas_work_area *work_area;
	char *data_buf;
	int cc_token;
	int rc = -1;

	cc_token = rtas_function_token(RTAS_FN_IBM_CONFIGURE_CONNECTOR);
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	work_area = rtas_work_area_alloc(SZ_4K);
	data_buf = rtas_work_area_raw_buf(work_area);

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		do {
			rc = rtas_call(cc_token, 2, 1, NULL,
				       rtas_work_area_phys(work_area), NULL);
		} while (rtas_busy_delay(rc));

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			pr_err("Unexpected error (%d) returned from configure-connector\n", rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	rtas_work_area_free(work_area);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

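/* Attach a subtree built by dlpar_configure_connector() to the live tree */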
int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
	int rc;

	dn->parent = parent;

	rc = of_attach_node(dn);
	if (rc) {
		pr_err("Failed to add device node %pOF\n", dn);
		return rc;
	}

	return 0;
}

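/* Detach a node and all of its children from the live device tree */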
int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn);

	return 0;
}

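/* RTAS sensor and indicator tokens used to change DR connector state */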
#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

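/*
 * Acquire ownership of a DRC: verify it is currently unusable, mark its
 * allocation-state usable, then unisolate it.  The allocation change is
 * rolled back if unisolation fails.
 */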
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

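/*
 * Release a DRC back to firmware: verify it is present, isolate it, then
 * mark its allocation-state unusable.  Isolation is undone if the
 * allocation change fails.
 */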
int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

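/* Mark an already-present DRC as unisolated */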
int dlpar_unisolate_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);

	return 0;
}

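/*
 * Convert a hotplug error log from big-endian to CPU byte order and
 * dispatch it to the memory, CPU, or persistent-memory DLPAR handler.
 */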
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries error logs are in BE format, convert to CPU byte order */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
				be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
				be32_to_cpu(hp_elog->_drc_u.drc_index);
		break;
	case PSERIES_HP_ELOG_ID_DRC_IC:
		hp_elog->_drc_u.ic.count =
				be32_to_cpu(hp_elog->_drc_u.ic.count);
		hp_elog->_drc_u.ic.index =
				be32_to_cpu(hp_elog->_drc_u.ic.index);
	}

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_CPU:
		rc = dlpar_cpu(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_PMEM:
		rc = dlpar_hp_pmem(hp_elog);
		break;

	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
	struct pseries_hp_work *hp_work =
			container_of(work, struct pseries_hp_work, work);

	handle_dlpar_errorlog(hp_work->errlog);

	kfree(hp_work->errlog);
	kfree(work);
}

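/*
 * Queue a hotplug error log for processing on the DLPAR workqueue.
 * Allocations use GFP_ATOMIC since callers may be in atomic context;
 * the request is silently dropped if allocation fails.
 */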
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
{
	struct pseries_hp_work *work;
	struct pseries_hp_errorlog *hp_errlog_copy;

	hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_ATOMIC);
	if (!hp_errlog_copy)
		return;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(&work->work, pseries_hp_work_fn);
		work->errlog = hp_errlog_copy;
		queue_work(pseries_hp_wq, &work->work);
	} else {
		kfree(hp_errlog_copy);
	}
}

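/*
 * Parsers for the sysfs DLPAR interface.  Requests take the form:
 *   <resource> <action> <id_type> <id>
 * e.g. "memory add count 1" or "cpu remove index 0x1000".
 */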
static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "memory")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
	} else if (sysfs_streq(arg, "cpu")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
	} else {
		pr_err("Invalid resource specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "add")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
	} else if (sysfs_streq(arg, "remove")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
	} else {
		pr_err("Invalid action specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;
	u32 count, index;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "indexed-count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.ic.count = cpu_to_be32(count);
		hp_elog->_drc_u.ic.index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "index")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified.\n");
		return -EINVAL;
	}

	return 0;
}

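/*
 * Handle writes to /sys/kernel/dlpar: parse the request string and pass
 * the resulting hotplug error log directly to handle_dlpar_errorlog().
 */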
static ssize_t dlpar_store(const struct class *class, const struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog hp_elog;
	char *argbuf;
	char *args;
	int rc;

	args = argbuf = kstrdup(buf, GFP_KERNEL);
	if (!argbuf)
		return -ENOMEM;

	/*
	 * Parse out the request from the user; it will be in the form:
	 * <resource> <action> <id_type> <id>
	 */
	rc = dlpar_parse_resource(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_action(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_id_type(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = handle_dlpar_errorlog(&hp_elog);

dlpar_store_out:
	kfree(argbuf);

	if (rc)
		pr_err("Could not handle DLPAR request \"%s\"\n", buf);

	return rc ? rc : count;
}

static ssize_t dlpar_show(const struct class *class, const struct class_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", "memory,cpu");
}

static CLASS_ATTR_RW(dlpar);

int __init dlpar_workqueue_init(void)
{
	if (pseries_hp_wq)
		return 0;

	pseries_hp_wq = alloc_ordered_workqueue("pseries hotplug workqueue", 0);

	return pseries_hp_wq ? 0 : -ENOMEM;
}

static int __init dlpar_sysfs_init(void)
{
	int rc;

	rc = dlpar_workqueue_init();
	if (rc)
		return rc;

	return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, dlpar_sysfs_init);