/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>
#include "offline_states.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>

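/*
 * Layout of the work area exchanged with the ibm,configure-connector RTAS
 * call via rtas_data_buf.  Fields are big-endian as provided by firmware;
 * name_offset and prop_offset are byte offsets from the start of the work
 * area to the returned node/property name and property value.
 */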
struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
					       const char *path)
{
	struct device_node *dn;
	char *name;

	/* If the parent node path is "/", advance path to its NUL terminator
	 * to avoid a double leading slash in full_name.
	 */
	if (!path[1])
		path++;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

#define COMPLETE	0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define CALL_AGAIN	-2
#define ERR_CFG_USE     -9003

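/*
 * dlpar_configure_connector - build the device sub-tree for a DR connector
 * @drc_index: big-endian DRC index of the connector to configure
 * @parent: device node the new sub-tree will eventually be attached under
 *
 * Repeatedly invokes the ibm,configure-connector RTAS call, reconstructing
 * the returned device tree one node/property at a time as directed by the
 * return codes above (NEXT_CHILD, NEXT_SIBLING, NEXT_PROPERTY, PREV_PARENT).
 * Returns the root of the newly built sub-tree, or NULL on failure; if the
 * tree is not subsequently attached, free it with dlpar_free_cc_nodes().
 */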
struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	const char *parent_path = parent->full_name;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between
		 * configure-connector calls, re-populate rtas_data_buf with
		 * the contents of the previous call before each new call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			if (first_dn)
				parent_path = last_dn->full_name;

			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			parent_path = last_dn->parent->full_name;
			break;

		case CALL_AGAIN:
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected error (%d) returned from configure-connector\n",
			       rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

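/*
 * Given a node's full device tree path (e.g. "/cpus/PowerPC,POWER8@10",
 * name shown for illustration), look up and return its parent node
 * ("/cpus" in that example) with a reference held, or NULL if the parent
 * cannot be found or memory allocation fails.
 */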
static struct device_node *derive_parent(const char *path)
{
	struct device_node *parent;
	char *last_slash;

	last_slash = strrchr(path, '/');
	if (last_slash == path) {
		parent = of_find_node_by_path("/");
	} else {
		char *parent_path;
		int parent_path_len = last_slash - path + 1;
		parent_path = kmalloc(parent_path_len, GFP_KERNEL);
		if (!parent_path)
			return NULL;

		strlcpy(parent_path, path, parent_path_len);
		parent = of_find_node_by_path(parent_path);
		kfree(parent_path);
	}

	return parent;
}

int dlpar_attach_node(struct device_node *dn)
{
	int rc;

	dn->parent = derive_parent(dn->full_name);
	if (!dn->parent)
		return -ENOMEM;

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %s\n",
		       dn->full_name);
		return rc;
	}

	of_node_put(dn->parent);
	return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn); /* Must decrement the refcount */
	return 0;
}

#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

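/*
 * dlpar_acquire_drc() takes ownership of a dynamic reconfiguration
 * connector (DRC): it checks via the dr-entity-sense sensor that the
 * entity is currently unusable (still owned by the platform), marks its
 * allocation state usable, and then un-isolates it.  dlpar_release_drc()
 * performs the reverse sequence, and both back out their first indicator
 * change if the second one fails.
 */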
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

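/*
 * Bring every hardware thread of a newly added CPU node online.  Each
 * entry in the node's "ibm,ppc-interrupt-server#s" property is a physical
 * thread id; it is matched against the present logical CPUs and the
 * corresponding device is onlined via device_online().
 */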
static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to online with physical id 0x%x\n",
			       thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}

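/*
 * Handler for writes to the arch CPU probe interface (typically exposed as
 * /sys/devices/system/cpu/probe; wired up through ppc_md.cpu_probe in
 * pseries_dlpar_init() below).  The buffer contains the DRC index of the
 * CPU to add, e.g.:
 *
 *	echo 0x10000000 > /sys/devices/system/cpu/probe
 *
 * (index value shown for illustration).  The CPU is acquired from firmware,
 * its device tree nodes are built and attached, and its threads onlined.
 */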
static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	struct device_node *dn, *parent;
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_acquire_drc(drc_index);
	if (rc)
		return -EINVAL;

	parent = of_find_node_by_path("/cpus");
	if (!parent) {
		dlpar_release_drc(drc_index);
		return -ENODEV;
	}

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	of_node_put(parent);
	if (!dn) {
		dlpar_release_drc(drc_index);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn);
	if (rc) {
		dlpar_release_drc(drc_index);
		dlpar_free_cc_nodes(dn);
		return rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc)
		return rc;

	return count;
}

static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				rc = device_offline(get_cpu_device(cpu));
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;
			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, thread)
								!= H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n",
			       thread);
	}
	cpu_maps_update_done();

out:
	return rc;
}

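/*
 * Handler for writes to the arch CPU release interface (typically exposed
 * as /sys/devices/system/cpu/release; wired up through ppc_md.cpu_release
 * in pseries_dlpar_init() below).  The buffer contains the full device
 * tree path of the CPU node to remove, e.g. "/cpus/PowerPC,POWER8@10"
 * (path shown for illustration).  The CPU's threads are offlined, the DRC
 * is returned to firmware, and the device tree nodes are detached.
 */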
static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		of_node_put(dn);
		return rc;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		dlpar_acquire_drc(drc_index);
		return rc;
	}

	of_node_put(dn);

	return count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries hotplug errorlog fields are big-endian; convert to the
	 * CPU's byte order before use.
	 */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
					be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
					be32_to_cpu(hp_elog->_drc_u.drc_index);
	}

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

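/*
 * sysfs store handler for the dlpar attribute created in
 * pseries_dlpar_init() (i.e. /sys/kernel/dlpar).  Requests are plain text
 * of the form "<resource> <action> <id_type> <id>"; only memory requests
 * are handled here, e.g.:
 *
 *	echo "memory add count 1" > /sys/kernel/dlpar
 *	echo "memory remove index 0x80000010" > /sys/kernel/dlpar
 *
 * (index value shown for illustration).
 */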
static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog *hp_elog;
	const char *arg;
	int rc;

	hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
	if (!hp_elog) {
		rc = -ENOMEM;
		goto dlpar_store_out;
	}

	/* Parse out the request from the user; it will be in the form
	 * <resource> <action> <id_type> <id>
	 */
	arg = buf;
	if (!strncmp(arg, "memory", 6)) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
		arg += strlen("memory ");
	} else {
		pr_err("Invalid resource specified: \"%s\"\n", buf);
		rc = -EINVAL;
		goto dlpar_store_out;
	}

	if (!strncmp(arg, "add", 3)) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
		arg += strlen("add ");
	} else if (!strncmp(arg, "remove", 6)) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
		arg += strlen("remove ");
	} else {
		pr_err("Invalid action specified: \"%s\"\n", buf);
		rc = -EINVAL;
		goto dlpar_store_out;
	}

	if (!strncmp(arg, "index", 5)) {
		u32 index;

		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg += strlen("index ");
		if (kstrtou32(arg, 0, &index)) {
			rc = -EINVAL;
			pr_err("Invalid drc_index specified: \"%s\"\n", buf);
			goto dlpar_store_out;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (!strncmp(arg, "count", 5)) {
		u32 count;

		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg += strlen("count ");
		if (kstrtou32(arg, 0, &count)) {
			rc = -EINVAL;
			pr_err("Invalid count specified: \"%s\"\n", buf);
			goto dlpar_store_out;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified: \"%s\"\n", buf);
		rc = -EINVAL;
		goto dlpar_store_out;
	}

	rc = handle_dlpar_errorlog(hp_elog);

dlpar_store_out:
	kfree(hp_elog);
	return rc ? rc : count;
}

static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);

static int __init pseries_dlpar_init(void)
{
	int rc;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

	rc = sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);

	return rc;
}
machine_device_initcall(pseries, pseries_dlpar_init);