/*
 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
 * infrastructure
 *
 * APEI allows errors (for example from the chipset) to be reported to
 * the operating system. This especially improves NMI handling. In
 * addition it supports error serialization and error injection.
 *
 * For more information about APEI, please refer to ACPI Specification
 * version 4.0, chapter 17.
 *
 * This file contains common functions used by more than one APEI
 * table, including the framework of the interpreter for ERST and EINJ
 * and resource management for APEI registers.
 *
 * Copyright (C) 2009, Intel Corp.
 *	Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include "apei-internal.h"

#define APEI_PFX "APEI: "

/*
 * APEI ERST (Error Record Serialization Table) and EINJ (Error
 * INJection) interpreter framework.
 */
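
/*
 * Illustrative sketch (not taken from this file): a table driver such
 * as ERST or EINJ is expected to drive the interpreter roughly as
 * below; "ins_table", "action_table", "entries" and ACTION_ID stand
 * for the driver's own instruction table, action table and action
 * constant.
 *
 *	struct apei_exec_context ctx;
 *
 *	apei_exec_ctx_init(&ctx, ins_table, ARRAY_SIZE(ins_table),
 *			   action_table, entries);
 *	rc = apei_exec_collect_resources(&ctx, &resources);
 *	if (!rc)
 *		rc = apei_resources_request(&resources, "EXAMPLE");
 *	if (!rc)
 *		rc = apei_exec_pre_map_gars(&ctx);
 *	if (!rc)
 *		rc = apei_exec_run(&ctx, ACTION_ID);
 */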

#define APEI_EXEC_PRESERVE_REGISTER	0x1

void apei_exec_ctx_init(struct apei_exec_context *ctx,
			struct apei_exec_ins_type *ins_table,
			u32 instructions,
			struct acpi_whea_header *action_table,
			u32 entries)
{
	ctx->ins_table = ins_table;
	ctx->instructions = instructions;
	ctx->action_table = action_table;
	ctx->entries = entries;
}
EXPORT_SYMBOL_GPL(apei_exec_ctx_init);

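/*
 * Read the register described by an action table entry: read the GAR,
 * shift by the bit offset and apply the entry mask to extract the
 * field of interest.
 */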
int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
	int rc;

	rc = apei_read(val, &entry->register_region);
	if (rc)
		return rc;
	*val >>= entry->register_region.bit_offset;
	*val &= entry->mask;

	return 0;
}

int apei_exec_read_register(struct apei_exec_context *ctx,
			    struct acpi_whea_header *entry)
{
	int rc;
	u64 val = 0;

	rc = __apei_exec_read_register(entry, &val);
	if (rc)
		return rc;
	ctx->value = val;

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register);

int apei_exec_read_register_value(struct apei_exec_context *ctx,
				  struct acpi_whea_header *entry)
{
	int rc;

	rc = apei_exec_read_register(ctx, entry);
	if (rc)
		return rc;
	ctx->value = (ctx->value == entry->value);

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_read_register_value);

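/*
 * Write a value to the register described by an action table entry.
 * If APEI_EXEC_PRESERVE_REGISTER is set in the entry flags, do a
 * read-modify-write so that bits outside the masked field are
 * preserved.
 */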
int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
{
	int rc;

	val &= entry->mask;
	val <<= entry->register_region.bit_offset;
	if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
		u64 valr = 0;
		rc = apei_read(&valr, &entry->register_region);
		if (rc)
			return rc;
		valr &= ~(entry->mask << entry->register_region.bit_offset);
		val |= valr;
	}
	rc = apei_write(val, &entry->register_region);

	return rc;
}

int apei_exec_write_register(struct apei_exec_context *ctx,
			     struct acpi_whea_header *entry)
{
	return __apei_exec_write_register(entry, ctx->value);
}
EXPORT_SYMBOL_GPL(apei_exec_write_register);

int apei_exec_write_register_value(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry)
{
	int rc;

	ctx->value = entry->value;
	rc = apei_exec_write_register(ctx, entry);

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_write_register_value);

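/* No-op "instruction", for action table entries that need no work */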
int apei_exec_noop(struct apei_exec_context *ctx,
		   struct acpi_whea_header *entry)
{
	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_noop);

/*
 * Interpret the specified action. Go through the whole action table
 * and execute all instructions belonging to the action.
 */
int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
		    bool optional)
{
	int rc = -ENOENT;
	u32 i, ip;
	struct acpi_whea_header *entry;
	apei_exec_ins_func_t run;

	ctx->ip = 0;

	/*
	 * "ip" is the instruction pointer of the current instruction,
	 * "ctx->ip" specifies the next instruction to be executed, and
	 * an instruction's "run" function may change "ctx->ip" to
	 * implement "goto" semantics.
	 */
rewind:
	ip = 0;
	for (i = 0; i < ctx->entries; i++) {
		entry = &ctx->action_table[i];
		if (entry->action != action)
			continue;
		if (ip == ctx->ip) {
			if (entry->instruction >= ctx->instructions ||
			    !ctx->ins_table[entry->instruction].run) {
				pr_warning(FW_WARN APEI_PFX
			"Invalid action table, unknown instruction type: %d\n",
					   entry->instruction);
				return -EINVAL;
			}
			run = ctx->ins_table[entry->instruction].run;
			rc = run(ctx, entry);
			if (rc < 0)
				return rc;
			else if (rc != APEI_EXEC_SET_IP)
				ctx->ip++;
		}
		ip++;
		if (ctx->ip < ip)
			goto rewind;
	}

	return !optional && rc < 0 ? rc : 0;
}
EXPORT_SYMBOL_GPL(__apei_exec_run);

typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
				      struct acpi_whea_header *entry,
				      void *data);

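/*
 * Invoke "func" for every entry in the action table, validating the
 * instruction type of each entry first.  If "end" is non-NULL it is
 * updated with the index of the last entry visited, so callers can
 * unwind partially processed tables on error.
 */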
static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
				    apei_exec_entry_func_t func,
				    void *data,
				    int *end)
{
	u8 ins;
	int i, rc;
	struct acpi_whea_header *entry;
	struct apei_exec_ins_type *ins_table = ctx->ins_table;

	for (i = 0; i < ctx->entries; i++) {
		entry = ctx->action_table + i;
		ins = entry->instruction;
		if (end)
			*end = i;
		if (ins >= ctx->instructions || !ins_table[ins].run) {
			pr_warning(FW_WARN APEI_PFX
			"Invalid action table, unknown instruction type: %d\n",
				   ins);
			return -EINVAL;
		}
		rc = func(ctx, entry, data);
		if (rc)
			return rc;
	}

	return 0;
}

static int pre_map_gar_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		return apei_map_generic_address(&entry->register_region);

	return 0;
}

/*
 * Pre-map all GARs in the action table so that they can be accessed
 * in the NMI handler.
 */
int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
{
	int rc, end;

	rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
				      NULL, &end);
	if (rc) {
		struct apei_exec_context ctx_unmap;
		memcpy(&ctx_unmap, ctx, sizeof(*ctx));
		ctx_unmap.entries = end;
		apei_exec_post_unmap_gars(&ctx_unmap);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);

static int post_unmap_gar_callback(struct apei_exec_context *ctx,
				   struct acpi_whea_header *entry,
				   void *data)
{
	u8 ins = entry->instruction;

	if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
		apei_unmap_generic_address(&entry->register_region);

	return 0;
}

/* Post-unmap all GARs in the action table. */
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
{
	return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
					NULL, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);

/*
 * Resource management for GARs in APEI
 */
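/*
 * An apei_res describes a half-open range [start, end) of IO memory
 * or IO port addresses.  The lists are kept free of overlaps:
 * additions that overlap existing entries are merged.
 */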
struct apei_res {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

/* Collect all resources requested so far, to avoid conflicts */
struct apei_resources apei_resources_all = {
	.iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
	.ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};

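/*
 * Add [start, start + size) to a resource list.  Zero-sized ranges
 * are ignored; ranges that overlap existing entries are merged with
 * them.
 */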
static int apei_res_add(struct list_head *res_list,
			unsigned long start, unsigned long size)
{
	struct apei_res *res, *resn, *res_ins = NULL;
	unsigned long end = start + size;

	if (end <= start)
		return 0;
repeat:
	list_for_each_entry_safe(res, resn, res_list, list) {
		if (res->start > end || res->end < start)
			continue;
		else if (end <= res->end && start >= res->start) {
			kfree(res_ins);
			return 0;
		}
		list_del(&res->list);
		res->start = start = min(res->start, start);
		res->end = end = max(res->end, end);
		kfree(res_ins);
		res_ins = res;
		goto repeat;
	}

	if (res_ins)
		list_add(&res_ins->list, res_list);
	else {
		res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
		if (!res_ins)
			return -ENOMEM;
		res_ins->start = start;
		res_ins->end = end;
		list_add(&res_ins->list, res_list);
	}

	return 0;
}

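/*
 * Remove from res_list1 everything that is also covered by res_list2:
 * fully covered entries are deleted, partial overlaps are trimmed,
 * and an entry that spans a res_list2 range is split in two.
 */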
static int apei_res_sub(struct list_head *res_list1,
			struct list_head *res_list2)
{
	struct apei_res *res1, *resn1, *res2, *res;
	res1 = list_entry(res_list1->next, struct apei_res, list);
	resn1 = list_entry(res1->list.next, struct apei_res, list);
	while (&res1->list != res_list1) {
		list_for_each_entry(res2, res_list2, list) {
			if (res1->start >= res2->end ||
			    res1->end <= res2->start)
				continue;
			else if (res1->end <= res2->end &&
				 res1->start >= res2->start) {
				list_del(&res1->list);
				kfree(res1);
				break;
			} else if (res1->end > res2->end &&
				   res1->start < res2->start) {
				res = kmalloc(sizeof(*res), GFP_KERNEL);
				if (!res)
					return -ENOMEM;
				res->start = res2->end;
				res->end = res1->end;
				res1->end = res2->start;
				list_add(&res->list, &res1->list);
				resn1 = res;
			} else {
				if (res1->start < res2->start)
					res1->end = res2->start;
				else
					res1->start = res2->end;
			}
		}
		res1 = resn1;
		resn1 = list_entry(resn1->list.next, struct apei_res, list);
	}

	return 0;
}

static void apei_res_clean(struct list_head *res_list)
{
	struct apei_res *res, *resn;

	list_for_each_entry_safe(res, resn, res_list, list) {
		list_del(&res->list);
		kfree(res);
	}
}

void apei_resources_fini(struct apei_resources *resources)
{
	apei_res_clean(&resources->iomem);
	apei_res_clean(&resources->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_fini);

static int apei_resources_merge(struct apei_resources *resources1,
				struct apei_resources *resources2)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources2->iomem, list) {
		rc = apei_res_add(&resources1->iomem, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}
	list_for_each_entry(res, &resources2->ioport, list) {
		rc = apei_res_add(&resources1->ioport, res->start,
				  res->end - res->start);
		if (rc)
			return rc;
	}

	return 0;
}

int apei_resources_add(struct apei_resources *resources,
		       unsigned long start, unsigned long size,
		       bool iomem)
{
	if (iomem)
		return apei_res_add(&resources->iomem, start, size);
	else
		return apei_res_add(&resources->ioport, start, size);
}
EXPORT_SYMBOL_GPL(apei_resources_add);

/*
 * EINJ has two groups of GARs (EINJ table entry and trigger table
 * entry), so common resources are subtracted from the trigger table
 * resources before the second request.
 */
int apei_resources_sub(struct apei_resources *resources1,
		       struct apei_resources *resources2)
{
	int rc;

	rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
	if (rc)
		return rc;
	return apei_res_sub(&resources1->ioport, &resources2->ioport);
}
EXPORT_SYMBOL_GPL(apei_resources_sub);

static int apei_get_res_callback(__u64 start, __u64 size, void *data)
{
	struct apei_resources *resources = data;
	return apei_res_add(&resources->iomem, start, size);
}

static int apei_get_nvs_resources(struct apei_resources *resources)
{
	return acpi_nvs_for_each_region(apei_get_res_callback, resources);
}

int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
				     void *data), void *data);

static int apei_get_arch_resources(struct apei_resources *resources)
{
	return arch_apei_filter_addr(apei_get_res_callback, resources);
}

/*
 * The IO memory/port resource management mechanism is used to check
 * whether the memory/port areas used by GARs conflict with normal
 * memory or with the IO memory/ports of devices.
 */
int apei_resources_request(struct apei_resources *resources,
			   const char *desc)
{
	struct apei_res *res, *res_bak = NULL;
	struct resource *r;
	struct apei_resources nvs_resources, arch_res;
	int rc;

	rc = apei_resources_sub(resources, &apei_resources_all);
	if (rc)
		return rc;

	/*
	 * Some firmware uses the ACPI NVS region, which has already
	 * been marked as busy, so exclude it from the APEI resources
	 * to avoid a false conflict.
	 */
	apei_resources_init(&nvs_resources);
	rc = apei_get_nvs_resources(&nvs_resources);
	if (rc)
		goto nvs_res_fini;
	rc = apei_resources_sub(resources, &nvs_resources);
	if (rc)
		goto nvs_res_fini;

	if (arch_apei_filter_addr) {
		apei_resources_init(&arch_res);
		rc = apei_get_arch_resources(&arch_res);
		if (rc)
			goto arch_res_fini;
		rc = apei_resources_sub(resources, &arch_res);
		if (rc)
			goto arch_res_fini;
	}

	rc = -EINVAL;
	list_for_each_entry(res, &resources->iomem, list) {
		r = request_mem_region(res->start, res->end - res->start,
				       desc);
		if (!r) {
			pr_err(APEI_PFX
		"Can not request [mem %#010llx-%#010llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_iomem;
		}
	}

	list_for_each_entry(res, &resources->ioport, list) {
		r = request_region(res->start, res->end - res->start, desc);
		if (!r) {
			pr_err(APEI_PFX
		"Can not request [io  %#06llx-%#06llx] for %s registers\n",
			       (unsigned long long)res->start,
			       (unsigned long long)res->end - 1, desc);
			res_bak = res;
			goto err_unmap_ioport;
		}
	}

	rc = apei_resources_merge(&apei_resources_all, resources);
	if (rc) {
		pr_err(APEI_PFX "Fail to merge resources!\n");
		goto err_unmap_ioport;
	}

	return 0;
err_unmap_ioport:
	list_for_each_entry(res, &resources->ioport, list) {
		if (res == res_bak)
			break;
		release_region(res->start, res->end - res->start);
	}
	res_bak = NULL;
err_unmap_iomem:
	list_for_each_entry(res, &resources->iomem, list) {
		if (res == res_bak)
			break;
		release_mem_region(res->start, res->end - res->start);
	}
arch_res_fini:
	/* arch_res is only initialized when arch_apei_filter_addr is set */
	if (arch_apei_filter_addr)
		apei_resources_fini(&arch_res);
nvs_res_fini:
	apei_resources_fini(&nvs_resources);
	return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);

void apei_resources_release(struct apei_resources *resources)
{
	int rc;
	struct apei_res *res;

	list_for_each_entry(res, &resources->iomem, list)
		release_mem_region(res->start, res->end - res->start);
	list_for_each_entry(res, &resources->ioport, list)
		release_region(res->start, res->end - res->start);

	rc = apei_resources_sub(&apei_resources_all, resources);
	if (rc)
		pr_err(APEI_PFX "Fail to sub resources!\n");
}
EXPORT_SYMBOL_GPL(apei_resources_release);

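/*
 * Validate a Generic Address Structure and derive the access width in
 * bits from its access size code (1 -> 8, 2 -> 16, 3 -> 32, 4 -> 64,
 * i.e. 1 << (code + 2)).  Also fix up firmware that declares a 32 or
 * 64 bit register at an aligned address but a narrower access width.
 */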
static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
				u32 *access_bit_width)
{
	u32 bit_width, bit_offset, access_size_code, space_id;

	bit_width = reg->bit_width;
	bit_offset = reg->bit_offset;
	access_size_code = reg->access_width;
	space_id = reg->space_id;
	*paddr = get_unaligned(&reg->address);
	if (!*paddr) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid physical address in GAR [0x%llx/%u/%u/%u/%u]\n",
			   *paddr, bit_width, bit_offset, access_size_code,
			   space_id);
		return -EINVAL;
	}

	if (access_size_code < 1 || access_size_code > 4) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid access size code in GAR [0x%llx/%u/%u/%u/%u]\n",
			   *paddr, bit_width, bit_offset, access_size_code,
			   space_id);
		return -EINVAL;
	}
	*access_bit_width = 1UL << (access_size_code + 2);

	/* Fixup common BIOS bug */
	if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
	    *access_bit_width < 32)
		*access_bit_width = 32;
	else if (bit_width == 64 && bit_offset == 0 && (*paddr & 0x07) == 0 &&
	    *access_bit_width < 64)
		*access_bit_width = 64;

	if ((bit_width + bit_offset) > *access_bit_width) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
			   *paddr, bit_width, bit_offset, access_size_code,
			   space_id);
		return -EINVAL;
	}

	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		pr_warning(FW_BUG APEI_PFX
			   "Invalid address space type in GAR [0x%llx/%u/%u/%u/%u]\n",
			   *paddr, bit_width, bit_offset, access_size_code,
			   space_id);
		return -EINVAL;
	}

	return 0;
}

int apei_map_generic_address(struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;
	return acpi_os_map_generic_address(reg);
}
EXPORT_SYMBOL_GPL(apei_map_generic_address);

/* Read GAR in interrupt (including NMI) or process context */
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	*val = 0;
	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_read_memory((acpi_physical_address) address,
					       val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_read_port(address, (u32 *)val,
					   access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_read);

/* Write GAR in interrupt (including NMI) or process context */
int apei_write(u64 val, struct acpi_generic_address *reg)
{
	int rc;
	u32 access_bit_width;
	u64 address;
	acpi_status status;

	rc = apei_check_gar(reg, &address, &access_bit_width);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		status = acpi_os_write_memory((acpi_physical_address) address,
						val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	case ACPI_ADR_SPACE_SYSTEM_IO:
		status = acpi_os_write_port(address, val, access_bit_width);
		if (ACPI_FAILURE(status))
			return -EIO;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_write);

static int collect_res_callback(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry,
				void *data)
{
	struct apei_resources *resources = data;
	struct acpi_generic_address *reg = &entry->register_region;
	u8 ins = entry->instruction;
	u32 access_bit_width;
	u64 paddr;
	int rc;

	if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
		return 0;

	rc = apei_check_gar(reg, &paddr, &access_bit_width);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return apei_res_add(&resources->iomem, paddr,
				    access_bit_width / 8);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return apei_res_add(&resources->ioport, paddr,
				    access_bit_width / 8);
	default:
		return -EINVAL;
	}
}

/*
 * The same register may be used by multiple instructions in the GARs,
 * so resources are collected before being requested.
 */
int apei_exec_collect_resources(struct apei_exec_context *ctx,
				struct apei_resources *resources)
{
	return apei_exec_for_each_entry(ctx, collect_res_callback,
					resources, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_collect_resources);

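/*
 * Return the APEI debugfs directory, creating "apei" under the
 * debugfs root on first use and reusing it afterwards.
 */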
struct dentry *apei_get_debugfs_dir(void)
{
	static struct dentry *dapei;

	if (!dapei)
		dapei = debugfs_create_dir("apei", NULL);

	return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);

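/*
 * Weak default implementations; architectures (x86, for example) can
 * override these to hook up firmware-first corrected error handling
 * and memory error reporting.
 */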
int __weak arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr,
				  void *data)
{
	return 1;
}
EXPORT_SYMBOL_GPL(arch_apei_enable_cmcff);

void __weak arch_apei_report_mem_error(int sev,
				       struct cper_sec_mem_err *mem_err)
{
}
EXPORT_SYMBOL_GPL(arch_apei_report_mem_error);

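/*
 * Evaluate _OSC under \_SB with the WHEA UUID to tell the platform
 * firmware that the OS supports APEI.
 */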
int apei_osc_setup(void)
{
	static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
	acpi_handle handle;
	u32 capbuf[3];
	struct acpi_osc_context context = {
		.uuid_str	= whea_uuid_str,
		.rev		= 1,
		.cap.length	= sizeof(capbuf),
		.cap.pointer	= capbuf,
	};

	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_DWORD] = 1;
	capbuf[OSC_CONTROL_DWORD] = 0;

	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
	    || ACPI_FAILURE(acpi_run_osc(handle, &context)))
		return -EIO;

	kfree(context.ret.pointer);
	return 0;
}
EXPORT_SYMBOL_GPL(apei_osc_setup);