/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else
		rc = 0;

 out:
	ACPI_FREE(out_obj);

	return rc;
}
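
/*
 * Illustrative marshaling layout for acpi_nfit_ctl() (hypothetical
 * field sizes, not a real command): for a command with two input
 * fields of 4 and 8 bytes and two output fields of 4 bytes each, the
 * single ACPI buffer handed to _DSM spans just the input fields, and
 * the returned buffer is copied back immediately after them:
 *
 *   buf: [ in0 (4) | in1 (8) | out0 (4) | out1 (4) ]
 *                            ^-- in_buf.buffer.length == 12
 */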

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}
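
/*
 * Note: nfit_spa_type() returns the matching enum nfit_uuids index, so
 * this scheme relies on the SPA range GUID entries in that enum (see
 * nfit.h) lining up with the NFIT_SPA_* indices that spa_type_name()
 * uses above.
 */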

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}
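
/*
 * Each add_* helper above follows the same pattern: if a byte-identical
 * table (compared over the smaller of the firmware table length and our
 * structure size) was seen in a previous parse, reuse it by moving it
 * from the 'prev' staging list back to the live list; otherwise
 * devm-allocate a fresh wrapper.  This is what lets acpi_nfit_init()
 * re-run against a hotplug-updated NFIT without duplicating entries.
 */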

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}
}
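
/*
 * Summary of the association chain built above for a block-capable
 * dimm: DCR index -> BDW (nfit_mem->bdw) -> SPA-BDW (spa_bdw) -> the
 * MEMDEV mapping this dimm into that SPA (memdev_bdw), plus its
 * optional interleave table (idt_bdw) and flush hints (nfit_flush).
 * A missing link simply leaves the downstream fields NULL.
 */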

static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups);

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	int i;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
				nvdimm_name(nvdimm),
		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}
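
/*
 * mapping[0] above is a pre-C99 flexible array member, so
 * sizeof(struct nfit_set_info) is effectively just the (empty) header
 * and sizeof_nfit_set_info() sizes the allocation as one
 * nfit_set_info_map per mapping.
 */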

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}
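
/*
 * The interleave-set cookie computed above is a fletcher64 checksum of
 * the (region_offset, serial_number) tuples sorted by region_offset,
 * so it is stable no matter what order the mappings were discovered
 * in; libnvdimm can then compare it against the cookie recorded in
 * namespace labels to detect dimms that have moved or been replaced.
 */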

static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
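
/*
 * Worked example with illustrative numbers: line_size = 256,
 * num_lines = 2, table_size = 1024, idt->line_offset = { 0, 2 }.  An
 * aperture offset of 0x305 yields line_no = 3, sub_line_offset = 0x5,
 * table_skip_count = 1, line_index = 1, so the translated offset is
 * base_offset + 2 * 256 + 1 * 1024 + 0x5.
 */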

static void wmb_blk(struct nfit_blk *nfit_blk)
{
	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush.  Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem().  The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}

static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
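
/*
 * Layout of the block command word assembled above (per the enum
 * masks/shifts):
 *
 *   bits 63..56: command (1 = write)
 *   bits 55..48: transfer length in cache lines
 *   bits 47..0:  DPA in cache-line units
 */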

static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	if (spa_map->type == SPA_MAP_APERTURE)
		memunmap((void __force *)spa_map->addr.aperture);
	else
		iounmap(spa_map->addr.base);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}

static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}

static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);

	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}

static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->addr.base;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	spa_map->type = type;
	if (type == SPA_MAP_APERTURE)
		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
							ARCH_MEMREMAP_PMEM);
	else
		spa_map->addr.base = ioremap_nocache(start, n);

	if (!spa_map->addr.base)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->addr.base;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}

/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @acpi_desc: NFIT-bus descriptor that provided the spa table entry
 * @spa: spa table to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range.  In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa, type);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags));

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_flush *nfit_flush;
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
			SPA_MAP_APERTURE);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
			SPA_MAP_CONTROL);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed to get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	nfit_flush = nfit_mem->nfit_flush;
	if (nfit_flush && nfit_flush->flush->hint_count != 0) {
		nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
				nfit_flush->flush->hint_address[0], 8);
		if (!nfit_blk->nvdimm_flush)
			return -ENOMEM;
	}

	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}

static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	int i;

	if (!nfit_blk)
		return; /* never enabled */

	/* auto-free BLK spa mappings */
	for (i = 0; i < 2; i++) {
		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

		if (mmio->addr.base)
			nfit_spa_unmap(acpi_desc, mmio->spa);
	}
	nd_blk_region_set_provider_data(ndbr, NULL);
	/* devm will free nfit_blk */
}

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct acpi_nfit_system_address *spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	nd_mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		nd_mapping->start = memdev->address;
		nd_mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			nd_mapping->size = nfit_mem->bdw->capacity;
			nd_mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc))
			return -ENOMEM;
		break;
	}

	return 0;
}

static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->is_registered)
		return 0;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, spa);
		if (rc)
			return rc;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		return rc;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	}

	nfit_spa->is_registered = 1;
	return 0;
}

static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc = acpi_nfit_register_region(acpi_desc, nfit_spa);

		if (rc)
			return rc;
	}
	return 0;
}

static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}
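
/*
 * acpi_nfit_init() below runs on both cold add and hotplug update: it
 * cuts every currently-known table onto the 'prev' staging lists, lets
 * the add_* helpers move back each table that still appears in the new
 * NFIT, and then treats anything left on 'prev' as an unsupported
 * deletion via acpi_nfit_check_deletions() above.
 */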
1627 
acpi_nfit_init(struct acpi_nfit_desc * acpi_desc,acpi_size sz)1628 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
1629 {
1630 	struct device *dev = acpi_desc->dev;
1631 	struct nfit_table_prev prev;
1632 	const void *end;
1633 	u8 *data;
1634 	int rc;
1635 
1636 	mutex_lock(&acpi_desc->init_mutex);
1637 
1638 	INIT_LIST_HEAD(&prev.spas);
1639 	INIT_LIST_HEAD(&prev.memdevs);
1640 	INIT_LIST_HEAD(&prev.dcrs);
1641 	INIT_LIST_HEAD(&prev.bdws);
1642 	INIT_LIST_HEAD(&prev.idts);
1643 	INIT_LIST_HEAD(&prev.flushes);
1644 
	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	if (nfit_mem_init(acpi_desc) != 0) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	acpi_nfit_init_dsms(acpi_desc);

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

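/*
 * Allocate an acpi_nfit_desc for @adev, wire up the nvdimm bus
 * descriptor callbacks, and register the nvdimm bus.  The allocation
 * is devm-managed, so it is released automatically on driver detach.
 */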
static struct acpi_nfit_desc *acpi_nfit_desc_init(struct acpi_device *adev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return ERR_PTR(-ENOMEM);

	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc);
	if (!acpi_desc->nvdimm_bus) {
		devm_kfree(dev, acpi_desc);
		return ERR_PTR(-ENXIO);
	}
	/* only publish via drvdata once the bus is registered */
	dev_set_drvdata(dev, acpi_desc);

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);
	mutex_init(&acpi_desc->init_mutex);

	return acpi_desc;
}

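/*
 * Driver ->add(): locate the static NFIT, prefer a _FIT override if
 * the platform provides one, then parse whichever table was found.
 */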
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	acpi_desc = acpi_nfit_desc_init(adev);
	if (IS_ERR(acpi_desc)) {
		dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
				__func__, PTR_ERR(acpi_desc));
		return PTR_ERR(acpi_desc);
	}

	/*
	 * Save the acpi header for later and then skip it,
	 * making nfit point to the first nfit table header.
	 */
	acpi_desc->acpi_header = *tbl;
	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
	sz -= sizeof(struct acpi_table_nfit);

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER) {
			/*
			 * Parse the _FIT payload in place; the buffer
			 * must stay allocated because the parsed tables
			 * point into it.
			 */
			acpi_desc->nfit =
				(struct acpi_nfit_header *)obj->buffer.pointer;
			sz = obj->buffer.length;
		} else {
			dev_dbg(dev, "%s: invalid type %d, ignoring _FIT\n",
					__func__, (int) obj->type);
			kfree(buf.pointer);
		}
	}

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}
	return 0;
}

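/* Driver ->remove(): tear down the nvdimm bus registered by this driver */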
static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	/* acpi_desc is NULL if no NFIT was found at ->add() time */
	if (!acpi_desc)
		return 0;
	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}

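/*
 * Notification handler: firmware signals NVDIMM hotplug by updating
 * _FIT; re-evaluate it and merge the new tables into the existing set.
 */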
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_header *nfit_saved;
	union acpi_object *obj;
	struct device *dev = &adev->dev;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	device_lock(dev);
	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		goto out_unlock;
	}

	if (!acpi_desc) {
		acpi_desc = acpi_nfit_desc_init(adev);
		if (IS_ERR(acpi_desc)) {
			dev_err(dev, "%s: error initializing acpi_desc: %ld\n",
					__func__, PTR_ERR(acpi_desc));
			goto out_unlock;
		}
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		goto out_unlock;
	}

	nfit_saved = acpi_desc->nfit;
	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		acpi_desc->nfit =
			(struct acpi_nfit_header *)obj->buffer.pointer;
		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
		if (ret) {
			/* Merge failed, restore old nfit, and exit */
			acpi_desc->nfit = nfit_saved;
			dev_err(dev, "failed to merge updated NFIT\n");
		} else {
			/*
			 * Merge succeeded: the parsed tables now point
			 * into this buffer, so it must stay allocated.
			 */
			goto out_unlock;
		}
	} else {
		/* not a buffer object, the previous nfit stays in effect */
		dev_err(dev, "Invalid _FIT\n");
	}
	kfree(buf.pointer);

 out_unlock:
	device_unlock(dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

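/*
 * The BUILD_BUG_ON()s below pin the ACPICA NFIT structure definitions
 * to the fixed sub-table sizes this parser depends on, so a mismatched
 * ACPICA header fails at compile time rather than at parse time.
 */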
static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);

	return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
	acpi_bus_unregister_driver(&acpi_nfit_driver);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");