xref: /OK3568_Linux_fs/kernel/drivers/acpi/nfit/core.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun #include <linux/list_sort.h>
6*4882a593Smuzhiyun #include <linux/libnvdimm.h>
7*4882a593Smuzhiyun #include <linux/module.h>
8*4882a593Smuzhiyun #include <linux/nospec.h>
9*4882a593Smuzhiyun #include <linux/mutex.h>
10*4882a593Smuzhiyun #include <linux/ndctl.h>
11*4882a593Smuzhiyun #include <linux/sysfs.h>
12*4882a593Smuzhiyun #include <linux/delay.h>
13*4882a593Smuzhiyun #include <linux/list.h>
14*4882a593Smuzhiyun #include <linux/acpi.h>
15*4882a593Smuzhiyun #include <linux/sort.h>
16*4882a593Smuzhiyun #include <linux/io.h>
17*4882a593Smuzhiyun #include <linux/nd.h>
18*4882a593Smuzhiyun #include <asm/cacheflush.h>
19*4882a593Smuzhiyun #include <acpi/nfit.h>
20*4882a593Smuzhiyun #include "intel.h"
21*4882a593Smuzhiyun #include "nfit.h"
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun /*
24*4882a593Smuzhiyun  * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
25*4882a593Smuzhiyun  * irrelevant.
26*4882a593Smuzhiyun  */
27*4882a593Smuzhiyun #include <linux/io-64-nonatomic-hi-lo.h>
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun static bool force_enable_dimms;
30*4882a593Smuzhiyun module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
31*4882a593Smuzhiyun MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun static bool disable_vendor_specific;
34*4882a593Smuzhiyun module_param(disable_vendor_specific, bool, S_IRUGO);
35*4882a593Smuzhiyun MODULE_PARM_DESC(disable_vendor_specific,
36*4882a593Smuzhiyun 		"Limit commands to the publicly specified set");
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun static unsigned long override_dsm_mask;
39*4882a593Smuzhiyun module_param(override_dsm_mask, ulong, S_IRUGO);
40*4882a593Smuzhiyun MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun static int default_dsm_family = -1;
43*4882a593Smuzhiyun module_param(default_dsm_family, int, S_IRUGO);
44*4882a593Smuzhiyun MODULE_PARM_DESC(default_dsm_family,
45*4882a593Smuzhiyun 		"Try this DSM type first when identifying NVDIMM family");
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun static bool no_init_ars;
48*4882a593Smuzhiyun module_param(no_init_ars, bool, 0644);
49*4882a593Smuzhiyun MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun static bool force_labels;
52*4882a593Smuzhiyun module_param(force_labels, bool, 0444);
53*4882a593Smuzhiyun MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods");
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun LIST_HEAD(acpi_descs);
56*4882a593Smuzhiyun DEFINE_MUTEX(acpi_desc_lock);
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun static struct workqueue_struct *nfit_wq;
59*4882a593Smuzhiyun 
/*
 * Per-subtable lists of NFIT entries captured from a prior table scan,
 * one list head per NFIT sub-table type (SPA, memdev, DCR, BDW,
 * interleave, flush hint).
 * NOTE(review): presumably used to diff a re-read NFIT against the
 * previous state on re-enumeration -- confirm against the add_*()
 * helpers and acpi_nfit_init() callers.
 */
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun static guid_t nfit_uuid[NFIT_UUID_MAX];
70*4882a593Smuzhiyun 
to_nfit_uuid(enum nfit_uuids id)71*4882a593Smuzhiyun const guid_t *to_nfit_uuid(enum nfit_uuids id)
72*4882a593Smuzhiyun {
73*4882a593Smuzhiyun 	return &nfit_uuid[id];
74*4882a593Smuzhiyun }
75*4882a593Smuzhiyun EXPORT_SYMBOL(to_nfit_uuid);
76*4882a593Smuzhiyun 
to_nfit_bus_uuid(int family)77*4882a593Smuzhiyun static const guid_t *to_nfit_bus_uuid(int family)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun 	if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT,
80*4882a593Smuzhiyun 			"only secondary bus families can be translated\n"))
81*4882a593Smuzhiyun 		return NULL;
82*4882a593Smuzhiyun 	/*
83*4882a593Smuzhiyun 	 * The index of bus UUIDs starts immediately following the last
84*4882a593Smuzhiyun 	 * NVDIMM/leaf family.
85*4882a593Smuzhiyun 	 */
86*4882a593Smuzhiyun 	return to_nfit_uuid(family + NVDIMM_FAMILY_MAX);
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun 
to_acpi_dev(struct acpi_nfit_desc * acpi_desc)89*4882a593Smuzhiyun static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
90*4882a593Smuzhiyun {
91*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	/*
94*4882a593Smuzhiyun 	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
95*4882a593Smuzhiyun 	 * acpi_device.
96*4882a593Smuzhiyun 	 */
97*4882a593Smuzhiyun 	if (!nd_desc->provider_name
98*4882a593Smuzhiyun 			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
99*4882a593Smuzhiyun 		return NULL;
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	return to_acpi_device(acpi_desc->dev);
102*4882a593Smuzhiyun }
103*4882a593Smuzhiyun 
/*
 * Translate the firmware status of a bus-scope command (ARS / clear
 * error) into a Linux errno.  @status packs the command status in its
 * low 16 bits and command-specific extended status in the high 16 bits.
 * @buf is the command's output payload, consulted where translation
 * depends on returned fields.
 */
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/*
		 * Check extended status (upper two bytes); the comparisons
		 * below match the full 32-bit value, i.e. extended status
		 * combined with a zero command status.
		 */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			/* only trust 'flags' when the payload includes it */
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		/* partial clear: report the number of bytes cleared */
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun #define ACPI_LABELS_LOCKED 3
186*4882a593Smuzhiyun 
/*
 * Translate the firmware status of a DIMM-scope label command into a
 * Linux errno, accounting for the _LS{I,R,W} vs _DSM differences in
 * how a locked label area is reported.
 */
static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (cmd == ND_CMD_GET_CONFIG_SIZE) {
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands, so only the
		 * _DSM flavor reports ND_CONFIG_LOCKED here.
		 */
		if (!test_bit(NFIT_MEM_LSR, &nfit_mem->flags) &&
		    (status >> 16 & ND_CONFIG_LOCKED))
			return -EACCES;
	} else if (cmd == ND_CMD_GET_CONFIG_DATA) {
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) &&
		    status == ACPI_LABELS_LOCKED)
			return -EACCES;
	} else if (cmd == ND_CMD_SET_CONFIG_DATA) {
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags) &&
		    status == ACPI_LABELS_LOCKED)
			return -EACCES;
	}

	/* all other non-zero status results in an error */
	return status ? -EIO : 0;
}
223*4882a593Smuzhiyun 
/* Dispatch status translation: DIMM-scope vs bus-scope command maps. */
static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	return nvdimm ? xlat_nvdimm_status(nvdimm, buf, cmd, status)
		      : xlat_bus_status(buf, cmd, status);
}
231*4882a593Smuzhiyun 
/*
 * convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects
 *
 * Flattens an ACPI package of integers and buffers into a single
 * ACPI_TYPE_BUFFER object: each integer contributes 4 bytes of its
 * value (byte order as stored in memory), each buffer is copied
 * verbatim.  Consumes (frees) @pkg; returns NULL on malformed input or
 * allocation failure.
 */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	/* first pass: compute the flattened payload size */
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	/* single allocation: object header immediately followed by payload */
	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	/* second pass: copy each element into the flattened payload */
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}
err:
	ACPI_FREE(pkg);
	return buf;
}
283*4882a593Smuzhiyun 
int_to_buf(union acpi_object * integer)284*4882a593Smuzhiyun static union acpi_object *int_to_buf(union acpi_object *integer)
285*4882a593Smuzhiyun {
286*4882a593Smuzhiyun 	union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
287*4882a593Smuzhiyun 	void *dst = NULL;
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	if (!buf)
290*4882a593Smuzhiyun 		goto err;
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun 	if (integer->type != ACPI_TYPE_INTEGER) {
293*4882a593Smuzhiyun 		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
294*4882a593Smuzhiyun 				integer->type);
295*4882a593Smuzhiyun 		goto err;
296*4882a593Smuzhiyun 	}
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun 	dst = buf + 1;
299*4882a593Smuzhiyun 	buf->type = ACPI_TYPE_BUFFER;
300*4882a593Smuzhiyun 	buf->buffer.length = 4;
301*4882a593Smuzhiyun 	buf->buffer.pointer = dst;
302*4882a593Smuzhiyun 	memcpy(dst, &integer->integer.value, 4);
303*4882a593Smuzhiyun err:
304*4882a593Smuzhiyun 	ACPI_FREE(integer);
305*4882a593Smuzhiyun 	return buf;
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun 
/*
 * Evaluate _LSW to write @len bytes of label data at @offset.
 * Returns the method's integer status repackaged as a buffer object,
 * or NULL on evaluation failure.
 */
static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	union acpi_object argv[3] = {
		{ .integer = { .type = ACPI_TYPE_INTEGER, .value = offset } },
		{ .integer = { .type = ACPI_TYPE_INTEGER, .value = len } },
		{ .buffer = { .type = ACPI_TYPE_BUFFER, .length = len,
			      .pointer = data } },
	};
	struct acpi_object_list input = { .count = 3, .pointer = argv };
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status rc;

	rc = acpi_evaluate_object(handle, "_LSW", &input, &out);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(out.pointer);
}
337*4882a593Smuzhiyun 
/*
 * Evaluate _LSR to read @len bytes of label data at @offset.
 * Returns the method's result package flattened into a buffer object,
 * or NULL on evaluation failure.
 */
static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	union acpi_object argv[2] = {
		{ .integer = { .type = ACPI_TYPE_INTEGER, .value = offset } },
		{ .integer = { .type = ACPI_TYPE_INTEGER, .value = len } },
	};
	struct acpi_object_list input = { .count = 2, .pointer = argv };
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status rc;

	rc = acpi_evaluate_object(handle, "_LSR", &input, &out);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(out.pointer);
}
362*4882a593Smuzhiyun 
/*
 * Evaluate _LSI (label storage information).  Returns the result
 * package flattened into a buffer object, or NULL on failure.
 */
static union acpi_object *acpi_label_info(acpi_handle handle)
{
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_LSI", NULL, &out)))
		return NULL;
	return pkg_to_buf(out.pointer);
}
373*4882a593Smuzhiyun 
nfit_dsm_revid(unsigned family,unsigned func)374*4882a593Smuzhiyun static u8 nfit_dsm_revid(unsigned family, unsigned func)
375*4882a593Smuzhiyun {
376*4882a593Smuzhiyun 	static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
377*4882a593Smuzhiyun 		[NVDIMM_FAMILY_INTEL] = {
378*4882a593Smuzhiyun 			[NVDIMM_INTEL_GET_MODES ...
379*4882a593Smuzhiyun 				NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2,
380*4882a593Smuzhiyun 		},
381*4882a593Smuzhiyun 	};
382*4882a593Smuzhiyun 	u8 id;
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun 	if (family > NVDIMM_FAMILY_MAX)
385*4882a593Smuzhiyun 		return 0;
386*4882a593Smuzhiyun 	if (func > NVDIMM_CMD_MAX)
387*4882a593Smuzhiyun 		return 0;
388*4882a593Smuzhiyun 	id = revid_table[family][func];
389*4882a593Smuzhiyun 	if (id == 0)
390*4882a593Smuzhiyun 		return 1; /* default */
391*4882a593Smuzhiyun 	return id;
392*4882a593Smuzhiyun }
393*4882a593Smuzhiyun 
payload_dumpable(struct nvdimm * nvdimm,unsigned int func)394*4882a593Smuzhiyun static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
395*4882a593Smuzhiyun {
396*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun 	if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL
399*4882a593Smuzhiyun 			&& func >= NVDIMM_INTEL_GET_SECURITY_STATE
400*4882a593Smuzhiyun 			&& func <= NVDIMM_INTEL_MASTER_SECURE_ERASE)
401*4882a593Smuzhiyun 		return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG);
402*4882a593Smuzhiyun 	return true;
403*4882a593Smuzhiyun }
404*4882a593Smuzhiyun 
/*
 * Resolve the DSM function number for @cmd.  For ND_CMD_CALL the
 * function number and family come from the user-supplied @call_pkg
 * envelope after validation; otherwise Linux command numbers are used
 * directly where they coincide with the DSM function space.  Returns a
 * negative errno for an invalid envelope.
 */
static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
		struct nd_cmd_pkg *call_pkg, int *family)
{
	if (call_pkg) {
		int i;

		/* a DIMM passthrough must target the DIMM's own family */
		if (nfit_mem && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		/* reject envelopes with any reserved field set */
		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;
		*family = call_pkg->nd_family;
		return call_pkg->nd_command;
	}

	/* In the !call_pkg case, bus commands == bus functions */
	if (!nfit_mem)
		return cmd;

	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
		return cmd;

	/*
	 * Force function number validation to fail since 0 is never
	 * published as a valid function in dsm_mask.
	 */
	return 0;
}
435*4882a593Smuzhiyun 
/**
 * acpi_nfit_ctl - dispatch a libnvdimm command to the ACPI _DSM / _LS* methods
 * @nd_desc: bus descriptor, embedded in a struct acpi_nfit_desc
 * @nvdimm: target DIMM for dimm-scope commands, NULL for bus-scope
 * @cmd: libnvdimm command number (ND_CMD_*), ND_CMD_CALL for passthrough
 * @buf: command payload, in and out; a struct nd_cmd_pkg for ND_CMD_CALL
 * @buf_len: total size of @buf in bytes
 * @cmd_rc: optional out parameter for the translated firmware status
 *
 * Returns a negative errno for transport or validation failures.  On
 * successful transport, returns the number of output bytes left
 * unfilled (0 when the output fit exactly) and reports the translated
 * firmware status through @cmd_rc.
 */
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	const guid_t *guid;
	int func, rc, i;
	int family = 0;

	if (cmd_rc)
		*cmd_rc = -EINVAL;

	if (cmd == ND_CMD_CALL)
		call_pkg = buf;
	func = cmd_to_func(nfit_mem, cmd, call_pkg, &family);
	if (func < 0)
		return func;

	if (nvdimm) {
		/* dimm-scope: masks, GUID, and handle come from the nfit_mem */
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
			/* secondary bus family passthrough */
			family = call_pkg->nd_family;
			if (family > NVDIMM_BUS_FAMILY_MAX ||
			    !test_bit(family, &nd_desc->bus_family_mask))
				return -EINVAL;
			/* clamp speculation on the user-controlled index */
			family = array_index_nospec(family,
						    NVDIMM_BUS_FAMILY_MAX + 1);
			dsm_mask = acpi_desc->family_dsm_mask[family];
			guid = to_nfit_bus_uuid(family);
		} else {
			dsm_mask = acpi_desc->bus_dsm_mask;
			guid = to_nfit_uuid(NFIT_DEV_BUS);
		}
		desc = nd_cmd_bus_desc(cmd);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	/*
	 * Check for a valid command.  For ND_CMD_CALL, we also have to
	 * make sure that the DSM function is supported.
	 */
	if (cmd == ND_CMD_CALL &&
	    (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask)))
		return -ENOTTY;
	else if (!test_bit(cmd, &cmd_mask))
		return -ENOTTY;

	/* wrap the input payload in the single-buffer package _DSM expects */
	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n",
		dimm_name, cmd, family, func, in_buf.buffer.length);
	if (payload_dumpable(nvdimm, func))
		print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);

	/* call the BIOS, prefer the named methods over _DSM if available */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
			&& test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else {
		u8 revid;

		if (nvdimm)
			revid = nfit_dsm_revid(nfit_mem->family, func);
		else
			revid = 1;
		out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	}

	if (!out_obj) {
		dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
		return -EINVAL;
	}

	if (out_obj->type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
				dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	if (call_pkg) {
		/* passthrough: hand the raw firmware output to the caller */
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		if (cmd_rc)
			*cmd_rc = 0;
		return 0;
	}

	/* unpack the output fields described by desc into buf */
	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
					dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
					&& cmd <= ND_CMD_CLEAR_ERROR)
				|| (nvdimm && cmd >= ND_CMD_SMART
					&& cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);
657*4882a593Smuzhiyun 
/* Translate an NFIT SPA range type to a human-readable label for debug. */
static const char *spa_type_name(u16 type)
{
	static const char * const names[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	return type > NFIT_SPA_PCD ? "unknown" : names[type];
}
677*4882a593Smuzhiyun 
nfit_spa_type(struct acpi_nfit_system_address * spa)678*4882a593Smuzhiyun int nfit_spa_type(struct acpi_nfit_system_address *spa)
679*4882a593Smuzhiyun {
680*4882a593Smuzhiyun 	int i;
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun 	for (i = 0; i < NFIT_UUID_MAX; i++)
683*4882a593Smuzhiyun 		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
684*4882a593Smuzhiyun 			return i;
685*4882a593Smuzhiyun 	return -1;
686*4882a593Smuzhiyun }
687*4882a593Smuzhiyun 
add_spa(struct acpi_nfit_desc * acpi_desc,struct nfit_table_prev * prev,struct acpi_nfit_system_address * spa)688*4882a593Smuzhiyun static bool add_spa(struct acpi_nfit_desc *acpi_desc,
689*4882a593Smuzhiyun 		struct nfit_table_prev *prev,
690*4882a593Smuzhiyun 		struct acpi_nfit_system_address *spa)
691*4882a593Smuzhiyun {
692*4882a593Smuzhiyun 	struct device *dev = acpi_desc->dev;
693*4882a593Smuzhiyun 	struct nfit_spa *nfit_spa;
694*4882a593Smuzhiyun 
695*4882a593Smuzhiyun 	if (spa->header.length != sizeof(*spa))
696*4882a593Smuzhiyun 		return false;
697*4882a593Smuzhiyun 
698*4882a593Smuzhiyun 	list_for_each_entry(nfit_spa, &prev->spas, list) {
699*4882a593Smuzhiyun 		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
700*4882a593Smuzhiyun 			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
701*4882a593Smuzhiyun 			return true;
702*4882a593Smuzhiyun 		}
703*4882a593Smuzhiyun 	}
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun 	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
706*4882a593Smuzhiyun 			GFP_KERNEL);
707*4882a593Smuzhiyun 	if (!nfit_spa)
708*4882a593Smuzhiyun 		return false;
709*4882a593Smuzhiyun 	INIT_LIST_HEAD(&nfit_spa->list);
710*4882a593Smuzhiyun 	memcpy(nfit_spa->spa, spa, sizeof(*spa));
711*4882a593Smuzhiyun 	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
712*4882a593Smuzhiyun 	dev_dbg(dev, "spa index: %d type: %s\n",
713*4882a593Smuzhiyun 			spa->range_index,
714*4882a593Smuzhiyun 			spa_type_name(nfit_spa_type(spa)));
715*4882a593Smuzhiyun 	return true;
716*4882a593Smuzhiyun }
717*4882a593Smuzhiyun 
add_memdev(struct acpi_nfit_desc * acpi_desc,struct nfit_table_prev * prev,struct acpi_nfit_memory_map * memdev)718*4882a593Smuzhiyun static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
719*4882a593Smuzhiyun 		struct nfit_table_prev *prev,
720*4882a593Smuzhiyun 		struct acpi_nfit_memory_map *memdev)
721*4882a593Smuzhiyun {
722*4882a593Smuzhiyun 	struct device *dev = acpi_desc->dev;
723*4882a593Smuzhiyun 	struct nfit_memdev *nfit_memdev;
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun 	if (memdev->header.length != sizeof(*memdev))
726*4882a593Smuzhiyun 		return false;
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun 	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
729*4882a593Smuzhiyun 		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
730*4882a593Smuzhiyun 			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
731*4882a593Smuzhiyun 			return true;
732*4882a593Smuzhiyun 		}
733*4882a593Smuzhiyun 
734*4882a593Smuzhiyun 	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
735*4882a593Smuzhiyun 			GFP_KERNEL);
736*4882a593Smuzhiyun 	if (!nfit_memdev)
737*4882a593Smuzhiyun 		return false;
738*4882a593Smuzhiyun 	INIT_LIST_HEAD(&nfit_memdev->list);
739*4882a593Smuzhiyun 	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
740*4882a593Smuzhiyun 	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
741*4882a593Smuzhiyun 	dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
742*4882a593Smuzhiyun 			memdev->device_handle, memdev->range_index,
743*4882a593Smuzhiyun 			memdev->region_index, memdev->flags);
744*4882a593Smuzhiyun 	return true;
745*4882a593Smuzhiyun }
746*4882a593Smuzhiyun 
/*
 * Look up the SMBIOS physical id for the DIMM identified by
 * @device_handle across all registered NFIT bus descriptors.  On success
 * the memdev flags are stored in @flags and the (non-negative) physical
 * id is returned; -ENODEV when no DIMM matches.
 */
int nfit_get_smbios_id(u32 device_handle, u16 *flags)
{
	struct acpi_nfit_desc *acpi_desc;
	int rc = -ENODEV;

	mutex_lock(&acpi_desc_lock);
	list_for_each_entry(acpi_desc, &acpi_descs, list) {
		struct nfit_mem *nfit_mem;

		mutex_lock(&acpi_desc->init_mutex);
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
			struct acpi_nfit_memory_map *memdev =
					__to_nfit_memdev(nfit_mem);

			if (memdev->device_handle != device_handle)
				continue;
			*flags = memdev->flags;
			rc = memdev->physical_id;
			break;
		}
		mutex_unlock(&acpi_desc->init_mutex);
		/* physical_id is a u16, so any non-negative rc is a hit */
		if (rc >= 0)
			break;
	}
	mutex_unlock(&acpi_desc_lock);

	return rc;
}
EXPORT_SYMBOL_GPL(nfit_get_smbios_id);
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun /*
776*4882a593Smuzhiyun  * An implementation may provide a truncated control region if no block windows
777*4882a593Smuzhiyun  * are defined.
778*4882a593Smuzhiyun  */
sizeof_dcr(struct acpi_nfit_control_region * dcr)779*4882a593Smuzhiyun static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
780*4882a593Smuzhiyun {
781*4882a593Smuzhiyun 	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
782*4882a593Smuzhiyun 				window_size))
783*4882a593Smuzhiyun 		return 0;
784*4882a593Smuzhiyun 	if (dcr->windows)
785*4882a593Smuzhiyun 		return sizeof(*dcr);
786*4882a593Smuzhiyun 	return offsetof(struct acpi_nfit_control_region, window_size);
787*4882a593Smuzhiyun }
788*4882a593Smuzhiyun 
add_dcr(struct acpi_nfit_desc * acpi_desc,struct nfit_table_prev * prev,struct acpi_nfit_control_region * dcr)789*4882a593Smuzhiyun static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
790*4882a593Smuzhiyun 		struct nfit_table_prev *prev,
791*4882a593Smuzhiyun 		struct acpi_nfit_control_region *dcr)
792*4882a593Smuzhiyun {
793*4882a593Smuzhiyun 	struct device *dev = acpi_desc->dev;
794*4882a593Smuzhiyun 	struct nfit_dcr *nfit_dcr;
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 	if (!sizeof_dcr(dcr))
797*4882a593Smuzhiyun 		return false;
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun 	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
800*4882a593Smuzhiyun 		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
801*4882a593Smuzhiyun 			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
802*4882a593Smuzhiyun 			return true;
803*4882a593Smuzhiyun 		}
804*4882a593Smuzhiyun 
805*4882a593Smuzhiyun 	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
806*4882a593Smuzhiyun 			GFP_KERNEL);
807*4882a593Smuzhiyun 	if (!nfit_dcr)
808*4882a593Smuzhiyun 		return false;
809*4882a593Smuzhiyun 	INIT_LIST_HEAD(&nfit_dcr->list);
810*4882a593Smuzhiyun 	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
811*4882a593Smuzhiyun 	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
812*4882a593Smuzhiyun 	dev_dbg(dev, "dcr index: %d windows: %d\n",
813*4882a593Smuzhiyun 			dcr->region_index, dcr->windows);
814*4882a593Smuzhiyun 	return true;
815*4882a593Smuzhiyun }
816*4882a593Smuzhiyun 
add_bdw(struct acpi_nfit_desc * acpi_desc,struct nfit_table_prev * prev,struct acpi_nfit_data_region * bdw)817*4882a593Smuzhiyun static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
818*4882a593Smuzhiyun 		struct nfit_table_prev *prev,
819*4882a593Smuzhiyun 		struct acpi_nfit_data_region *bdw)
820*4882a593Smuzhiyun {
821*4882a593Smuzhiyun 	struct device *dev = acpi_desc->dev;
822*4882a593Smuzhiyun 	struct nfit_bdw *nfit_bdw;
823*4882a593Smuzhiyun 
824*4882a593Smuzhiyun 	if (bdw->header.length != sizeof(*bdw))
825*4882a593Smuzhiyun 		return false;
826*4882a593Smuzhiyun 	list_for_each_entry(nfit_bdw, &prev->bdws, list)
827*4882a593Smuzhiyun 		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
828*4882a593Smuzhiyun 			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
829*4882a593Smuzhiyun 			return true;
830*4882a593Smuzhiyun 		}
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun 	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
833*4882a593Smuzhiyun 			GFP_KERNEL);
834*4882a593Smuzhiyun 	if (!nfit_bdw)
835*4882a593Smuzhiyun 		return false;
836*4882a593Smuzhiyun 	INIT_LIST_HEAD(&nfit_bdw->list);
837*4882a593Smuzhiyun 	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
838*4882a593Smuzhiyun 	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
839*4882a593Smuzhiyun 	dev_dbg(dev, "bdw dcr: %d windows: %d\n",
840*4882a593Smuzhiyun 			bdw->region_index, bdw->windows);
841*4882a593Smuzhiyun 	return true;
842*4882a593Smuzhiyun }
843*4882a593Smuzhiyun 
sizeof_idt(struct acpi_nfit_interleave * idt)844*4882a593Smuzhiyun static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
845*4882a593Smuzhiyun {
846*4882a593Smuzhiyun 	if (idt->header.length < sizeof(*idt))
847*4882a593Smuzhiyun 		return 0;
848*4882a593Smuzhiyun 	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
849*4882a593Smuzhiyun }
850*4882a593Smuzhiyun 
add_idt(struct acpi_nfit_desc * acpi_desc,struct nfit_table_prev * prev,struct acpi_nfit_interleave * idt)851*4882a593Smuzhiyun static bool add_idt(struct acpi_nfit_desc *acpi_desc,
852*4882a593Smuzhiyun 		struct nfit_table_prev *prev,
853*4882a593Smuzhiyun 		struct acpi_nfit_interleave *idt)
854*4882a593Smuzhiyun {
855*4882a593Smuzhiyun 	struct device *dev = acpi_desc->dev;
856*4882a593Smuzhiyun 	struct nfit_idt *nfit_idt;
857*4882a593Smuzhiyun 
858*4882a593Smuzhiyun 	if (!sizeof_idt(idt))
859*4882a593Smuzhiyun 		return false;
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 	list_for_each_entry(nfit_idt, &prev->idts, list) {
862*4882a593Smuzhiyun 		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
863*4882a593Smuzhiyun 			continue;
864*4882a593Smuzhiyun 
865*4882a593Smuzhiyun 		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
866*4882a593Smuzhiyun 			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
867*4882a593Smuzhiyun 			return true;
868*4882a593Smuzhiyun 		}
869*4882a593Smuzhiyun 	}
870*4882a593Smuzhiyun 
871*4882a593Smuzhiyun 	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
872*4882a593Smuzhiyun 			GFP_KERNEL);
873*4882a593Smuzhiyun 	if (!nfit_idt)
874*4882a593Smuzhiyun 		return false;
875*4882a593Smuzhiyun 	INIT_LIST_HEAD(&nfit_idt->list);
876*4882a593Smuzhiyun 	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
877*4882a593Smuzhiyun 	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
878*4882a593Smuzhiyun 	dev_dbg(dev, "idt index: %d num_lines: %d\n",
879*4882a593Smuzhiyun 			idt->interleave_index, idt->line_count);
880*4882a593Smuzhiyun 	return true;
881*4882a593Smuzhiyun }
882*4882a593Smuzhiyun 
sizeof_flush(struct acpi_nfit_flush_address * flush)883*4882a593Smuzhiyun static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
884*4882a593Smuzhiyun {
885*4882a593Smuzhiyun 	if (flush->header.length < sizeof(*flush))
886*4882a593Smuzhiyun 		return 0;
887*4882a593Smuzhiyun 	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
888*4882a593Smuzhiyun }
889*4882a593Smuzhiyun 
add_flush(struct acpi_nfit_desc * acpi_desc,struct nfit_table_prev * prev,struct acpi_nfit_flush_address * flush)890*4882a593Smuzhiyun static bool add_flush(struct acpi_nfit_desc *acpi_desc,
891*4882a593Smuzhiyun 		struct nfit_table_prev *prev,
892*4882a593Smuzhiyun 		struct acpi_nfit_flush_address *flush)
893*4882a593Smuzhiyun {
894*4882a593Smuzhiyun 	struct device *dev = acpi_desc->dev;
895*4882a593Smuzhiyun 	struct nfit_flush *nfit_flush;
896*4882a593Smuzhiyun 
897*4882a593Smuzhiyun 	if (!sizeof_flush(flush))
898*4882a593Smuzhiyun 		return false;
899*4882a593Smuzhiyun 
900*4882a593Smuzhiyun 	list_for_each_entry(nfit_flush, &prev->flushes, list) {
901*4882a593Smuzhiyun 		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
902*4882a593Smuzhiyun 			continue;
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 		if (memcmp(nfit_flush->flush, flush,
905*4882a593Smuzhiyun 					sizeof_flush(flush)) == 0) {
906*4882a593Smuzhiyun 			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
907*4882a593Smuzhiyun 			return true;
908*4882a593Smuzhiyun 		}
909*4882a593Smuzhiyun 	}
910*4882a593Smuzhiyun 
911*4882a593Smuzhiyun 	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
912*4882a593Smuzhiyun 			+ sizeof_flush(flush), GFP_KERNEL);
913*4882a593Smuzhiyun 	if (!nfit_flush)
914*4882a593Smuzhiyun 		return false;
915*4882a593Smuzhiyun 	INIT_LIST_HEAD(&nfit_flush->list);
916*4882a593Smuzhiyun 	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
917*4882a593Smuzhiyun 	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
918*4882a593Smuzhiyun 	dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
919*4882a593Smuzhiyun 			flush->device_handle, flush->hint_count);
920*4882a593Smuzhiyun 	return true;
921*4882a593Smuzhiyun }
922*4882a593Smuzhiyun 
add_platform_cap(struct acpi_nfit_desc * acpi_desc,struct acpi_nfit_capabilities * pcap)923*4882a593Smuzhiyun static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
924*4882a593Smuzhiyun 		struct acpi_nfit_capabilities *pcap)
925*4882a593Smuzhiyun {
926*4882a593Smuzhiyun 	struct device *dev = acpi_desc->dev;
927*4882a593Smuzhiyun 	u32 mask;
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 	mask = (1 << (pcap->highest_capability + 1)) - 1;
930*4882a593Smuzhiyun 	acpi_desc->platform_cap = pcap->capabilities & mask;
931*4882a593Smuzhiyun 	dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
932*4882a593Smuzhiyun 	return true;
933*4882a593Smuzhiyun }
934*4882a593Smuzhiyun 
add_table(struct acpi_nfit_desc * acpi_desc,struct nfit_table_prev * prev,void * table,const void * end)935*4882a593Smuzhiyun static void *add_table(struct acpi_nfit_desc *acpi_desc,
936*4882a593Smuzhiyun 		struct nfit_table_prev *prev, void *table, const void *end)
937*4882a593Smuzhiyun {
938*4882a593Smuzhiyun 	struct device *dev = acpi_desc->dev;
939*4882a593Smuzhiyun 	struct acpi_nfit_header *hdr;
940*4882a593Smuzhiyun 	void *err = ERR_PTR(-ENOMEM);
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun 	if (table >= end)
943*4882a593Smuzhiyun 		return NULL;
944*4882a593Smuzhiyun 
945*4882a593Smuzhiyun 	hdr = table;
946*4882a593Smuzhiyun 	if (!hdr->length) {
947*4882a593Smuzhiyun 		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
948*4882a593Smuzhiyun 			hdr->type);
949*4882a593Smuzhiyun 		return NULL;
950*4882a593Smuzhiyun 	}
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 	switch (hdr->type) {
953*4882a593Smuzhiyun 	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
954*4882a593Smuzhiyun 		if (!add_spa(acpi_desc, prev, table))
955*4882a593Smuzhiyun 			return err;
956*4882a593Smuzhiyun 		break;
957*4882a593Smuzhiyun 	case ACPI_NFIT_TYPE_MEMORY_MAP:
958*4882a593Smuzhiyun 		if (!add_memdev(acpi_desc, prev, table))
959*4882a593Smuzhiyun 			return err;
960*4882a593Smuzhiyun 		break;
961*4882a593Smuzhiyun 	case ACPI_NFIT_TYPE_CONTROL_REGION:
962*4882a593Smuzhiyun 		if (!add_dcr(acpi_desc, prev, table))
963*4882a593Smuzhiyun 			return err;
964*4882a593Smuzhiyun 		break;
965*4882a593Smuzhiyun 	case ACPI_NFIT_TYPE_DATA_REGION:
966*4882a593Smuzhiyun 		if (!add_bdw(acpi_desc, prev, table))
967*4882a593Smuzhiyun 			return err;
968*4882a593Smuzhiyun 		break;
969*4882a593Smuzhiyun 	case ACPI_NFIT_TYPE_INTERLEAVE:
970*4882a593Smuzhiyun 		if (!add_idt(acpi_desc, prev, table))
971*4882a593Smuzhiyun 			return err;
972*4882a593Smuzhiyun 		break;
973*4882a593Smuzhiyun 	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
974*4882a593Smuzhiyun 		if (!add_flush(acpi_desc, prev, table))
975*4882a593Smuzhiyun 			return err;
976*4882a593Smuzhiyun 		break;
977*4882a593Smuzhiyun 	case ACPI_NFIT_TYPE_SMBIOS:
978*4882a593Smuzhiyun 		dev_dbg(dev, "smbios\n");
979*4882a593Smuzhiyun 		break;
980*4882a593Smuzhiyun 	case ACPI_NFIT_TYPE_CAPABILITIES:
981*4882a593Smuzhiyun 		if (!add_platform_cap(acpi_desc, table))
982*4882a593Smuzhiyun 			return err;
983*4882a593Smuzhiyun 		break;
984*4882a593Smuzhiyun 	default:
985*4882a593Smuzhiyun 		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
986*4882a593Smuzhiyun 		break;
987*4882a593Smuzhiyun 	}
988*4882a593Smuzhiyun 
989*4882a593Smuzhiyun 	return table + hdr->length;
990*4882a593Smuzhiyun }
991*4882a593Smuzhiyun 
nfit_mem_find_spa_bdw(struct acpi_nfit_desc * acpi_desc,struct nfit_mem * nfit_mem)992*4882a593Smuzhiyun static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
993*4882a593Smuzhiyun 		struct nfit_mem *nfit_mem)
994*4882a593Smuzhiyun {
995*4882a593Smuzhiyun 	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
996*4882a593Smuzhiyun 	u16 dcr = nfit_mem->dcr->region_index;
997*4882a593Smuzhiyun 	struct nfit_spa *nfit_spa;
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
1000*4882a593Smuzhiyun 		u16 range_index = nfit_spa->spa->range_index;
1001*4882a593Smuzhiyun 		int type = nfit_spa_type(nfit_spa->spa);
1002*4882a593Smuzhiyun 		struct nfit_memdev *nfit_memdev;
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun 		if (type != NFIT_SPA_BDW)
1005*4882a593Smuzhiyun 			continue;
1006*4882a593Smuzhiyun 
1007*4882a593Smuzhiyun 		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1008*4882a593Smuzhiyun 			if (nfit_memdev->memdev->range_index != range_index)
1009*4882a593Smuzhiyun 				continue;
1010*4882a593Smuzhiyun 			if (nfit_memdev->memdev->device_handle != device_handle)
1011*4882a593Smuzhiyun 				continue;
1012*4882a593Smuzhiyun 			if (nfit_memdev->memdev->region_index != dcr)
1013*4882a593Smuzhiyun 				continue;
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 			nfit_mem->spa_bdw = nfit_spa->spa;
1016*4882a593Smuzhiyun 			return;
1017*4882a593Smuzhiyun 		}
1018*4882a593Smuzhiyun 	}
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun 	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
1021*4882a593Smuzhiyun 			nfit_mem->spa_dcr->range_index);
1022*4882a593Smuzhiyun 	nfit_mem->bdw = NULL;
1023*4882a593Smuzhiyun }
1024*4882a593Smuzhiyun 
nfit_mem_init_bdw(struct acpi_nfit_desc * acpi_desc,struct nfit_mem * nfit_mem,struct acpi_nfit_system_address * spa)1025*4882a593Smuzhiyun static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
1026*4882a593Smuzhiyun 		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
1027*4882a593Smuzhiyun {
1028*4882a593Smuzhiyun 	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
1029*4882a593Smuzhiyun 	struct nfit_memdev *nfit_memdev;
1030*4882a593Smuzhiyun 	struct nfit_bdw *nfit_bdw;
1031*4882a593Smuzhiyun 	struct nfit_idt *nfit_idt;
1032*4882a593Smuzhiyun 	u16 idt_idx, range_index;
1033*4882a593Smuzhiyun 
1034*4882a593Smuzhiyun 	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
1035*4882a593Smuzhiyun 		if (nfit_bdw->bdw->region_index != dcr)
1036*4882a593Smuzhiyun 			continue;
1037*4882a593Smuzhiyun 		nfit_mem->bdw = nfit_bdw->bdw;
1038*4882a593Smuzhiyun 		break;
1039*4882a593Smuzhiyun 	}
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun 	if (!nfit_mem->bdw)
1042*4882a593Smuzhiyun 		return;
1043*4882a593Smuzhiyun 
1044*4882a593Smuzhiyun 	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
1045*4882a593Smuzhiyun 
1046*4882a593Smuzhiyun 	if (!nfit_mem->spa_bdw)
1047*4882a593Smuzhiyun 		return;
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 	range_index = nfit_mem->spa_bdw->range_index;
1050*4882a593Smuzhiyun 	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1051*4882a593Smuzhiyun 		if (nfit_memdev->memdev->range_index != range_index ||
1052*4882a593Smuzhiyun 				nfit_memdev->memdev->region_index != dcr)
1053*4882a593Smuzhiyun 			continue;
1054*4882a593Smuzhiyun 		nfit_mem->memdev_bdw = nfit_memdev->memdev;
1055*4882a593Smuzhiyun 		idt_idx = nfit_memdev->memdev->interleave_index;
1056*4882a593Smuzhiyun 		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
1057*4882a593Smuzhiyun 			if (nfit_idt->idt->interleave_index != idt_idx)
1058*4882a593Smuzhiyun 				continue;
1059*4882a593Smuzhiyun 			nfit_mem->idt_bdw = nfit_idt->idt;
1060*4882a593Smuzhiyun 			break;
1061*4882a593Smuzhiyun 		}
1062*4882a593Smuzhiyun 		break;
1063*4882a593Smuzhiyun 	}
1064*4882a593Smuzhiyun }
1065*4882a593Smuzhiyun 
__nfit_mem_init(struct acpi_nfit_desc * acpi_desc,struct acpi_nfit_system_address * spa)1066*4882a593Smuzhiyun static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
1067*4882a593Smuzhiyun 		struct acpi_nfit_system_address *spa)
1068*4882a593Smuzhiyun {
1069*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem, *found;
1070*4882a593Smuzhiyun 	struct nfit_memdev *nfit_memdev;
1071*4882a593Smuzhiyun 	int type = spa ? nfit_spa_type(spa) : 0;
1072*4882a593Smuzhiyun 
1073*4882a593Smuzhiyun 	switch (type) {
1074*4882a593Smuzhiyun 	case NFIT_SPA_DCR:
1075*4882a593Smuzhiyun 	case NFIT_SPA_PM:
1076*4882a593Smuzhiyun 		break;
1077*4882a593Smuzhiyun 	default:
1078*4882a593Smuzhiyun 		if (spa)
1079*4882a593Smuzhiyun 			return 0;
1080*4882a593Smuzhiyun 	}
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun 	/*
1083*4882a593Smuzhiyun 	 * This loop runs in two modes, when a dimm is mapped the loop
1084*4882a593Smuzhiyun 	 * adds memdev associations to an existing dimm, or creates a
1085*4882a593Smuzhiyun 	 * dimm. In the unmapped dimm case this loop sweeps for memdev
1086*4882a593Smuzhiyun 	 * instances with an invalid / zero range_index and adds those
1087*4882a593Smuzhiyun 	 * dimms without spa associations.
1088*4882a593Smuzhiyun 	 */
1089*4882a593Smuzhiyun 	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1090*4882a593Smuzhiyun 		struct nfit_flush *nfit_flush;
1091*4882a593Smuzhiyun 		struct nfit_dcr *nfit_dcr;
1092*4882a593Smuzhiyun 		u32 device_handle;
1093*4882a593Smuzhiyun 		u16 dcr;
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun 		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
1096*4882a593Smuzhiyun 			continue;
1097*4882a593Smuzhiyun 		if (!spa && nfit_memdev->memdev->range_index)
1098*4882a593Smuzhiyun 			continue;
1099*4882a593Smuzhiyun 		found = NULL;
1100*4882a593Smuzhiyun 		dcr = nfit_memdev->memdev->region_index;
1101*4882a593Smuzhiyun 		device_handle = nfit_memdev->memdev->device_handle;
1102*4882a593Smuzhiyun 		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1103*4882a593Smuzhiyun 			if (__to_nfit_memdev(nfit_mem)->device_handle
1104*4882a593Smuzhiyun 					== device_handle) {
1105*4882a593Smuzhiyun 				found = nfit_mem;
1106*4882a593Smuzhiyun 				break;
1107*4882a593Smuzhiyun 			}
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 		if (found)
1110*4882a593Smuzhiyun 			nfit_mem = found;
1111*4882a593Smuzhiyun 		else {
1112*4882a593Smuzhiyun 			nfit_mem = devm_kzalloc(acpi_desc->dev,
1113*4882a593Smuzhiyun 					sizeof(*nfit_mem), GFP_KERNEL);
1114*4882a593Smuzhiyun 			if (!nfit_mem)
1115*4882a593Smuzhiyun 				return -ENOMEM;
1116*4882a593Smuzhiyun 			INIT_LIST_HEAD(&nfit_mem->list);
1117*4882a593Smuzhiyun 			nfit_mem->acpi_desc = acpi_desc;
1118*4882a593Smuzhiyun 			list_add(&nfit_mem->list, &acpi_desc->dimms);
1119*4882a593Smuzhiyun 		}
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1122*4882a593Smuzhiyun 			if (nfit_dcr->dcr->region_index != dcr)
1123*4882a593Smuzhiyun 				continue;
1124*4882a593Smuzhiyun 			/*
1125*4882a593Smuzhiyun 			 * Record the control region for the dimm.  For
1126*4882a593Smuzhiyun 			 * the ACPI 6.1 case, where there are separate
1127*4882a593Smuzhiyun 			 * control regions for the pmem vs blk
1128*4882a593Smuzhiyun 			 * interfaces, be sure to record the extended
1129*4882a593Smuzhiyun 			 * blk details.
1130*4882a593Smuzhiyun 			 */
1131*4882a593Smuzhiyun 			if (!nfit_mem->dcr)
1132*4882a593Smuzhiyun 				nfit_mem->dcr = nfit_dcr->dcr;
1133*4882a593Smuzhiyun 			else if (nfit_mem->dcr->windows == 0
1134*4882a593Smuzhiyun 					&& nfit_dcr->dcr->windows)
1135*4882a593Smuzhiyun 				nfit_mem->dcr = nfit_dcr->dcr;
1136*4882a593Smuzhiyun 			break;
1137*4882a593Smuzhiyun 		}
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun 		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
1140*4882a593Smuzhiyun 			struct acpi_nfit_flush_address *flush;
1141*4882a593Smuzhiyun 			u16 i;
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun 			if (nfit_flush->flush->device_handle != device_handle)
1144*4882a593Smuzhiyun 				continue;
1145*4882a593Smuzhiyun 			nfit_mem->nfit_flush = nfit_flush;
1146*4882a593Smuzhiyun 			flush = nfit_flush->flush;
1147*4882a593Smuzhiyun 			nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
1148*4882a593Smuzhiyun 					flush->hint_count,
1149*4882a593Smuzhiyun 					sizeof(struct resource),
1150*4882a593Smuzhiyun 					GFP_KERNEL);
1151*4882a593Smuzhiyun 			if (!nfit_mem->flush_wpq)
1152*4882a593Smuzhiyun 				return -ENOMEM;
1153*4882a593Smuzhiyun 			for (i = 0; i < flush->hint_count; i++) {
1154*4882a593Smuzhiyun 				struct resource *res = &nfit_mem->flush_wpq[i];
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 				res->start = flush->hint_address[i];
1157*4882a593Smuzhiyun 				res->end = res->start + 8 - 1;
1158*4882a593Smuzhiyun 			}
1159*4882a593Smuzhiyun 			break;
1160*4882a593Smuzhiyun 		}
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 		if (dcr && !nfit_mem->dcr) {
1163*4882a593Smuzhiyun 			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
1164*4882a593Smuzhiyun 					spa->range_index, dcr);
1165*4882a593Smuzhiyun 			return -ENODEV;
1166*4882a593Smuzhiyun 		}
1167*4882a593Smuzhiyun 
1168*4882a593Smuzhiyun 		if (type == NFIT_SPA_DCR) {
1169*4882a593Smuzhiyun 			struct nfit_idt *nfit_idt;
1170*4882a593Smuzhiyun 			u16 idt_idx;
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 			/* multiple dimms may share a SPA when interleaved */
1173*4882a593Smuzhiyun 			nfit_mem->spa_dcr = spa;
1174*4882a593Smuzhiyun 			nfit_mem->memdev_dcr = nfit_memdev->memdev;
1175*4882a593Smuzhiyun 			idt_idx = nfit_memdev->memdev->interleave_index;
1176*4882a593Smuzhiyun 			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
1177*4882a593Smuzhiyun 				if (nfit_idt->idt->interleave_index != idt_idx)
1178*4882a593Smuzhiyun 					continue;
1179*4882a593Smuzhiyun 				nfit_mem->idt_dcr = nfit_idt->idt;
1180*4882a593Smuzhiyun 				break;
1181*4882a593Smuzhiyun 			}
1182*4882a593Smuzhiyun 			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
1183*4882a593Smuzhiyun 		} else if (type == NFIT_SPA_PM) {
1184*4882a593Smuzhiyun 			/*
1185*4882a593Smuzhiyun 			 * A single dimm may belong to multiple SPA-PM
1186*4882a593Smuzhiyun 			 * ranges, record at least one in addition to
1187*4882a593Smuzhiyun 			 * any SPA-DCR range.
1188*4882a593Smuzhiyun 			 */
1189*4882a593Smuzhiyun 			nfit_mem->memdev_pmem = nfit_memdev->memdev;
1190*4882a593Smuzhiyun 		} else
1191*4882a593Smuzhiyun 			nfit_mem->memdev_dcr = nfit_memdev->memdev;
1192*4882a593Smuzhiyun 	}
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	return 0;
1195*4882a593Smuzhiyun }
1196*4882a593Smuzhiyun 
nfit_mem_cmp(void * priv,struct list_head * _a,struct list_head * _b)1197*4882a593Smuzhiyun static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
1198*4882a593Smuzhiyun {
1199*4882a593Smuzhiyun 	struct nfit_mem *a = container_of(_a, typeof(*a), list);
1200*4882a593Smuzhiyun 	struct nfit_mem *b = container_of(_b, typeof(*b), list);
1201*4882a593Smuzhiyun 	u32 handleA, handleB;
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	handleA = __to_nfit_memdev(a)->device_handle;
1204*4882a593Smuzhiyun 	handleB = __to_nfit_memdev(b)->device_handle;
1205*4882a593Smuzhiyun 	if (handleA < handleB)
1206*4882a593Smuzhiyun 		return -1;
1207*4882a593Smuzhiyun 	else if (handleA > handleB)
1208*4882a593Smuzhiyun 		return 1;
1209*4882a593Smuzhiyun 	return 0;
1210*4882a593Smuzhiyun }
1211*4882a593Smuzhiyun 
nfit_mem_init(struct acpi_nfit_desc * acpi_desc)1212*4882a593Smuzhiyun static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
1213*4882a593Smuzhiyun {
1214*4882a593Smuzhiyun 	struct nfit_spa *nfit_spa;
1215*4882a593Smuzhiyun 	int rc;
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 	/*
1219*4882a593Smuzhiyun 	 * For each SPA-DCR or SPA-PMEM address range find its
1220*4882a593Smuzhiyun 	 * corresponding MEMDEV(s).  From each MEMDEV find the
1221*4882a593Smuzhiyun 	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
1222*4882a593Smuzhiyun 	 * try to find a SPA-BDW and a corresponding BDW that references
1223*4882a593Smuzhiyun 	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
1224*4882a593Smuzhiyun 	 * BDWs are optional.
1225*4882a593Smuzhiyun 	 */
1226*4882a593Smuzhiyun 	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
1227*4882a593Smuzhiyun 		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
1228*4882a593Smuzhiyun 		if (rc)
1229*4882a593Smuzhiyun 			return rc;
1230*4882a593Smuzhiyun 	}
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	/*
1233*4882a593Smuzhiyun 	 * If a DIMM has failed to be mapped into SPA there will be no
1234*4882a593Smuzhiyun 	 * SPA entries above. Find and register all the unmapped DIMMs
1235*4882a593Smuzhiyun 	 * for reporting and recovery purposes.
1236*4882a593Smuzhiyun 	 */
1237*4882a593Smuzhiyun 	rc = __nfit_mem_init(acpi_desc, NULL);
1238*4882a593Smuzhiyun 	if (rc)
1239*4882a593Smuzhiyun 		return rc;
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun 	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	return 0;
1244*4882a593Smuzhiyun }
1245*4882a593Smuzhiyun 
bus_dsm_mask_show(struct device * dev,struct device_attribute * attr,char * buf)1246*4882a593Smuzhiyun static ssize_t bus_dsm_mask_show(struct device *dev,
1247*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1248*4882a593Smuzhiyun {
1249*4882a593Smuzhiyun 	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1250*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1251*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 	return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask);
1254*4882a593Smuzhiyun }
1255*4882a593Smuzhiyun static struct device_attribute dev_attr_bus_dsm_mask =
1256*4882a593Smuzhiyun 		__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);
1257*4882a593Smuzhiyun 
revision_show(struct device * dev,struct device_attribute * attr,char * buf)1258*4882a593Smuzhiyun static ssize_t revision_show(struct device *dev,
1259*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1260*4882a593Smuzhiyun {
1261*4882a593Smuzhiyun 	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1262*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1263*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
1266*4882a593Smuzhiyun }
1267*4882a593Smuzhiyun static DEVICE_ATTR_RO(revision);
1268*4882a593Smuzhiyun 
hw_error_scrub_show(struct device * dev,struct device_attribute * attr,char * buf)1269*4882a593Smuzhiyun static ssize_t hw_error_scrub_show(struct device *dev,
1270*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1271*4882a593Smuzhiyun {
1272*4882a593Smuzhiyun 	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1273*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1274*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
1277*4882a593Smuzhiyun }
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun /*
1280*4882a593Smuzhiyun  * The 'hw_error_scrub' attribute can have the following values written to it:
1281*4882a593Smuzhiyun  * '0': Switch to the default mode where an exception will only insert
1282*4882a593Smuzhiyun  *      the address of the memory error into the poison and badblocks lists.
1283*4882a593Smuzhiyun  * '1': Enable a full scrub to happen if an exception for a memory error is
1284*4882a593Smuzhiyun  *      received.
1285*4882a593Smuzhiyun  */
hw_error_scrub_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1286*4882a593Smuzhiyun static ssize_t hw_error_scrub_store(struct device *dev,
1287*4882a593Smuzhiyun 		struct device_attribute *attr, const char *buf, size_t size)
1288*4882a593Smuzhiyun {
1289*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc;
1290*4882a593Smuzhiyun 	ssize_t rc;
1291*4882a593Smuzhiyun 	long val;
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	rc = kstrtol(buf, 0, &val);
1294*4882a593Smuzhiyun 	if (rc)
1295*4882a593Smuzhiyun 		return rc;
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun 	nfit_device_lock(dev);
1298*4882a593Smuzhiyun 	nd_desc = dev_get_drvdata(dev);
1299*4882a593Smuzhiyun 	if (nd_desc) {
1300*4882a593Smuzhiyun 		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 		switch (val) {
1303*4882a593Smuzhiyun 		case HW_ERROR_SCRUB_ON:
1304*4882a593Smuzhiyun 			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
1305*4882a593Smuzhiyun 			break;
1306*4882a593Smuzhiyun 		case HW_ERROR_SCRUB_OFF:
1307*4882a593Smuzhiyun 			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
1308*4882a593Smuzhiyun 			break;
1309*4882a593Smuzhiyun 		default:
1310*4882a593Smuzhiyun 			rc = -EINVAL;
1311*4882a593Smuzhiyun 			break;
1312*4882a593Smuzhiyun 		}
1313*4882a593Smuzhiyun 	}
1314*4882a593Smuzhiyun 	nfit_device_unlock(dev);
1315*4882a593Smuzhiyun 	if (rc)
1316*4882a593Smuzhiyun 		return rc;
1317*4882a593Smuzhiyun 	return size;
1318*4882a593Smuzhiyun }
1319*4882a593Smuzhiyun static DEVICE_ATTR_RW(hw_error_scrub);
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun /*
1322*4882a593Smuzhiyun  * This shows the number of full Address Range Scrubs that have been
1323*4882a593Smuzhiyun  * completed since driver load time. Userspace can wait on this using
1324*4882a593Smuzhiyun  * select/poll etc. A '+' at the end indicates an ARS is in progress
1325*4882a593Smuzhiyun  */
scrub_show(struct device * dev,struct device_attribute * attr,char * buf)1326*4882a593Smuzhiyun static ssize_t scrub_show(struct device *dev,
1327*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1328*4882a593Smuzhiyun {
1329*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc;
1330*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc;
1331*4882a593Smuzhiyun 	ssize_t rc = -ENXIO;
1332*4882a593Smuzhiyun 	bool busy;
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 	nfit_device_lock(dev);
1335*4882a593Smuzhiyun 	nd_desc = dev_get_drvdata(dev);
1336*4882a593Smuzhiyun 	if (!nd_desc) {
1337*4882a593Smuzhiyun 		nfit_device_unlock(dev);
1338*4882a593Smuzhiyun 		return rc;
1339*4882a593Smuzhiyun 	}
1340*4882a593Smuzhiyun 	acpi_desc = to_acpi_desc(nd_desc);
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	mutex_lock(&acpi_desc->init_mutex);
1343*4882a593Smuzhiyun 	busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
1344*4882a593Smuzhiyun 		&& !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
1345*4882a593Smuzhiyun 	rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
1346*4882a593Smuzhiyun 	/* Allow an admin to poll the busy state at a higher rate */
1347*4882a593Smuzhiyun 	if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
1348*4882a593Smuzhiyun 				&acpi_desc->scrub_flags)) {
1349*4882a593Smuzhiyun 		acpi_desc->scrub_tmo = 1;
1350*4882a593Smuzhiyun 		mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
1351*4882a593Smuzhiyun 	}
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 	mutex_unlock(&acpi_desc->init_mutex);
1354*4882a593Smuzhiyun 	nfit_device_unlock(dev);
1355*4882a593Smuzhiyun 	return rc;
1356*4882a593Smuzhiyun }
1357*4882a593Smuzhiyun 
scrub_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)1358*4882a593Smuzhiyun static ssize_t scrub_store(struct device *dev,
1359*4882a593Smuzhiyun 		struct device_attribute *attr, const char *buf, size_t size)
1360*4882a593Smuzhiyun {
1361*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc;
1362*4882a593Smuzhiyun 	ssize_t rc;
1363*4882a593Smuzhiyun 	long val;
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	rc = kstrtol(buf, 0, &val);
1366*4882a593Smuzhiyun 	if (rc)
1367*4882a593Smuzhiyun 		return rc;
1368*4882a593Smuzhiyun 	if (val != 1)
1369*4882a593Smuzhiyun 		return -EINVAL;
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 	nfit_device_lock(dev);
1372*4882a593Smuzhiyun 	nd_desc = dev_get_drvdata(dev);
1373*4882a593Smuzhiyun 	if (nd_desc) {
1374*4882a593Smuzhiyun 		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 		rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
1377*4882a593Smuzhiyun 	}
1378*4882a593Smuzhiyun 	nfit_device_unlock(dev);
1379*4882a593Smuzhiyun 	if (rc)
1380*4882a593Smuzhiyun 		return rc;
1381*4882a593Smuzhiyun 	return size;
1382*4882a593Smuzhiyun }
1383*4882a593Smuzhiyun static DEVICE_ATTR_RW(scrub);
1384*4882a593Smuzhiyun 
ars_supported(struct nvdimm_bus * nvdimm_bus)1385*4882a593Smuzhiyun static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
1386*4882a593Smuzhiyun {
1387*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1388*4882a593Smuzhiyun 	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
1389*4882a593Smuzhiyun 		| 1 << ND_CMD_ARS_STATUS;
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	return (nd_desc->cmd_mask & mask) == mask;
1392*4882a593Smuzhiyun }
1393*4882a593Smuzhiyun 
nfit_visible(struct kobject * kobj,struct attribute * a,int n)1394*4882a593Smuzhiyun static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
1395*4882a593Smuzhiyun {
1396*4882a593Smuzhiyun 	struct device *dev = kobj_to_dev(kobj);
1397*4882a593Smuzhiyun 	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 	if (a == &dev_attr_scrub.attr)
1400*4882a593Smuzhiyun 		return ars_supported(nvdimm_bus) ? a->mode : 0;
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	if (a == &dev_attr_firmware_activate_noidle.attr)
1403*4882a593Smuzhiyun 		return intel_fwa_supported(nvdimm_bus) ? a->mode : 0;
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 	return a->mode;
1406*4882a593Smuzhiyun }
1407*4882a593Smuzhiyun 
/* Bus-level sysfs attributes, published under the "nfit" group below. */
static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	&dev_attr_bus_dsm_mask.attr,
	&dev_attr_firmware_activate_noidle.attr,
	NULL,
};
1416*4882a593Smuzhiyun 
/* "nfit" attribute group; nfit_visible() gates capability-specific entries. */
static const struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

/* NULL-terminated group list handed to the nvdimm bus registration. */
static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&acpi_nfit_attribute_group,
	NULL,
};
1427*4882a593Smuzhiyun 
/* Resolve a DIMM device to its NFIT memory-map (memdev) sub-table. */
static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nfit_mem *mem = nvdimm_provider_data(to_nvdimm(dev));

	return __to_nfit_memdev(mem);
}
1435*4882a593Smuzhiyun 
to_nfit_dcr(struct device * dev)1436*4882a593Smuzhiyun static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
1437*4882a593Smuzhiyun {
1438*4882a593Smuzhiyun 	struct nvdimm *nvdimm = to_nvdimm(dev);
1439*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 	return nfit_mem->dcr;
1442*4882a593Smuzhiyun }
1443*4882a593Smuzhiyun 
handle_show(struct device * dev,struct device_attribute * attr,char * buf)1444*4882a593Smuzhiyun static ssize_t handle_show(struct device *dev,
1445*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1446*4882a593Smuzhiyun {
1447*4882a593Smuzhiyun 	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	return sprintf(buf, "%#x\n", memdev->device_handle);
1450*4882a593Smuzhiyun }
1451*4882a593Smuzhiyun static DEVICE_ATTR_RO(handle);
1452*4882a593Smuzhiyun 
phys_id_show(struct device * dev,struct device_attribute * attr,char * buf)1453*4882a593Smuzhiyun static ssize_t phys_id_show(struct device *dev,
1454*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1455*4882a593Smuzhiyun {
1456*4882a593Smuzhiyun 	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	return sprintf(buf, "%#x\n", memdev->physical_id);
1459*4882a593Smuzhiyun }
1460*4882a593Smuzhiyun static DEVICE_ATTR_RO(phys_id);
1461*4882a593Smuzhiyun 
vendor_show(struct device * dev,struct device_attribute * attr,char * buf)1462*4882a593Smuzhiyun static ssize_t vendor_show(struct device *dev,
1463*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1464*4882a593Smuzhiyun {
1465*4882a593Smuzhiyun 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun 	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
1468*4882a593Smuzhiyun }
1469*4882a593Smuzhiyun static DEVICE_ATTR_RO(vendor);
1470*4882a593Smuzhiyun 
rev_id_show(struct device * dev,struct device_attribute * attr,char * buf)1471*4882a593Smuzhiyun static ssize_t rev_id_show(struct device *dev,
1472*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1473*4882a593Smuzhiyun {
1474*4882a593Smuzhiyun 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
1477*4882a593Smuzhiyun }
1478*4882a593Smuzhiyun static DEVICE_ATTR_RO(rev_id);
1479*4882a593Smuzhiyun 
device_show(struct device * dev,struct device_attribute * attr,char * buf)1480*4882a593Smuzhiyun static ssize_t device_show(struct device *dev,
1481*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1482*4882a593Smuzhiyun {
1483*4882a593Smuzhiyun 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
1486*4882a593Smuzhiyun }
1487*4882a593Smuzhiyun static DEVICE_ATTR_RO(device);
1488*4882a593Smuzhiyun 
subsystem_vendor_show(struct device * dev,struct device_attribute * attr,char * buf)1489*4882a593Smuzhiyun static ssize_t subsystem_vendor_show(struct device *dev,
1490*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1491*4882a593Smuzhiyun {
1492*4882a593Smuzhiyun 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun 	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
1495*4882a593Smuzhiyun }
1496*4882a593Smuzhiyun static DEVICE_ATTR_RO(subsystem_vendor);
1497*4882a593Smuzhiyun 
subsystem_rev_id_show(struct device * dev,struct device_attribute * attr,char * buf)1498*4882a593Smuzhiyun static ssize_t subsystem_rev_id_show(struct device *dev,
1499*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1500*4882a593Smuzhiyun {
1501*4882a593Smuzhiyun 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun 	return sprintf(buf, "0x%04x\n",
1504*4882a593Smuzhiyun 			be16_to_cpu(dcr->subsystem_revision_id));
1505*4882a593Smuzhiyun }
1506*4882a593Smuzhiyun static DEVICE_ATTR_RO(subsystem_rev_id);
1507*4882a593Smuzhiyun 
subsystem_device_show(struct device * dev,struct device_attribute * attr,char * buf)1508*4882a593Smuzhiyun static ssize_t subsystem_device_show(struct device *dev,
1509*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1510*4882a593Smuzhiyun {
1511*4882a593Smuzhiyun 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun 	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
1514*4882a593Smuzhiyun }
1515*4882a593Smuzhiyun static DEVICE_ATTR_RO(subsystem_device);
1516*4882a593Smuzhiyun 
num_nvdimm_formats(struct nvdimm * nvdimm)1517*4882a593Smuzhiyun static int num_nvdimm_formats(struct nvdimm *nvdimm)
1518*4882a593Smuzhiyun {
1519*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1520*4882a593Smuzhiyun 	int formats = 0;
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 	if (nfit_mem->memdev_pmem)
1523*4882a593Smuzhiyun 		formats++;
1524*4882a593Smuzhiyun 	if (nfit_mem->memdev_bdw)
1525*4882a593Smuzhiyun 		formats++;
1526*4882a593Smuzhiyun 	return formats;
1527*4882a593Smuzhiyun }
1528*4882a593Smuzhiyun 
format_show(struct device * dev,struct device_attribute * attr,char * buf)1529*4882a593Smuzhiyun static ssize_t format_show(struct device *dev,
1530*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1531*4882a593Smuzhiyun {
1532*4882a593Smuzhiyun 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1533*4882a593Smuzhiyun 
1534*4882a593Smuzhiyun 	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
1535*4882a593Smuzhiyun }
1536*4882a593Smuzhiyun static DEVICE_ATTR_RO(format);
1537*4882a593Smuzhiyun 
/*
 * Report the DIMM's secondary interface code: walk all memdev entries
 * sharing this DIMM's device handle and return the first control-region
 * code that differs from the primary one. Returns -ENXIO when no second
 * code exists.
 */
static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		/* only consider mappings belonging to this DIMM */
		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			/* skip the primary code; we want the other one */
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		/* rc changed from -ENXIO means we found a second code */
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);
1578*4882a593Smuzhiyun 
formats_show(struct device * dev,struct device_attribute * attr,char * buf)1579*4882a593Smuzhiyun static ssize_t formats_show(struct device *dev,
1580*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1581*4882a593Smuzhiyun {
1582*4882a593Smuzhiyun 	struct nvdimm *nvdimm = to_nvdimm(dev);
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
1585*4882a593Smuzhiyun }
1586*4882a593Smuzhiyun static DEVICE_ATTR_RO(formats);
1587*4882a593Smuzhiyun 
serial_show(struct device * dev,struct device_attribute * attr,char * buf)1588*4882a593Smuzhiyun static ssize_t serial_show(struct device *dev,
1589*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1590*4882a593Smuzhiyun {
1591*4882a593Smuzhiyun 	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
1594*4882a593Smuzhiyun }
1595*4882a593Smuzhiyun static DEVICE_ATTR_RO(serial);
1596*4882a593Smuzhiyun 
family_show(struct device * dev,struct device_attribute * attr,char * buf)1597*4882a593Smuzhiyun static ssize_t family_show(struct device *dev,
1598*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1599*4882a593Smuzhiyun {
1600*4882a593Smuzhiyun 	struct nvdimm *nvdimm = to_nvdimm(dev);
1601*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun 	if (nfit_mem->family < 0)
1604*4882a593Smuzhiyun 		return -ENXIO;
1605*4882a593Smuzhiyun 	return sprintf(buf, "%d\n", nfit_mem->family);
1606*4882a593Smuzhiyun }
1607*4882a593Smuzhiyun static DEVICE_ATTR_RO(family);
1608*4882a593Smuzhiyun 
dsm_mask_show(struct device * dev,struct device_attribute * attr,char * buf)1609*4882a593Smuzhiyun static ssize_t dsm_mask_show(struct device *dev,
1610*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1611*4882a593Smuzhiyun {
1612*4882a593Smuzhiyun 	struct nvdimm *nvdimm = to_nvdimm(dev);
1613*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	if (nfit_mem->family < 0)
1616*4882a593Smuzhiyun 		return -ENXIO;
1617*4882a593Smuzhiyun 	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
1618*4882a593Smuzhiyun }
1619*4882a593Smuzhiyun static DEVICE_ATTR_RO(dsm_mask);
1620*4882a593Smuzhiyun 
flags_show(struct device * dev,struct device_attribute * attr,char * buf)1621*4882a593Smuzhiyun static ssize_t flags_show(struct device *dev,
1622*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1623*4882a593Smuzhiyun {
1624*4882a593Smuzhiyun 	struct nvdimm *nvdimm = to_nvdimm(dev);
1625*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1626*4882a593Smuzhiyun 	u16 flags = __to_nfit_memdev(nfit_mem)->flags;
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
1629*4882a593Smuzhiyun 		flags |= ACPI_NFIT_MEM_FLUSH_FAILED;
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 	return sprintf(buf, "%s%s%s%s%s%s%s\n",
1632*4882a593Smuzhiyun 		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
1633*4882a593Smuzhiyun 		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
1634*4882a593Smuzhiyun 		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
1635*4882a593Smuzhiyun 		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
1636*4882a593Smuzhiyun 		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
1637*4882a593Smuzhiyun 		flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
1638*4882a593Smuzhiyun 		flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun static DEVICE_ATTR_RO(flags);
1641*4882a593Smuzhiyun 
id_show(struct device * dev,struct device_attribute * attr,char * buf)1642*4882a593Smuzhiyun static ssize_t id_show(struct device *dev,
1643*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1644*4882a593Smuzhiyun {
1645*4882a593Smuzhiyun 	struct nvdimm *nvdimm = to_nvdimm(dev);
1646*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	return sprintf(buf, "%s\n", nfit_mem->id);
1649*4882a593Smuzhiyun }
1650*4882a593Smuzhiyun static DEVICE_ATTR_RO(id);
1651*4882a593Smuzhiyun 
dirty_shutdown_show(struct device * dev,struct device_attribute * attr,char * buf)1652*4882a593Smuzhiyun static ssize_t dirty_shutdown_show(struct device *dev,
1653*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
1654*4882a593Smuzhiyun {
1655*4882a593Smuzhiyun 	struct nvdimm *nvdimm = to_nvdimm(dev);
1656*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1657*4882a593Smuzhiyun 
1658*4882a593Smuzhiyun 	return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
1659*4882a593Smuzhiyun }
1660*4882a593Smuzhiyun static DEVICE_ATTR_RO(dirty_shutdown);
1661*4882a593Smuzhiyun 
/* Per-DIMM sysfs attributes; visibility is gated below per DIMM state. */
static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	&dev_attr_dirty_shutdown.attr,
	NULL,
};
1682*4882a593Smuzhiyun 
acpi_nfit_dimm_attr_visible(struct kobject * kobj,struct attribute * a,int n)1683*4882a593Smuzhiyun static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
1684*4882a593Smuzhiyun 		struct attribute *a, int n)
1685*4882a593Smuzhiyun {
1686*4882a593Smuzhiyun 	struct device *dev = kobj_to_dev(kobj);
1687*4882a593Smuzhiyun 	struct nvdimm *nvdimm = to_nvdimm(dev);
1688*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1689*4882a593Smuzhiyun 
1690*4882a593Smuzhiyun 	if (!to_nfit_dcr(dev)) {
1691*4882a593Smuzhiyun 		/* Without a dcr only the memdev attributes can be surfaced */
1692*4882a593Smuzhiyun 		if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
1693*4882a593Smuzhiyun 				|| a == &dev_attr_flags.attr
1694*4882a593Smuzhiyun 				|| a == &dev_attr_family.attr
1695*4882a593Smuzhiyun 				|| a == &dev_attr_dsm_mask.attr)
1696*4882a593Smuzhiyun 			return a->mode;
1697*4882a593Smuzhiyun 		return 0;
1698*4882a593Smuzhiyun 	}
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun 	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
1701*4882a593Smuzhiyun 		return 0;
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
1704*4882a593Smuzhiyun 			&& a == &dev_attr_dirty_shutdown.attr)
1705*4882a593Smuzhiyun 		return 0;
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun 	return a->mode;
1708*4882a593Smuzhiyun }
1709*4882a593Smuzhiyun 
/* "nfit" attribute group for DIMM devices, gated per-attribute above. */
static const struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

/* NULL-terminated group list passed to nvdimm registration. */
static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&acpi_nfit_dimm_attribute_group,
	NULL,
};
1720*4882a593Smuzhiyun 
/*
 * Look up the nvdimm registered for an NFIT device handle, or NULL.
 * Linear scan: the DIMM population is small and fixed after init.
 */
static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;
	}

	return NULL;
}
1732*4882a593Smuzhiyun 
/*
 * Forward a DIMM ACPI notification to the sysfs "flags" attribute so
 * userspace pollers wake up. Only NFIT_NOTIFY_DIMM_HEALTH is handled;
 * other events are logged and ignored.
 */
void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct acpi_nfit_desc *desc;
	struct nfit_mem *mem;

	dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
				event);
		return;
	}

	desc = dev_get_drvdata(dev->parent);
	if (!desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	mem = dev_get_drvdata(dev);
	if (mem && mem->flags_attr)
		sysfs_notify_dirent(mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);
1760*4882a593Smuzhiyun 
/* ACPI notify handler trampoline: take the device lock, then dispatch. */
static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct device *dev = &((struct acpi_device *)data)->dev;

	nfit_device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	nfit_device_unlock(dev->parent);
}
1770*4882a593Smuzhiyun 
acpi_nvdimm_has_method(struct acpi_device * adev,char * method)1771*4882a593Smuzhiyun static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
1772*4882a593Smuzhiyun {
1773*4882a593Smuzhiyun 	acpi_handle handle;
1774*4882a593Smuzhiyun 	acpi_status status;
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 	status = acpi_get_handle(adev->handle, method, &handle);
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun 	if (ACPI_SUCCESS(status))
1779*4882a593Smuzhiyun 		return true;
1780*4882a593Smuzhiyun 	return false;
1781*4882a593Smuzhiyun }
1782*4882a593Smuzhiyun 
/*
 * Query the Intel SMART _DSM (ND_INTEL_SMART) once at discovery time and
 * cache the dirty-shutdown state/count in @nfit_mem. Declared __weak so
 * the nfit test harness can override it.
 */
__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
{
	struct device *dev = &nfit_mem->adev->dev;
	struct nd_intel_smart smart = { 0 };
	/* _DSM input: a package wrapping one empty buffer */
	union acpi_object in_buf = {
		.buffer.type = ACPI_TYPE_BUFFER,
		.buffer.length = 0,
	};
	union acpi_object in_obj = {
		.package.type = ACPI_TYPE_PACKAGE,
		.package.count = 1,
		.package.elements = &in_buf,
	};
	const u8 func = ND_INTEL_SMART;
	const guid_t *guid = to_nfit_uuid(nfit_mem->family);
	u8 revid = nfit_dsm_revid(nfit_mem->family, func);
	struct acpi_device *adev = nfit_mem->adev;
	acpi_handle handle = adev->handle;
	union acpi_object *out_obj;

	/* bail when the platform does not advertise the SMART function */
	if ((nfit_mem->dsm_mask & (1 << func)) == 0)
		return;

	out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
	if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER
			|| out_obj->buffer.length < sizeof(smart)) {
		dev_dbg(dev->parent, "%s: failed to retrieve initial health\n",
				dev_name(dev));
		/* ACPI_FREE(NULL) is a no-op, so this covers both cases */
		ACPI_FREE(out_obj);
		return;
	}
	/* copy out before freeing the ACPI-owned buffer */
	memcpy(&smart, out_obj->buffer.pointer, sizeof(smart));
	ACPI_FREE(out_obj);

	if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
		if (smart.shutdown_state)
			set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
	}

	if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
		set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
		nfit_mem->dirty_shutdown = smart.shutdown_count;
	}
}
1827*4882a593Smuzhiyun 
populate_shutdown_status(struct nfit_mem * nfit_mem)1828*4882a593Smuzhiyun static void populate_shutdown_status(struct nfit_mem *nfit_mem)
1829*4882a593Smuzhiyun {
1830*4882a593Smuzhiyun 	/*
1831*4882a593Smuzhiyun 	 * For DIMMs that provide a dynamic facility to retrieve a
1832*4882a593Smuzhiyun 	 * dirty-shutdown status and/or a dirty-shutdown count, cache
1833*4882a593Smuzhiyun 	 * these values in nfit_mem.
1834*4882a593Smuzhiyun 	 */
1835*4882a593Smuzhiyun 	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
1836*4882a593Smuzhiyun 		nfit_intel_shutdown_status(nfit_mem);
1837*4882a593Smuzhiyun }
1838*4882a593Smuzhiyun 
/*
 * acpi_nfit_add_dimm - bind @nfit_mem to its ACPI companion device and
 * discover the DIMM's command (_DSM) capabilities.
 *
 * Builds the unique dimm-id string from DCR fields, registers for ACPI
 * notifications on the DIMM object, probes which NVDIMM command family
 * the device answers to, and records the resulting DSM mask and label
 * (_LSI/_LSR/_LSW) capabilities in @nfit_mem.
 *
 * Return: 0 on success (including the "no DSMs" case), -ENODEV when the
 * companion ACPI device is missing (unless force_enable_dimms is set),
 * -ENXIO when notification-handler registration fails.
 */
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask, label_mask;
	const guid_t *guid;
	int i;
	int family = -1;	/* first family the DIMM answers to, if any */
	struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);

	/*
	 * Compose the dimm-id; the long form is only available when the
	 * DCR marks the manufacturing info fields as valid.
	 */
	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		sprintf(nfit_mem->id, "%04x-%08x",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));

	adev = to_acpi_dev(acpi_desc);
	if (!adev) {
		/* unit test case */
		populate_shutdown_status(nfit_mem);
		return 0;
	}

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
		ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}
	/*
	 * Record nfit_mem for the notification path to track back to
	 * the nfit sysfs attributes for this dimm device object.
	 */
	dev_set_drvdata(&adev_dimm->dev, nfit_mem);

	/*
	 * There are 4 "legacy" NVDIMM command sets
	 * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before
	 * an EFI working group was established to constrain this
	 * proliferation. The nfit driver probes for the supported command
	 * set by GUID. Note, if you're a platform developer looking to add
	 * a new command set to this probe, consider using an existing set,
	 * or otherwise seek approval to publish the command set at
	 * http://www.uefi.org/RFIC_LIST.
	 *
	 * Note, that checking for function0 (bit0) tells us if any commands
	 * are reachable through this GUID.
	 */
	clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask);
	for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) {
			set_bit(i, &nd_desc->dimm_family_mask);
			/* prefer default_dsm_family when multiple respond */
			if (family < 0 || i == default_dsm_family)
				family = i;
		}

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = family;
	if (override_dsm_mask && !disable_vendor_specific)
		dsm_mask = override_dsm_mask;
	else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = NVDIMM_INTEL_CMDMASK;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) {
		dsm_mask = 0x1f;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	/*
	 * Function 0 is the command interrogation function, don't
	 * export it to potential userspace use, and enable it to be
	 * used as an error value in acpi_nfit_ctl().
	 */
	dsm_mask &= ~1UL;

	/* record which of the family's candidate functions really exist */
	guid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, guid,
					nfit_dsm_revid(nfit_mem->family, i),
					1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	/*
	 * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
	 * due to their better semantics handling locked capacity.
	 */
	label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
		| 1 << ND_CMD_SET_CONFIG_DATA;
	if (family == NVDIMM_FAMILY_INTEL
			&& (dsm_mask & label_mask) == label_mask)
		/* skip _LS{I,R,W} enabling */;
	else {
		/* _LSR is only usable when _LSI is also present */
		if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
				&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
			dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
			set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
		}

		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
				&& acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
			dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
			set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
		}

		/*
		 * Quirk read-only label configurations to preserve
		 * access to label-less namespaces by default.
		 */
		if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
				&& !force_labels) {
			dev_dbg(dev, "%s: No _LSW, disable labels\n",
					dev_name(&adev_dimm->dev));
			clear_bit(NFIT_MEM_LSR, &nfit_mem->flags);
		} else
			dev_dbg(dev, "%s: Force enable labels\n",
					dev_name(&adev_dimm->dev));
	}

	populate_shutdown_status(nfit_mem);

	return 0;
}
1994*4882a593Smuzhiyun 
shutdown_dimm_notify(void * data)1995*4882a593Smuzhiyun static void shutdown_dimm_notify(void *data)
1996*4882a593Smuzhiyun {
1997*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc = data;
1998*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem;
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 	mutex_lock(&acpi_desc->init_mutex);
2001*4882a593Smuzhiyun 	/*
2002*4882a593Smuzhiyun 	 * Clear out the nfit_mem->flags_attr and shut down dimm event
2003*4882a593Smuzhiyun 	 * notifications.
2004*4882a593Smuzhiyun 	 */
2005*4882a593Smuzhiyun 	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
2006*4882a593Smuzhiyun 		struct acpi_device *adev_dimm = nfit_mem->adev;
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 		if (nfit_mem->flags_attr) {
2009*4882a593Smuzhiyun 			sysfs_put(nfit_mem->flags_attr);
2010*4882a593Smuzhiyun 			nfit_mem->flags_attr = NULL;
2011*4882a593Smuzhiyun 		}
2012*4882a593Smuzhiyun 		if (adev_dimm) {
2013*4882a593Smuzhiyun 			acpi_remove_notify_handler(adev_dimm->handle,
2014*4882a593Smuzhiyun 					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
2015*4882a593Smuzhiyun 			dev_set_drvdata(&adev_dimm->dev, NULL);
2016*4882a593Smuzhiyun 		}
2017*4882a593Smuzhiyun 	}
2018*4882a593Smuzhiyun 	mutex_unlock(&acpi_desc->init_mutex);
2019*4882a593Smuzhiyun }
2020*4882a593Smuzhiyun 
acpi_nfit_get_security_ops(int family)2021*4882a593Smuzhiyun static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
2022*4882a593Smuzhiyun {
2023*4882a593Smuzhiyun 	switch (family) {
2024*4882a593Smuzhiyun 	case NVDIMM_FAMILY_INTEL:
2025*4882a593Smuzhiyun 		return intel_security_ops;
2026*4882a593Smuzhiyun 	default:
2027*4882a593Smuzhiyun 		return NULL;
2028*4882a593Smuzhiyun 	}
2029*4882a593Smuzhiyun }
2030*4882a593Smuzhiyun 
acpi_nfit_get_fw_ops(struct nfit_mem * nfit_mem)2031*4882a593Smuzhiyun static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops(
2032*4882a593Smuzhiyun 		struct nfit_mem *nfit_mem)
2033*4882a593Smuzhiyun {
2034*4882a593Smuzhiyun 	unsigned long mask;
2035*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
2036*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	if (!nd_desc->fw_ops)
2039*4882a593Smuzhiyun 		return NULL;
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun 	if (nfit_mem->family != NVDIMM_FAMILY_INTEL)
2042*4882a593Smuzhiyun 		return NULL;
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 	mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
2045*4882a593Smuzhiyun 	if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK)
2046*4882a593Smuzhiyun 		return NULL;
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun 	return intel_fw_ops;
2049*4882a593Smuzhiyun }
2050*4882a593Smuzhiyun 
/*
 * acpi_nfit_register_dimms - create an nvdimm device for each nfit_mem.
 *
 * First pass: for every dimm described by the NFIT, collate memdev
 * flags, probe its DSM interface via acpi_nfit_add_dimm(), and register
 * a struct nvdimm with the derived flags / command masks.  After the
 * dimm-count check, a second pass pins each dimm's "nfit/flags" sysfs
 * dirent so the notification path can signal health-flag changes.
 * Teardown happens through the devm action shutdown_dimm_notify().
 *
 * Return: 0 on success, -ENOMEM if an nvdimm allocation fails, or the
 * error from nvdimm_bus_check_dimm_count()/devm_add_action_or_reset().
 */
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		struct nfit_memdev *nfit_memdev;
		u32 device_handle;
		u16 mem_flags;

		/* skip dimms that were already registered on a prior pass */
		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		/* pmem + block-window aliasing implies label support */
		if (nfit_mem->bdw && nfit_mem->memdev_pmem) {
			set_bit(NDD_ALIASING, &flags);
			set_bit(NDD_LABELING, &flags);
		}

		/* collate flags across all memdevs for this dimm */
		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			struct acpi_nfit_memory_map *dimm_memdev;

			dimm_memdev = __to_nfit_memdev(nfit_mem);
			if (dimm_memdev->device_handle
					!= nfit_memdev->memdev->device_handle)
				continue;
			dimm_memdev->flags |= nfit_memdev->memdev->flags;
		}

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			set_bit(NDD_UNARMED, &flags);

		/* a failed DSM probe skips the dimm rather than the bus */
		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
			/*
			 * These commands have a 1:1 correspondence
			 * between DSM payload and libnvdimm ioctl
			 * payload format.
			 */
			cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
		}

		/* Quirk to ignore LOCAL for labels on HYPERV DIMMs */
		if (nfit_mem->family == NVDIMM_FAMILY_HYPERV)
			set_bit(NDD_NOBLK, &flags);

		/* expose label commands only when _LSR/_LSW were found */
		if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
			set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
			set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
		}
		if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
			set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq, &nfit_mem->id[0],
				acpi_nfit_get_security_ops(nfit_mem->family),
				acpi_nfit_get_fw_ops(nfit_mem));
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		/* report any pre-existing failure flags for this dimm */
		dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n",
				nvdimm_name(nvdimm),
		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
		  mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");

	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		if (!nvdimm)
			continue;

		/* pin dimm/nfit/flags so the notify path can poke it */
		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun /*
2178*4882a593Smuzhiyun  * These constants are private because there are no kernel consumers of
2179*4882a593Smuzhiyun  * these commands.
2180*4882a593Smuzhiyun  */
/* Bus-scope DSM function numbers probed by acpi_nfit_init_dsms(). */
enum nfit_aux_cmds {
	NFIT_CMD_TRANSLATE_SPA = 5,	/* SPA -> dimm translation */
	NFIT_CMD_ARS_INJECT_SET = 7,	/* inject poison for testing */
	NFIT_CMD_ARS_INJECT_CLEAR = 8,	/* clear injected poison */
	NFIT_CMD_ARS_INJECT_GET = 9,	/* query injected poison */
};
2187*4882a593Smuzhiyun 
/*
 * acpi_nfit_init_dsms - discover bus-scope DSM capabilities.
 *
 * Probes the root NFIT device for ARS / error-clearing commands
 * (recorded in nd_desc->cmd_mask), the auxiliary injection commands
 * (recorded in acpi_desc->bus_dsm_mask), and the Intel bus-family
 * firmware-activate commands.  The Intel bus family and its fw_ops are
 * enabled only when the complete firmware-activate command set is
 * present.
 */
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
	unsigned long dsm_mask, *mask;
	struct acpi_device *adev;
	int i;

	set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
	set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask);

	/* enable nfit_test to inject bus command emulation */
	if (acpi_desc->bus_cmd_force_en) {
		nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
		mask = &nd_desc->bus_family_mask;
		if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) {
			set_bit(NVDIMM_BUS_FAMILY_INTEL, mask);
			nd_desc->fw_ops = intel_bus_fw_ops;
		}
	}

	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	/* standard ARS commands exported through the ioctl cmd_mask */
	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);

	/* full candidate set for the private bus_dsm_mask */
	dsm_mask =
		(1 << ND_CMD_ARS_CAP) |
		(1 << ND_CMD_ARS_START) |
		(1 << ND_CMD_ARS_STATUS) |
		(1 << ND_CMD_CLEAR_ERROR) |
		(1 << NFIT_CMD_TRANSLATE_SPA) |
		(1 << NFIT_CMD_ARS_INJECT_SET) |
		(1 << NFIT_CMD_ARS_INJECT_CLEAR) |
		(1 << NFIT_CMD_ARS_INJECT_GET);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &acpi_desc->bus_dsm_mask);

	/* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */
	dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
	guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL);
	mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, mask);

	/* require the complete fw-activate set before advertising it */
	if (*mask == dsm_mask) {
		set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask);
		nd_desc->fw_ops = intel_bus_fw_ops;
	}
}
2243*4882a593Smuzhiyun 
range_index_show(struct device * dev,struct device_attribute * attr,char * buf)2244*4882a593Smuzhiyun static ssize_t range_index_show(struct device *dev,
2245*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
2246*4882a593Smuzhiyun {
2247*4882a593Smuzhiyun 	struct nd_region *nd_region = to_nd_region(dev);
2248*4882a593Smuzhiyun 	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun 	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
2251*4882a593Smuzhiyun }
2252*4882a593Smuzhiyun static DEVICE_ATTR_RO(range_index);
2253*4882a593Smuzhiyun 
/* Attributes published under each region's "nfit" sysfs group. */
static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

/* NULL-terminated list handed to the region device at creation time. */
static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&acpi_nfit_region_attribute_group,
	NULL,
};
2268*4882a593Smuzhiyun 
/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;	/* dimm's offset within the SPA range */
		u32 serial_number;	/* from the dimm's control region */
		u32 pad;		/* layout is checksummed as-is for cookie1 */
	} mapping[0];	/* zero-length-array idiom: one entry per mapping */
};
2277*4882a593Smuzhiyun 
/* v1.2 interleave-set descriptor; adds manufacturing identity fields */
struct nfit_set_info2 {
	struct nfit_set_info_map2 {
		u64 region_offset;	/* dimm's offset within the SPA range */
		u32 serial_number;	/* from the dimm's control region */
		u16 vendor_id;
		u16 manufacturing_date;
		u8  manufacturing_location;
		u8  reserved[31];	/* checksummed as-is for cookie2 */
	} mapping[0];	/* zero-length-array idiom: one entry per mapping */
};
2288*4882a593Smuzhiyun 
sizeof_nfit_set_info(int num_mappings)2289*4882a593Smuzhiyun static size_t sizeof_nfit_set_info(int num_mappings)
2290*4882a593Smuzhiyun {
2291*4882a593Smuzhiyun 	return sizeof(struct nfit_set_info)
2292*4882a593Smuzhiyun 		+ num_mappings * sizeof(struct nfit_set_info_map);
2293*4882a593Smuzhiyun }
2294*4882a593Smuzhiyun 
sizeof_nfit_set_info2(int num_mappings)2295*4882a593Smuzhiyun static size_t sizeof_nfit_set_info2(int num_mappings)
2296*4882a593Smuzhiyun {
2297*4882a593Smuzhiyun 	return sizeof(struct nfit_set_info2)
2298*4882a593Smuzhiyun 		+ num_mappings * sizeof(struct nfit_set_info_map2);
2299*4882a593Smuzhiyun }
2300*4882a593Smuzhiyun 
cmp_map_compat(const void * m0,const void * m1)2301*4882a593Smuzhiyun static int cmp_map_compat(const void *m0, const void *m1)
2302*4882a593Smuzhiyun {
2303*4882a593Smuzhiyun 	const struct nfit_set_info_map *map0 = m0;
2304*4882a593Smuzhiyun 	const struct nfit_set_info_map *map1 = m1;
2305*4882a593Smuzhiyun 
2306*4882a593Smuzhiyun 	return memcmp(&map0->region_offset, &map1->region_offset,
2307*4882a593Smuzhiyun 			sizeof(u64));
2308*4882a593Smuzhiyun }
2309*4882a593Smuzhiyun 
cmp_map(const void * m0,const void * m1)2310*4882a593Smuzhiyun static int cmp_map(const void *m0, const void *m1)
2311*4882a593Smuzhiyun {
2312*4882a593Smuzhiyun 	const struct nfit_set_info_map *map0 = m0;
2313*4882a593Smuzhiyun 	const struct nfit_set_info_map *map1 = m1;
2314*4882a593Smuzhiyun 
2315*4882a593Smuzhiyun 	if (map0->region_offset < map1->region_offset)
2316*4882a593Smuzhiyun 		return -1;
2317*4882a593Smuzhiyun 	else if (map0->region_offset > map1->region_offset)
2318*4882a593Smuzhiyun 		return 1;
2319*4882a593Smuzhiyun 	return 0;
2320*4882a593Smuzhiyun }
2321*4882a593Smuzhiyun 
cmp_map2(const void * m0,const void * m1)2322*4882a593Smuzhiyun static int cmp_map2(const void *m0, const void *m1)
2323*4882a593Smuzhiyun {
2324*4882a593Smuzhiyun 	const struct nfit_set_info_map2 *map0 = m0;
2325*4882a593Smuzhiyun 	const struct nfit_set_info_map2 *map1 = m1;
2326*4882a593Smuzhiyun 
2327*4882a593Smuzhiyun 	if (map0->region_offset < map1->region_offset)
2328*4882a593Smuzhiyun 		return -1;
2329*4882a593Smuzhiyun 	else if (map0->region_offset > map1->region_offset)
2330*4882a593Smuzhiyun 		return 1;
2331*4882a593Smuzhiyun 	return 0;
2332*4882a593Smuzhiyun }
2333*4882a593Smuzhiyun 
2334*4882a593Smuzhiyun /* Retrieve the nth entry referencing this spa */
/* Retrieve the nth memdev entry that references @range_index. */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *entry;

	list_for_each_entry(entry, &acpi_desc->memdevs, list) {
		if (entry->memdev->range_index != range_index)
			continue;
		if (n-- == 0)
			return entry->memdev;
	}

	return NULL;
}
2346*4882a593Smuzhiyun 
/*
 * acpi_nfit_init_interleave_set - compute interleave-set cookies.
 *
 * Builds v1.1 and v1.2 descriptor tables from the per-dimm control
 * regions, sorts them into canonical order, and fletcher64-sums them
 * into cookie1/cookie2 (plus altcookie for v1.1 namespaces created
 * with the historical wrong sort order).  Also records each mapping's
 * position in the sorted v1.2 order.  The result is attached to
 * @ndr_desc->nd_set.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -ENODEV when a
 * mapping's DCR cannot be found.
 */
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info2 *info2;
	struct nfit_set_info *info;
	int i;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;
	import_guid(&nd_set->type_guid, spa->range_guid);

	/* scratch tables, freed at the end of this function */
	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
	if (!info2)
		return -ENOMEM;

	/* populate one v1.1 and one v1.2 entry per mapping from its DCR */
	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);
		struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = dcr->serial_number;

		map2->region_offset = memdev->region_offset;
		map2->serial_number = dcr->serial_number;
		map2->vendor_id = dcr->vendor_id;
		map2->manufacturing_date = dcr->manufacturing_date;
		map2->manufacturing_location = dcr->manufacturing_location;
	}

	/* v1.1 namespaces */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* v1.2 namespaces */
	sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
			cmp_map2, NULL);
	nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);

	/* support v1.1 namespaces created with the wrong sort order */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map_compat, NULL);
	nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* record the result of the sort for the mapping position */
	for (i = 0; i < nr; i++) {
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		int j;

		/* match the sorted entry back to its mapping by identity */
		for (j = 0; j < nr; j++) {
			struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
			struct nvdimm *nvdimm = mapping->nvdimm;
			struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
			struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

			if (map2->serial_number == dcr->serial_number &&
			    map2->vendor_id == dcr->vendor_id &&
			    map2->manufacturing_date == dcr->manufacturing_date &&
			    map2->manufacturing_location
				    == dcr->manufacturing_location) {
				mapping->position = i;
				break;
			}
		}
	}

	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);
	devm_kfree(dev, info2);

	return 0;
}
2439*4882a593Smuzhiyun 
/*
 * Translate a linear block-window offset into the de-interleaved
 * offset within the mapped aperture using the interleave table.
 */
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_off, line_idx, line_off;
	u64 line, tables_skipped, table_off;

	/* split the offset into whole lines plus an intra-line remainder */
	line = div_u64_rem(offset, mmio->line_size, &sub_off);
	/* count complete interleave tables preceding this line */
	tables_skipped = div_u64_rem(line, mmio->num_lines, &line_idx);
	line_off = idt->line_offset[line_idx] * mmio->line_size;
	table_off = tables_skipped * mmio->table_size;

	return mmio->base_offset + line_off + table_off + sub_off;
}
2454*4882a593Smuzhiyun 
read_blk_stat(struct nfit_blk * nfit_blk,unsigned int bw)2455*4882a593Smuzhiyun static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
2456*4882a593Smuzhiyun {
2457*4882a593Smuzhiyun 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2458*4882a593Smuzhiyun 	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
2459*4882a593Smuzhiyun 	const u32 STATUS_MASK = 0x80000037;
2460*4882a593Smuzhiyun 
2461*4882a593Smuzhiyun 	if (mmio->num_lines)
2462*4882a593Smuzhiyun 		offset = to_interleave_offset(offset, mmio);
2463*4882a593Smuzhiyun 
2464*4882a593Smuzhiyun 	return readl(mmio->addr.base + offset) & STATUS_MASK;
2465*4882a593Smuzhiyun }
2466*4882a593Smuzhiyun 
/*
 * Program the block control window (BCW) for an upcoming aperture
 * access: encode target DPA, transfer length and direction, write the
 * command register, flush, and optionally latch via a read-back.
 */
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 bcw, offset;

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	/* dpa and len are expressed in cache-line units in the BCW */
	bcw = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	bcw |= ((u64) (len >> L1_CACHE_SHIFT) & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	bcw |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(bcw, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region, NULL);

	/* read back to ensure the command has reached the DIMM */
	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
2495*4882a593Smuzhiyun 
/*
 * Perform one bounded BLK transfer through a single aperture window.
 *
 * @nfit_blk: BLK driver context for this DIMM
 * @dpa: DIMM physical address of the transfer
 * @iobuf: kernel buffer to copy from/to
 * @len: transfer length in bytes (caller caps this at mmio->size)
 * @rw: non-zero for write, zero for read
 * @lane: aperture window / control-register pair to use
 *
 * Returns 0 on success, -EIO if the status register reports an error.
 */
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	/* base_offset already folds in the aperture (bdw) offset */
	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	/* arm the block control window before touching the aperture */
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			/* interleaved: copy at most to the end of this line */
			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			/*
			 * NOTE(review): bdw_offset is already part of
			 * base_offset above, so it appears to be added
			 * twice here -- confirm against the
			 * non-interleaved aperture layout before changing.
			 */
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
		else {
			/* optionally invalidate stale cachelines before reading */
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				arch_invalidate_pmem((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	/* make writes durable before consulting the status register */
	if (rw)
		nvdimm_flush(nfit_blk->nd_region, NULL);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}
2544*4882a593Smuzhiyun 
/*
 * BLK region I/O entry point: split @len into window-sized chunks and
 * issue them one at a time over an exclusively-held lane.
 */
static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane;
	unsigned int done = 0;
	int rc = 0;

	/* serialize access to this DIMM's aperture windows */
	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 chunk = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + done,
				iobuf + done, chunk, rw, lane);
		if (rc)
			break;

		done += chunk;
		len -= chunk;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}
2570*4882a593Smuzhiyun 
/*
 * Capture interleave geometry for a BLK mmio window.
 *
 * @mmio: window to initialize
 * @idt: interleave description table, may be NULL (no interleave)
 * @interleave_ways: ways reported by the memory-device mapping
 *
 * Returns 0 on success (or when no interleave table applies), -ENXIO
 * when the memdev reports zero interleave ways, which would otherwise
 * produce a zero-sized (nonsensical) interleave table.
 */
static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (!idt)
		return 0;

	/*
	 * Validate before touching @mmio so a failure does not leave
	 * num_lines/line_size populated with a stale/partial geometry.
	 */
	if (interleave_ways == 0)
		return -ENXIO;

	mmio->num_lines = idt->line_count;
	mmio->line_size = idt->line_size;
	mmio->table_size = mmio->num_lines * interleave_ways
		* mmio->line_size;

	return 0;
}
2585*4882a593Smuzhiyun 
acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor * nd_desc,struct nvdimm * nvdimm,struct nfit_blk * nfit_blk)2586*4882a593Smuzhiyun static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
2587*4882a593Smuzhiyun 		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
2588*4882a593Smuzhiyun {
2589*4882a593Smuzhiyun 	struct nd_cmd_dimm_flags flags;
2590*4882a593Smuzhiyun 	int rc;
2591*4882a593Smuzhiyun 
2592*4882a593Smuzhiyun 	memset(&flags, 0, sizeof(flags));
2593*4882a593Smuzhiyun 	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
2594*4882a593Smuzhiyun 			sizeof(flags), NULL);
2595*4882a593Smuzhiyun 
2596*4882a593Smuzhiyun 	if (rc >= 0 && flags.status == 0)
2597*4882a593Smuzhiyun 		nfit_blk->dimm_flags = flags.flags;
2598*4882a593Smuzhiyun 	else if (rc == -ENOTTY) {
2599*4882a593Smuzhiyun 		/* fall back to a conservative default */
2600*4882a593Smuzhiyun 		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
2601*4882a593Smuzhiyun 		rc = 0;
2602*4882a593Smuzhiyun 	} else
2603*4882a593Smuzhiyun 		rc = -ENXIO;
2604*4882a593Smuzhiyun 
2605*4882a593Smuzhiyun 	return rc;
2606*4882a593Smuzhiyun }
2607*4882a593Smuzhiyun 
/*
 * Region-probe-time initialization of a BLK region: map the block data
 * window (BDW) aperture and the control/status (DCR) registers, capture
 * interleave geometry for both, fetch the DIMM's quirk flags, and
 * sanity-check that the control/status registers do not straddle an
 * interleave line.
 *
 * Returns 0 on success or a negative errno; failure leaves the region
 * unusable for BLK I/O.
 */
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	/* BLK requires both a control region (dcr) and a data window (bdw) */
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "missing%s%s%s\n",
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
			nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s failed to map bdw\n",
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s failed to init bdw interleave\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	/* control registers use an uncached (ioremap) mapping */
	mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
			nfit_mem->spa_dcr->length);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s failed to map dcr\n",
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s failed to init dcr interleave\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s failed get DIMM flags\n",
				nvdimm_name(nvdimm));
		return rc;
	}

	if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	/* no DCR interleave: nothing further to validate */
	if (mmio->line_size == 0)
		return 0;

	/*
	 * The 8-byte command/status registers must fit within a single
	 * interleave line, otherwise a readq/writeq would span
	 * discontiguous physical addresses.
	 */
	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}
2705*4882a593Smuzhiyun 
ars_get_cap(struct acpi_nfit_desc * acpi_desc,struct nd_cmd_ars_cap * cmd,struct nfit_spa * nfit_spa)2706*4882a593Smuzhiyun static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
2707*4882a593Smuzhiyun 		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
2708*4882a593Smuzhiyun {
2709*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2710*4882a593Smuzhiyun 	struct acpi_nfit_system_address *spa = nfit_spa->spa;
2711*4882a593Smuzhiyun 	int cmd_rc, rc;
2712*4882a593Smuzhiyun 
2713*4882a593Smuzhiyun 	cmd->address = spa->address;
2714*4882a593Smuzhiyun 	cmd->length = spa->length;
2715*4882a593Smuzhiyun 	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
2716*4882a593Smuzhiyun 			sizeof(*cmd), &cmd_rc);
2717*4882a593Smuzhiyun 	if (rc < 0)
2718*4882a593Smuzhiyun 		return rc;
2719*4882a593Smuzhiyun 	return cmd_rc;
2720*4882a593Smuzhiyun }
2721*4882a593Smuzhiyun 
ars_start(struct acpi_nfit_desc * acpi_desc,struct nfit_spa * nfit_spa,enum nfit_ars_state req_type)2722*4882a593Smuzhiyun static int ars_start(struct acpi_nfit_desc *acpi_desc,
2723*4882a593Smuzhiyun 		struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
2724*4882a593Smuzhiyun {
2725*4882a593Smuzhiyun 	int rc;
2726*4882a593Smuzhiyun 	int cmd_rc;
2727*4882a593Smuzhiyun 	struct nd_cmd_ars_start ars_start;
2728*4882a593Smuzhiyun 	struct acpi_nfit_system_address *spa = nfit_spa->spa;
2729*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2730*4882a593Smuzhiyun 
2731*4882a593Smuzhiyun 	memset(&ars_start, 0, sizeof(ars_start));
2732*4882a593Smuzhiyun 	ars_start.address = spa->address;
2733*4882a593Smuzhiyun 	ars_start.length = spa->length;
2734*4882a593Smuzhiyun 	if (req_type == ARS_REQ_SHORT)
2735*4882a593Smuzhiyun 		ars_start.flags = ND_ARS_RETURN_PREV_DATA;
2736*4882a593Smuzhiyun 	if (nfit_spa_type(spa) == NFIT_SPA_PM)
2737*4882a593Smuzhiyun 		ars_start.type = ND_ARS_PERSISTENT;
2738*4882a593Smuzhiyun 	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
2739*4882a593Smuzhiyun 		ars_start.type = ND_ARS_VOLATILE;
2740*4882a593Smuzhiyun 	else
2741*4882a593Smuzhiyun 		return -ENOTTY;
2742*4882a593Smuzhiyun 
2743*4882a593Smuzhiyun 	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2744*4882a593Smuzhiyun 			sizeof(ars_start), &cmd_rc);
2745*4882a593Smuzhiyun 
2746*4882a593Smuzhiyun 	if (rc < 0)
2747*4882a593Smuzhiyun 		return rc;
2748*4882a593Smuzhiyun 	if (cmd_rc < 0)
2749*4882a593Smuzhiyun 		return cmd_rc;
2750*4882a593Smuzhiyun 	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
2751*4882a593Smuzhiyun 	return 0;
2752*4882a593Smuzhiyun }
2753*4882a593Smuzhiyun 
ars_continue(struct acpi_nfit_desc * acpi_desc)2754*4882a593Smuzhiyun static int ars_continue(struct acpi_nfit_desc *acpi_desc)
2755*4882a593Smuzhiyun {
2756*4882a593Smuzhiyun 	int rc, cmd_rc;
2757*4882a593Smuzhiyun 	struct nd_cmd_ars_start ars_start;
2758*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2759*4882a593Smuzhiyun 	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2760*4882a593Smuzhiyun 
2761*4882a593Smuzhiyun 	ars_start = (struct nd_cmd_ars_start) {
2762*4882a593Smuzhiyun 		.address = ars_status->restart_address,
2763*4882a593Smuzhiyun 		.length = ars_status->restart_length,
2764*4882a593Smuzhiyun 		.type = ars_status->type,
2765*4882a593Smuzhiyun 	};
2766*4882a593Smuzhiyun 	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2767*4882a593Smuzhiyun 			sizeof(ars_start), &cmd_rc);
2768*4882a593Smuzhiyun 	if (rc < 0)
2769*4882a593Smuzhiyun 		return rc;
2770*4882a593Smuzhiyun 	return cmd_rc;
2771*4882a593Smuzhiyun }
2772*4882a593Smuzhiyun 
ars_get_status(struct acpi_nfit_desc * acpi_desc)2773*4882a593Smuzhiyun static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2774*4882a593Smuzhiyun {
2775*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2776*4882a593Smuzhiyun 	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2777*4882a593Smuzhiyun 	int rc, cmd_rc;
2778*4882a593Smuzhiyun 
2779*4882a593Smuzhiyun 	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2780*4882a593Smuzhiyun 			acpi_desc->max_ars, &cmd_rc);
2781*4882a593Smuzhiyun 	if (rc < 0)
2782*4882a593Smuzhiyun 		return rc;
2783*4882a593Smuzhiyun 	return cmd_rc;
2784*4882a593Smuzhiyun }
2785*4882a593Smuzhiyun 
/*
 * Decide whether the most recent ARS status payload completes the
 * kernel-initiated scrub of @nfit_spa.  If so, clear scrub_spa, notify
 * the region (if registered) to revalidate poison, and log completion.
 * Must be called with acpi_desc->init_mutex held.
 */
static void ars_complete(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_region *nd_region = nfit_spa->nd_region;
	struct device *dev;

	lockdep_assert_held(&acpi_desc->init_mutex);
	/*
	 * Only advance the ARS state for ARS runs initiated by the
	 * kernel, ignore ARS results from BIOS initiated runs for scrub
	 * completion tracking.
	 */
	if (acpi_desc->scrub_spa != nfit_spa)
		return;

	/*
	 * The payload is relevant if the scrub either started inside
	 * this spa or started before it (a scrub may span multiple
	 * ranges).
	 */
	if ((ars_status->address >= spa->address && ars_status->address
				< spa->address + spa->length)
			|| (ars_status->address < spa->address)) {
		/*
		 * Assume that if a scrub starts at an offset from the
		 * start of nfit_spa that we are in the continuation
		 * case.
		 *
		 * Otherwise, if the scrub covers the spa range, mark
		 * any pending request complete.
		 */
		if (ars_status->address + ars_status->length
				>= spa->address + spa->length)
				/* complete */;
		else
			return;
	} else
		return;

	/* scrub of this range is done; stop tracking it */
	acpi_desc->scrub_spa = NULL;
	if (nd_region) {
		dev = nd_region_dev(nd_region);
		/* prompt the region to re-read its badblocks list */
		nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
	} else
		dev = acpi_desc->dev;
	dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
}
2830*4882a593Smuzhiyun 
ars_status_process_records(struct acpi_nfit_desc * acpi_desc)2831*4882a593Smuzhiyun static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
2832*4882a593Smuzhiyun {
2833*4882a593Smuzhiyun 	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
2834*4882a593Smuzhiyun 	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2835*4882a593Smuzhiyun 	int rc;
2836*4882a593Smuzhiyun 	u32 i;
2837*4882a593Smuzhiyun 
2838*4882a593Smuzhiyun 	/*
2839*4882a593Smuzhiyun 	 * First record starts at 44 byte offset from the start of the
2840*4882a593Smuzhiyun 	 * payload.
2841*4882a593Smuzhiyun 	 */
2842*4882a593Smuzhiyun 	if (ars_status->out_length < 44)
2843*4882a593Smuzhiyun 		return 0;
2844*4882a593Smuzhiyun 
2845*4882a593Smuzhiyun 	/*
2846*4882a593Smuzhiyun 	 * Ignore potentially stale results that are only refreshed
2847*4882a593Smuzhiyun 	 * after a start-ARS event.
2848*4882a593Smuzhiyun 	 */
2849*4882a593Smuzhiyun 	if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
2850*4882a593Smuzhiyun 		dev_dbg(acpi_desc->dev, "skip %d stale records\n",
2851*4882a593Smuzhiyun 				ars_status->num_records);
2852*4882a593Smuzhiyun 		return 0;
2853*4882a593Smuzhiyun 	}
2854*4882a593Smuzhiyun 
2855*4882a593Smuzhiyun 	for (i = 0; i < ars_status->num_records; i++) {
2856*4882a593Smuzhiyun 		/* only process full records */
2857*4882a593Smuzhiyun 		if (ars_status->out_length
2858*4882a593Smuzhiyun 				< 44 + sizeof(struct nd_ars_record) * (i + 1))
2859*4882a593Smuzhiyun 			break;
2860*4882a593Smuzhiyun 		rc = nvdimm_bus_add_badrange(nvdimm_bus,
2861*4882a593Smuzhiyun 				ars_status->records[i].err_address,
2862*4882a593Smuzhiyun 				ars_status->records[i].length);
2863*4882a593Smuzhiyun 		if (rc)
2864*4882a593Smuzhiyun 			return rc;
2865*4882a593Smuzhiyun 	}
2866*4882a593Smuzhiyun 	if (i < ars_status->num_records)
2867*4882a593Smuzhiyun 		dev_warn(acpi_desc->dev, "detected truncated ars results\n");
2868*4882a593Smuzhiyun 
2869*4882a593Smuzhiyun 	return 0;
2870*4882a593Smuzhiyun }
2871*4882a593Smuzhiyun 
/* devm action: undo the insert_resource() done by acpi_nfit_insert_resource() */
static void acpi_nfit_remove_resource(void *data)
{
	remove_resource(data);
}
2878*4882a593Smuzhiyun 
acpi_nfit_insert_resource(struct acpi_nfit_desc * acpi_desc,struct nd_region_desc * ndr_desc)2879*4882a593Smuzhiyun static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2880*4882a593Smuzhiyun 		struct nd_region_desc *ndr_desc)
2881*4882a593Smuzhiyun {
2882*4882a593Smuzhiyun 	struct resource *res, *nd_res = ndr_desc->res;
2883*4882a593Smuzhiyun 	int is_pmem, ret;
2884*4882a593Smuzhiyun 
2885*4882a593Smuzhiyun 	/* No operation if the region is already registered as PMEM */
2886*4882a593Smuzhiyun 	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2887*4882a593Smuzhiyun 				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2888*4882a593Smuzhiyun 	if (is_pmem == REGION_INTERSECTS)
2889*4882a593Smuzhiyun 		return 0;
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun 	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2892*4882a593Smuzhiyun 	if (!res)
2893*4882a593Smuzhiyun 		return -ENOMEM;
2894*4882a593Smuzhiyun 
2895*4882a593Smuzhiyun 	res->name = "Persistent Memory";
2896*4882a593Smuzhiyun 	res->start = nd_res->start;
2897*4882a593Smuzhiyun 	res->end = nd_res->end;
2898*4882a593Smuzhiyun 	res->flags = IORESOURCE_MEM;
2899*4882a593Smuzhiyun 	res->desc = IORES_DESC_PERSISTENT_MEMORY;
2900*4882a593Smuzhiyun 
2901*4882a593Smuzhiyun 	ret = insert_resource(&iomem_resource, res);
2902*4882a593Smuzhiyun 	if (ret)
2903*4882a593Smuzhiyun 		return ret;
2904*4882a593Smuzhiyun 
2905*4882a593Smuzhiyun 	ret = devm_add_action_or_reset(acpi_desc->dev,
2906*4882a593Smuzhiyun 					acpi_nfit_remove_resource,
2907*4882a593Smuzhiyun 					res);
2908*4882a593Smuzhiyun 	if (ret)
2909*4882a593Smuzhiyun 		return ret;
2910*4882a593Smuzhiyun 
2911*4882a593Smuzhiyun 	return 0;
2912*4882a593Smuzhiyun }
2913*4882a593Smuzhiyun 
/*
 * Populate one nd_mapping_desc for the region described by @nfit_spa
 * from the memory-device table entry @memdev.  For DCR (BLK) ranges
 * this also creates the BLK region itself, since each BLK region maps
 * exactly one DIMM.
 *
 * Returns 0 on success, -ENODEV if the referenced DIMM was never
 * registered, or a negative errno from BLK region setup.
 */
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int rc;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		/* direct-mapped ranges: geometry comes from the memdev */
		mapping->start = memdev->address;
		mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			/* no data window: silently skip BLK for this DIMM */
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
			break;
		}

		/* BLK region geometry comes from the block data window */
		mapping->size = nfit_mem->bdw->capacity;
		mapping->start = nfit_mem->bdw->start_address;
		ndr_desc->num_lanes = nfit_mem->bdw->windows;
		ndr_desc->mapping = mapping;
		ndr_desc->num_mappings = 1;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
		if (rc)
			return rc;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}
2967*4882a593Smuzhiyun 
nfit_spa_is_virtual(struct acpi_nfit_system_address * spa)2968*4882a593Smuzhiyun static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2969*4882a593Smuzhiyun {
2970*4882a593Smuzhiyun 	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2971*4882a593Smuzhiyun 		nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2972*4882a593Smuzhiyun 		nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2973*4882a593Smuzhiyun 		nfit_spa_type(spa) == NFIT_SPA_PCD);
2974*4882a593Smuzhiyun }
2975*4882a593Smuzhiyun 
nfit_spa_is_volatile(struct acpi_nfit_system_address * spa)2976*4882a593Smuzhiyun static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
2977*4882a593Smuzhiyun {
2978*4882a593Smuzhiyun 	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2979*4882a593Smuzhiyun 		nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2980*4882a593Smuzhiyun 		nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
2981*4882a593Smuzhiyun }
2982*4882a593Smuzhiyun 
acpi_nfit_register_region(struct acpi_nfit_desc * acpi_desc,struct nfit_spa * nfit_spa)2983*4882a593Smuzhiyun static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2984*4882a593Smuzhiyun 		struct nfit_spa *nfit_spa)
2985*4882a593Smuzhiyun {
2986*4882a593Smuzhiyun 	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
2987*4882a593Smuzhiyun 	struct acpi_nfit_system_address *spa = nfit_spa->spa;
2988*4882a593Smuzhiyun 	struct nd_blk_region_desc ndbr_desc;
2989*4882a593Smuzhiyun 	struct nd_region_desc *ndr_desc;
2990*4882a593Smuzhiyun 	struct nfit_memdev *nfit_memdev;
2991*4882a593Smuzhiyun 	struct nvdimm_bus *nvdimm_bus;
2992*4882a593Smuzhiyun 	struct resource res;
2993*4882a593Smuzhiyun 	int count = 0, rc;
2994*4882a593Smuzhiyun 
2995*4882a593Smuzhiyun 	if (nfit_spa->nd_region)
2996*4882a593Smuzhiyun 		return 0;
2997*4882a593Smuzhiyun 
2998*4882a593Smuzhiyun 	if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
2999*4882a593Smuzhiyun 		dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
3000*4882a593Smuzhiyun 		return 0;
3001*4882a593Smuzhiyun 	}
3002*4882a593Smuzhiyun 
3003*4882a593Smuzhiyun 	memset(&res, 0, sizeof(res));
3004*4882a593Smuzhiyun 	memset(&mappings, 0, sizeof(mappings));
3005*4882a593Smuzhiyun 	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
3006*4882a593Smuzhiyun 	res.start = spa->address;
3007*4882a593Smuzhiyun 	res.end = res.start + spa->length - 1;
3008*4882a593Smuzhiyun 	ndr_desc = &ndbr_desc.ndr_desc;
3009*4882a593Smuzhiyun 	ndr_desc->res = &res;
3010*4882a593Smuzhiyun 	ndr_desc->provider_data = nfit_spa;
3011*4882a593Smuzhiyun 	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
3012*4882a593Smuzhiyun 	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
3013*4882a593Smuzhiyun 		ndr_desc->numa_node = pxm_to_online_node(spa->proximity_domain);
3014*4882a593Smuzhiyun 		ndr_desc->target_node = pxm_to_node(spa->proximity_domain);
3015*4882a593Smuzhiyun 	} else {
3016*4882a593Smuzhiyun 		ndr_desc->numa_node = NUMA_NO_NODE;
3017*4882a593Smuzhiyun 		ndr_desc->target_node = NUMA_NO_NODE;
3018*4882a593Smuzhiyun 	}
3019*4882a593Smuzhiyun 
3020*4882a593Smuzhiyun 	/* Fallback to address based numa information if node lookup failed */
3021*4882a593Smuzhiyun 	if (ndr_desc->numa_node == NUMA_NO_NODE) {
3022*4882a593Smuzhiyun 		ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
3023*4882a593Smuzhiyun 		dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
3024*4882a593Smuzhiyun 			NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
3025*4882a593Smuzhiyun 	}
3026*4882a593Smuzhiyun 	if (ndr_desc->target_node == NUMA_NO_NODE) {
3027*4882a593Smuzhiyun 		ndr_desc->target_node = phys_to_target_node(spa->address);
3028*4882a593Smuzhiyun 		dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
3029*4882a593Smuzhiyun 			NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
3030*4882a593Smuzhiyun 	}
3031*4882a593Smuzhiyun 
3032*4882a593Smuzhiyun 	/*
3033*4882a593Smuzhiyun 	 * Persistence domain bits are hierarchical, if
3034*4882a593Smuzhiyun 	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
3035*4882a593Smuzhiyun 	 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
3036*4882a593Smuzhiyun 	 */
3037*4882a593Smuzhiyun 	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
3038*4882a593Smuzhiyun 		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
3039*4882a593Smuzhiyun 	else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
3040*4882a593Smuzhiyun 		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
3041*4882a593Smuzhiyun 
3042*4882a593Smuzhiyun 	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
3043*4882a593Smuzhiyun 		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
3044*4882a593Smuzhiyun 		struct nd_mapping_desc *mapping;
3045*4882a593Smuzhiyun 
3046*4882a593Smuzhiyun 		/* range index 0 == unmapped in SPA or invalid-SPA */
3047*4882a593Smuzhiyun 		if (memdev->range_index == 0 || spa->range_index == 0)
3048*4882a593Smuzhiyun 			continue;
3049*4882a593Smuzhiyun 		if (memdev->range_index != spa->range_index)
3050*4882a593Smuzhiyun 			continue;
3051*4882a593Smuzhiyun 		if (count >= ND_MAX_MAPPINGS) {
3052*4882a593Smuzhiyun 			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
3053*4882a593Smuzhiyun 					spa->range_index, ND_MAX_MAPPINGS);
3054*4882a593Smuzhiyun 			return -ENXIO;
3055*4882a593Smuzhiyun 		}
3056*4882a593Smuzhiyun 		mapping = &mappings[count++];
3057*4882a593Smuzhiyun 		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
3058*4882a593Smuzhiyun 				memdev, nfit_spa);
3059*4882a593Smuzhiyun 		if (rc)
3060*4882a593Smuzhiyun 			goto out;
3061*4882a593Smuzhiyun 	}
3062*4882a593Smuzhiyun 
3063*4882a593Smuzhiyun 	ndr_desc->mapping = mappings;
3064*4882a593Smuzhiyun 	ndr_desc->num_mappings = count;
3065*4882a593Smuzhiyun 	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
3066*4882a593Smuzhiyun 	if (rc)
3067*4882a593Smuzhiyun 		goto out;
3068*4882a593Smuzhiyun 
3069*4882a593Smuzhiyun 	nvdimm_bus = acpi_desc->nvdimm_bus;
3070*4882a593Smuzhiyun 	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
3071*4882a593Smuzhiyun 		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
3072*4882a593Smuzhiyun 		if (rc) {
3073*4882a593Smuzhiyun 			dev_warn(acpi_desc->dev,
3074*4882a593Smuzhiyun 				"failed to insert pmem resource to iomem: %d\n",
3075*4882a593Smuzhiyun 				rc);
3076*4882a593Smuzhiyun 			goto out;
3077*4882a593Smuzhiyun 		}
3078*4882a593Smuzhiyun 
3079*4882a593Smuzhiyun 		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
3080*4882a593Smuzhiyun 				ndr_desc);
3081*4882a593Smuzhiyun 		if (!nfit_spa->nd_region)
3082*4882a593Smuzhiyun 			rc = -ENOMEM;
3083*4882a593Smuzhiyun 	} else if (nfit_spa_is_volatile(spa)) {
3084*4882a593Smuzhiyun 		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
3085*4882a593Smuzhiyun 				ndr_desc);
3086*4882a593Smuzhiyun 		if (!nfit_spa->nd_region)
3087*4882a593Smuzhiyun 			rc = -ENOMEM;
3088*4882a593Smuzhiyun 	} else if (nfit_spa_is_virtual(spa)) {
3089*4882a593Smuzhiyun 		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
3090*4882a593Smuzhiyun 				ndr_desc);
3091*4882a593Smuzhiyun 		if (!nfit_spa->nd_region)
3092*4882a593Smuzhiyun 			rc = -ENOMEM;
3093*4882a593Smuzhiyun 	}
3094*4882a593Smuzhiyun 
3095*4882a593Smuzhiyun  out:
3096*4882a593Smuzhiyun 	if (rc)
3097*4882a593Smuzhiyun 		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
3098*4882a593Smuzhiyun 				nfit_spa->spa->range_index);
3099*4882a593Smuzhiyun 	return rc;
3100*4882a593Smuzhiyun }
3101*4882a593Smuzhiyun 
ars_status_alloc(struct acpi_nfit_desc * acpi_desc)3102*4882a593Smuzhiyun static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc)
3103*4882a593Smuzhiyun {
3104*4882a593Smuzhiyun 	struct device *dev = acpi_desc->dev;
3105*4882a593Smuzhiyun 	struct nd_cmd_ars_status *ars_status;
3106*4882a593Smuzhiyun 
3107*4882a593Smuzhiyun 	if (acpi_desc->ars_status) {
3108*4882a593Smuzhiyun 		memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
3109*4882a593Smuzhiyun 		return 0;
3110*4882a593Smuzhiyun 	}
3111*4882a593Smuzhiyun 
3112*4882a593Smuzhiyun 	ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL);
3113*4882a593Smuzhiyun 	if (!ars_status)
3114*4882a593Smuzhiyun 		return -ENOMEM;
3115*4882a593Smuzhiyun 	acpi_desc->ars_status = ars_status;
3116*4882a593Smuzhiyun 	return 0;
3117*4882a593Smuzhiyun }
3118*4882a593Smuzhiyun 
acpi_nfit_query_poison(struct acpi_nfit_desc * acpi_desc)3119*4882a593Smuzhiyun static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
3120*4882a593Smuzhiyun {
3121*4882a593Smuzhiyun 	int rc;
3122*4882a593Smuzhiyun 
3123*4882a593Smuzhiyun 	if (ars_status_alloc(acpi_desc))
3124*4882a593Smuzhiyun 		return -ENOMEM;
3125*4882a593Smuzhiyun 
3126*4882a593Smuzhiyun 	rc = ars_get_status(acpi_desc);
3127*4882a593Smuzhiyun 
3128*4882a593Smuzhiyun 	if (rc < 0 && rc != -ENOSPC)
3129*4882a593Smuzhiyun 		return rc;
3130*4882a593Smuzhiyun 
3131*4882a593Smuzhiyun 	if (ars_status_process_records(acpi_desc))
3132*4882a593Smuzhiyun 		dev_err(acpi_desc->dev, "Failed to process ARS records\n");
3133*4882a593Smuzhiyun 
3134*4882a593Smuzhiyun 	return rc;
3135*4882a593Smuzhiyun }
3136*4882a593Smuzhiyun 
/*
 * Register an nd_region for @nfit_spa, first attempting an initial
 * short ARS pass so badblocks are seeded before the region is probed.
 * Registration proceeds regardless of ARS outcome; ARS failure only
 * marks the range ARS_FAILED so later scrub requests skip it.
 */
static int ars_register(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	int rc;

	/* range has no usable ARS capability, just register the region */
	if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
		return acpi_nfit_register_region(acpi_desc, nfit_spa);

	/* request a short pass now, and (optionally) a long pass later */
	set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
	if (!no_init_ars)
		set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);

	/* drain any results the platform already has before starting */
	switch (acpi_nfit_query_poison(acpi_desc)) {
	case 0:
	case -ENOSPC:
	case -EAGAIN:
		rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
		/* shouldn't happen, try again later */
		if (rc == -EBUSY)
			break;
		if (rc) {
			set_bit(ARS_FAILED, &nfit_spa->ars_state);
			break;
		}
		clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
		rc = acpi_nfit_query_poison(acpi_desc);
		if (rc)
			break;
		acpi_desc->scrub_spa = nfit_spa;
		ars_complete(acpi_desc, nfit_spa);
		/*
		 * If ars_complete() says we didn't complete the
		 * short scrub, we'll try again with a long
		 * request.
		 */
		acpi_desc->scrub_spa = NULL;
		break;
	case -EBUSY:
	case -ENOMEM:
		/*
		 * BIOS was using ARS, wait for it to complete (or
		 * resources to become available) and then perform our
		 * own scrubs.
		 */
		break;
	default:
		set_bit(ARS_FAILED, &nfit_spa->ars_state);
		break;
	}

	return acpi_nfit_register_region(acpi_desc, nfit_spa);
}
3189*4882a593Smuzhiyun 
ars_complete_all(struct acpi_nfit_desc * acpi_desc)3190*4882a593Smuzhiyun static void ars_complete_all(struct acpi_nfit_desc *acpi_desc)
3191*4882a593Smuzhiyun {
3192*4882a593Smuzhiyun 	struct nfit_spa *nfit_spa;
3193*4882a593Smuzhiyun 
3194*4882a593Smuzhiyun 	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3195*4882a593Smuzhiyun 		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3196*4882a593Smuzhiyun 			continue;
3197*4882a593Smuzhiyun 		ars_complete(acpi_desc, nfit_spa);
3198*4882a593Smuzhiyun 	}
3199*4882a593Smuzhiyun }
3200*4882a593Smuzhiyun 
/*
 * Advance the background-scrub state machine given the result of the
 * most recent status query.  Returns the number of seconds until the
 * work should run again, or 0 when all requested scrubs are complete
 * (or the descriptor is being cancelled).  Caller holds init_mutex.
 */
static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
		int query_rc)
{
	unsigned int tmo = acpi_desc->scrub_tmo;
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	lockdep_assert_held(&acpi_desc->init_mutex);

	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
		return 0;

	if (query_rc == -EBUSY) {
		dev_dbg(dev, "ARS: ARS busy\n");
		/* exponential backoff, capped at 30 minutes */
		return min(30U * 60U, tmo * 2);
	}
	if (query_rc == -ENOSPC) {
		/* more records pending: resume ARS past what we consumed */
		dev_dbg(dev, "ARS: ARS continue\n");
		ars_continue(acpi_desc);
		return 1;
	}
	if (query_rc && query_rc != -EAGAIN) {
		unsigned long long addr, end;

		addr = acpi_desc->ars_status->address;
		end = addr + acpi_desc->ars_status->length;
		dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end,
				query_rc);
	}

	/* retire any scrubs the status query showed as finished */
	ars_complete_all(acpi_desc);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		enum nfit_ars_state req_type;
		int rc;

		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
			continue;

		/* prefer short ARS requests first */
		if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
			req_type = ARS_REQ_SHORT;
		else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
			req_type = ARS_REQ_LONG;
		else
			continue;
		rc = ars_start(acpi_desc, nfit_spa, req_type);

		dev = nd_region_dev(nfit_spa->nd_region);
		dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
				nfit_spa->spa->range_index,
				req_type == ARS_REQ_SHORT ? "short" : "long",
				rc);
		/*
		 * Hmm, we raced someone else starting ARS? Try again in
		 * a bit.
		 */
		if (rc == -EBUSY)
			return 1;
		if (rc == 0) {
			/* starting while another range was active is a bug */
			dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
					"scrub start while range %d active\n",
					acpi_desc->scrub_spa->spa->range_index);
			clear_bit(req_type, &nfit_spa->ars_state);
			acpi_desc->scrub_spa = nfit_spa;
			/*
			 * Consider this spa last for future scrub
			 * requests
			 */
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return 1;
		}

		dev_err(dev, "ARS: range %d ARS failed (%d)\n",
				nfit_spa->spa->range_index, rc);
		set_bit(ARS_FAILED, &nfit_spa->ars_state);
	}
	return 0;
}
3279*4882a593Smuzhiyun 
__sched_ars(struct acpi_nfit_desc * acpi_desc,unsigned int tmo)3280*4882a593Smuzhiyun static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
3281*4882a593Smuzhiyun {
3282*4882a593Smuzhiyun 	lockdep_assert_held(&acpi_desc->init_mutex);
3283*4882a593Smuzhiyun 
3284*4882a593Smuzhiyun 	set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
3285*4882a593Smuzhiyun 	/* note this should only be set from within the workqueue */
3286*4882a593Smuzhiyun 	if (tmo)
3287*4882a593Smuzhiyun 		acpi_desc->scrub_tmo = tmo;
3288*4882a593Smuzhiyun 	queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
3289*4882a593Smuzhiyun }
3290*4882a593Smuzhiyun 
/* Kick the scrub worker immediately, leaving the poll interval alone. */
static void sched_ars(struct acpi_nfit_desc *acpi_desc)
{
	__sched_ars(acpi_desc, 0);
}
3295*4882a593Smuzhiyun 
notify_ars_done(struct acpi_nfit_desc * acpi_desc)3296*4882a593Smuzhiyun static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
3297*4882a593Smuzhiyun {
3298*4882a593Smuzhiyun 	lockdep_assert_held(&acpi_desc->init_mutex);
3299*4882a593Smuzhiyun 
3300*4882a593Smuzhiyun 	clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
3301*4882a593Smuzhiyun 	acpi_desc->scrub_count++;
3302*4882a593Smuzhiyun 	if (acpi_desc->scrub_count_state)
3303*4882a593Smuzhiyun 		sysfs_notify_dirent(acpi_desc->scrub_count_state);
3304*4882a593Smuzhiyun }
3305*4882a593Smuzhiyun 
/*
 * Delayed-work handler driving background ARS: query the current
 * results, let the state machine decide whether to re-arm, and notify
 * sysfs waiters when a pass completes.
 */
static void acpi_nfit_scrub(struct work_struct *work)
{
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo;
	int query_rc;

	acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
	mutex_lock(&acpi_desc->init_mutex);
	query_rc = acpi_nfit_query_poison(acpi_desc);
	tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
	if (tmo)
		__sched_ars(acpi_desc, tmo);
	else
		notify_ars_done(acpi_desc);
	/* clear stale results before the next status query */
	memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
	clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
	mutex_unlock(&acpi_desc->init_mutex);
}
3324*4882a593Smuzhiyun 
acpi_nfit_init_ars(struct acpi_nfit_desc * acpi_desc,struct nfit_spa * nfit_spa)3325*4882a593Smuzhiyun static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
3326*4882a593Smuzhiyun 		struct nfit_spa *nfit_spa)
3327*4882a593Smuzhiyun {
3328*4882a593Smuzhiyun 	int type = nfit_spa_type(nfit_spa->spa);
3329*4882a593Smuzhiyun 	struct nd_cmd_ars_cap ars_cap;
3330*4882a593Smuzhiyun 	int rc;
3331*4882a593Smuzhiyun 
3332*4882a593Smuzhiyun 	set_bit(ARS_FAILED, &nfit_spa->ars_state);
3333*4882a593Smuzhiyun 	memset(&ars_cap, 0, sizeof(ars_cap));
3334*4882a593Smuzhiyun 	rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
3335*4882a593Smuzhiyun 	if (rc < 0)
3336*4882a593Smuzhiyun 		return;
3337*4882a593Smuzhiyun 	/* check that the supported scrub types match the spa type */
3338*4882a593Smuzhiyun 	if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16)
3339*4882a593Smuzhiyun 				& ND_ARS_VOLATILE) == 0)
3340*4882a593Smuzhiyun 		return;
3341*4882a593Smuzhiyun 	if (type == NFIT_SPA_PM && ((ars_cap.status >> 16)
3342*4882a593Smuzhiyun 				& ND_ARS_PERSISTENT) == 0)
3343*4882a593Smuzhiyun 		return;
3344*4882a593Smuzhiyun 
3345*4882a593Smuzhiyun 	nfit_spa->max_ars = ars_cap.max_ars_out;
3346*4882a593Smuzhiyun 	nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
3347*4882a593Smuzhiyun 	acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
3348*4882a593Smuzhiyun 	clear_bit(ARS_FAILED, &nfit_spa->ars_state);
3349*4882a593Smuzhiyun }
3350*4882a593Smuzhiyun 
/*
 * Walk all SPA ranges: first probe ARS capability on scrub-capable
 * types, then register an nd_region for each known range type, and
 * finally schedule the background scrub worker if at least one range
 * successfully set up ARS.
 */
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc, do_sched_ars = 0;

	set_bit(ARS_VALID, &acpi_desc->scrub_flags);
	/* pass 1: capability probe so acpi_desc->max_ars is settled */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
		case NFIT_SPA_PM:
			acpi_nfit_init_ars(acpi_desc, nfit_spa);
			break;
		}
	}

	/* pass 2: region registration */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		switch (nfit_spa_type(nfit_spa->spa)) {
		case NFIT_SPA_VOLATILE:
		case NFIT_SPA_PM:
			/* register regions and kick off initial ARS run */
			rc = ars_register(acpi_desc, nfit_spa);
			if (rc)
				return rc;

			/*
			 * Kick off background ARS if at least one
			 * region successfully registered ARS
			 */
			if (!test_bit(ARS_FAILED, &nfit_spa->ars_state))
				do_sched_ars++;
			break;
		case NFIT_SPA_BDW:
			/* nothing to register */
			break;
		case NFIT_SPA_DCR:
		case NFIT_SPA_VDISK:
		case NFIT_SPA_VCD:
		case NFIT_SPA_PDISK:
		case NFIT_SPA_PCD:
			/* register known regions that don't support ARS */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
			break;
		default:
			/* don't register unknown regions */
			break;
		}
	}

	if (do_sched_ars)
		sched_ars(acpi_desc);
	return 0;
}
3405*4882a593Smuzhiyun 
acpi_nfit_check_deletions(struct acpi_nfit_desc * acpi_desc,struct nfit_table_prev * prev)3406*4882a593Smuzhiyun static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
3407*4882a593Smuzhiyun 		struct nfit_table_prev *prev)
3408*4882a593Smuzhiyun {
3409*4882a593Smuzhiyun 	struct device *dev = acpi_desc->dev;
3410*4882a593Smuzhiyun 
3411*4882a593Smuzhiyun 	if (!list_empty(&prev->spas) ||
3412*4882a593Smuzhiyun 			!list_empty(&prev->memdevs) ||
3413*4882a593Smuzhiyun 			!list_empty(&prev->dcrs) ||
3414*4882a593Smuzhiyun 			!list_empty(&prev->bdws) ||
3415*4882a593Smuzhiyun 			!list_empty(&prev->idts) ||
3416*4882a593Smuzhiyun 			!list_empty(&prev->flushes)) {
3417*4882a593Smuzhiyun 		dev_err(dev, "new nfit deletes entries (unsupported)\n");
3418*4882a593Smuzhiyun 		return -ENXIO;
3419*4882a593Smuzhiyun 	}
3420*4882a593Smuzhiyun 	return 0;
3421*4882a593Smuzhiyun }
3422*4882a593Smuzhiyun 
acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc * acpi_desc)3423*4882a593Smuzhiyun static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
3424*4882a593Smuzhiyun {
3425*4882a593Smuzhiyun 	struct device *dev = acpi_desc->dev;
3426*4882a593Smuzhiyun 	struct kernfs_node *nfit;
3427*4882a593Smuzhiyun 	struct device *bus_dev;
3428*4882a593Smuzhiyun 
3429*4882a593Smuzhiyun 	if (!ars_supported(acpi_desc->nvdimm_bus))
3430*4882a593Smuzhiyun 		return 0;
3431*4882a593Smuzhiyun 
3432*4882a593Smuzhiyun 	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
3433*4882a593Smuzhiyun 	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
3434*4882a593Smuzhiyun 	if (!nfit) {
3435*4882a593Smuzhiyun 		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
3436*4882a593Smuzhiyun 		return -ENODEV;
3437*4882a593Smuzhiyun 	}
3438*4882a593Smuzhiyun 	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
3439*4882a593Smuzhiyun 	sysfs_put(nfit);
3440*4882a593Smuzhiyun 	if (!acpi_desc->scrub_count_state) {
3441*4882a593Smuzhiyun 		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
3442*4882a593Smuzhiyun 		return -ENODEV;
3443*4882a593Smuzhiyun 	}
3444*4882a593Smuzhiyun 
3445*4882a593Smuzhiyun 	return 0;
3446*4882a593Smuzhiyun }
3447*4882a593Smuzhiyun 
acpi_nfit_unregister(void * data)3448*4882a593Smuzhiyun static void acpi_nfit_unregister(void *data)
3449*4882a593Smuzhiyun {
3450*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc = data;
3451*4882a593Smuzhiyun 
3452*4882a593Smuzhiyun 	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
3453*4882a593Smuzhiyun }
3454*4882a593Smuzhiyun 
/**
 * acpi_nfit_init - parse NFIT table data and register nvdimm devices
 * @acpi_desc: driver context; the nvdimm bus is registered on first call
 * @data: NFIT table data to parse (size @sz bytes)
 * @sz: size of @data in bytes
 *
 * May be called again at runtime (e.g. _FIT updates); later invocations
 * merge newly added tables, while deletions are rejected.  Serialized
 * against the scrub worker via init_mutex.  Returns 0 or a negative
 * errno.
 */
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	/* one-time bus registration on the first invocation */
	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	/*
	 * Set the previous enumeration aside; add_table() re-links any
	 * entry that is still present in the new table data, so whatever
	 * remains on @prev afterwards was deleted.
	 */
	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	/* walk the sub-tables; add_table() returns NULL/ERR_PTR at end */
	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "nfit table parsing error: %ld\n",	PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);
3536*4882a593Smuzhiyun 
/*
 * nvdimm bus 'flush_probe' callback: ensure any in-flight NFIT
 * enumeration has finished before child device probing trusts the
 * current state.  Both locks are taken and immediately dropped purely
 * to wait out concurrent holders.
 */
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct device *dev = acpi_desc->dev;

	/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	nfit_device_lock(dev);
	nfit_device_unlock(dev);

	/* Bounce the init_mutex to complete initial registration */
	mutex_lock(&acpi_desc->init_mutex);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
3552*4882a593Smuzhiyun 
__acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor * nd_desc,struct nvdimm * nvdimm,unsigned int cmd)3553*4882a593Smuzhiyun static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3554*4882a593Smuzhiyun 		struct nvdimm *nvdimm, unsigned int cmd)
3555*4882a593Smuzhiyun {
3556*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
3557*4882a593Smuzhiyun 
3558*4882a593Smuzhiyun 	if (nvdimm)
3559*4882a593Smuzhiyun 		return 0;
3560*4882a593Smuzhiyun 	if (cmd != ND_CMD_ARS_START)
3561*4882a593Smuzhiyun 		return 0;
3562*4882a593Smuzhiyun 
3563*4882a593Smuzhiyun 	/*
3564*4882a593Smuzhiyun 	 * The kernel and userspace may race to initiate a scrub, but
3565*4882a593Smuzhiyun 	 * the scrub thread is prepared to lose that initial race.  It
3566*4882a593Smuzhiyun 	 * just needs guarantees that any ARS it initiates are not
3567*4882a593Smuzhiyun 	 * interrupted by any intervening start requests from userspace.
3568*4882a593Smuzhiyun 	 */
3569*4882a593Smuzhiyun 	if (work_busy(&acpi_desc->dwork.work))
3570*4882a593Smuzhiyun 		return -EBUSY;
3571*4882a593Smuzhiyun 
3572*4882a593Smuzhiyun 	return 0;
3573*4882a593Smuzhiyun }
3574*4882a593Smuzhiyun 
3575*4882a593Smuzhiyun /*
3576*4882a593Smuzhiyun  * Prevent security and firmware activate commands from being issued via
3577*4882a593Smuzhiyun  * ioctl.
3578*4882a593Smuzhiyun  */
acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor * nd_desc,struct nvdimm * nvdimm,unsigned int cmd,void * buf)3579*4882a593Smuzhiyun static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3580*4882a593Smuzhiyun 		struct nvdimm *nvdimm, unsigned int cmd, void *buf)
3581*4882a593Smuzhiyun {
3582*4882a593Smuzhiyun 	struct nd_cmd_pkg *call_pkg = buf;
3583*4882a593Smuzhiyun 	unsigned int func;
3584*4882a593Smuzhiyun 
3585*4882a593Smuzhiyun 	if (nvdimm && cmd == ND_CMD_CALL &&
3586*4882a593Smuzhiyun 			call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
3587*4882a593Smuzhiyun 		func = call_pkg->nd_command;
3588*4882a593Smuzhiyun 		if (func > NVDIMM_CMD_MAX ||
3589*4882a593Smuzhiyun 		    (1 << func) & NVDIMM_INTEL_DENY_CMDMASK)
3590*4882a593Smuzhiyun 			return -EOPNOTSUPP;
3591*4882a593Smuzhiyun 	}
3592*4882a593Smuzhiyun 
3593*4882a593Smuzhiyun 	/* block all non-nfit bus commands */
3594*4882a593Smuzhiyun 	if (!nvdimm && cmd == ND_CMD_CALL &&
3595*4882a593Smuzhiyun 			call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT)
3596*4882a593Smuzhiyun 		return -EOPNOTSUPP;
3597*4882a593Smuzhiyun 
3598*4882a593Smuzhiyun 	return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
3599*4882a593Smuzhiyun }
3600*4882a593Smuzhiyun 
acpi_nfit_ars_rescan(struct acpi_nfit_desc * acpi_desc,enum nfit_ars_state req_type)3601*4882a593Smuzhiyun int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
3602*4882a593Smuzhiyun 		enum nfit_ars_state req_type)
3603*4882a593Smuzhiyun {
3604*4882a593Smuzhiyun 	struct device *dev = acpi_desc->dev;
3605*4882a593Smuzhiyun 	int scheduled = 0, busy = 0;
3606*4882a593Smuzhiyun 	struct nfit_spa *nfit_spa;
3607*4882a593Smuzhiyun 
3608*4882a593Smuzhiyun 	mutex_lock(&acpi_desc->init_mutex);
3609*4882a593Smuzhiyun 	if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
3610*4882a593Smuzhiyun 		mutex_unlock(&acpi_desc->init_mutex);
3611*4882a593Smuzhiyun 		return 0;
3612*4882a593Smuzhiyun 	}
3613*4882a593Smuzhiyun 
3614*4882a593Smuzhiyun 	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3615*4882a593Smuzhiyun 		int type = nfit_spa_type(nfit_spa->spa);
3616*4882a593Smuzhiyun 
3617*4882a593Smuzhiyun 		if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
3618*4882a593Smuzhiyun 			continue;
3619*4882a593Smuzhiyun 		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3620*4882a593Smuzhiyun 			continue;
3621*4882a593Smuzhiyun 
3622*4882a593Smuzhiyun 		if (test_and_set_bit(req_type, &nfit_spa->ars_state))
3623*4882a593Smuzhiyun 			busy++;
3624*4882a593Smuzhiyun 		else
3625*4882a593Smuzhiyun 			scheduled++;
3626*4882a593Smuzhiyun 	}
3627*4882a593Smuzhiyun 	if (scheduled) {
3628*4882a593Smuzhiyun 		sched_ars(acpi_desc);
3629*4882a593Smuzhiyun 		dev_dbg(dev, "ars_scan triggered\n");
3630*4882a593Smuzhiyun 	}
3631*4882a593Smuzhiyun 	mutex_unlock(&acpi_desc->init_mutex);
3632*4882a593Smuzhiyun 
3633*4882a593Smuzhiyun 	if (scheduled)
3634*4882a593Smuzhiyun 		return 0;
3635*4882a593Smuzhiyun 	if (busy)
3636*4882a593Smuzhiyun 		return -EBUSY;
3637*4882a593Smuzhiyun 	return -ENOTTY;
3638*4882a593Smuzhiyun }
3639*4882a593Smuzhiyun 
acpi_nfit_desc_init(struct acpi_nfit_desc * acpi_desc,struct device * dev)3640*4882a593Smuzhiyun void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
3641*4882a593Smuzhiyun {
3642*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc;
3643*4882a593Smuzhiyun 
3644*4882a593Smuzhiyun 	dev_set_drvdata(dev, acpi_desc);
3645*4882a593Smuzhiyun 	acpi_desc->dev = dev;
3646*4882a593Smuzhiyun 	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
3647*4882a593Smuzhiyun 	nd_desc = &acpi_desc->nd_desc;
3648*4882a593Smuzhiyun 	nd_desc->provider_name = "ACPI.NFIT";
3649*4882a593Smuzhiyun 	nd_desc->module = THIS_MODULE;
3650*4882a593Smuzhiyun 	nd_desc->ndctl = acpi_nfit_ctl;
3651*4882a593Smuzhiyun 	nd_desc->flush_probe = acpi_nfit_flush_probe;
3652*4882a593Smuzhiyun 	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
3653*4882a593Smuzhiyun 	nd_desc->attr_groups = acpi_nfit_attribute_groups;
3654*4882a593Smuzhiyun 
3655*4882a593Smuzhiyun 	INIT_LIST_HEAD(&acpi_desc->spas);
3656*4882a593Smuzhiyun 	INIT_LIST_HEAD(&acpi_desc->dcrs);
3657*4882a593Smuzhiyun 	INIT_LIST_HEAD(&acpi_desc->bdws);
3658*4882a593Smuzhiyun 	INIT_LIST_HEAD(&acpi_desc->idts);
3659*4882a593Smuzhiyun 	INIT_LIST_HEAD(&acpi_desc->flushes);
3660*4882a593Smuzhiyun 	INIT_LIST_HEAD(&acpi_desc->memdevs);
3661*4882a593Smuzhiyun 	INIT_LIST_HEAD(&acpi_desc->dimms);
3662*4882a593Smuzhiyun 	INIT_LIST_HEAD(&acpi_desc->list);
3663*4882a593Smuzhiyun 	mutex_init(&acpi_desc->init_mutex);
3664*4882a593Smuzhiyun 	acpi_desc->scrub_tmo = 1;
3665*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
3666*4882a593Smuzhiyun }
3667*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
3668*4882a593Smuzhiyun 
/* devm action wrapper releasing the acpi_get_table() reference */
static void acpi_nfit_put_table(void *table)
{
	acpi_put_table(table);
}
3673*4882a593Smuzhiyun 
/*
 * Stop background ARS and unhook this descriptor from the global list
 * ahead of bus teardown.  The ordering matters: the descriptor leaves
 * the mce list first, then the scrub worker is cancelled with
 * ARS_CANCEL set, then any racing rescan submissions are waited out.
 */
void acpi_nfit_shutdown(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race teardown
	 */
	mutex_lock(&acpi_desc_lock);
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);

	mutex_lock(&acpi_desc->init_mutex);
	set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
	cancel_delayed_work_sync(&acpi_desc->dwork);
	mutex_unlock(&acpi_desc->init_mutex);

	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	nfit_device_lock(bus_dev);
	nfit_device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
}
EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
3703*4882a593Smuzhiyun 
/*
 * Probe entry point for the ACPI0012 NVDIMM root device: locate the
 * static NFIT table (or a _FIT override), parse it, and register the
 * resulting nvdimm bus.  All cleanup is delegated to devm actions, which
 * is why acpi_nfit_remove() below is empty.
 */
static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* The NVDIMM root device allows OS to trigger enumeration of
		 * NVDIMMs through NFIT at boot time and re-enumeration at
		 * root level via the _FIT method during runtime.
		 * This is ok to return 0 here, we could have an nvdimm
		 * hotplugged later and evaluate _FIT method which returns
		 * data in the format of a series of NFIT Structures.
		 */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	/* Release the table reference automatically on unbind/probe failure */
	rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
	if (rc)
		return rc;
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
				(int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));

	if (rc)
		return rc;
	/* Run acpi_nfit_shutdown() (scrub cancel, list removal) on unbind */
	return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}
3762*4882a593Smuzhiyun 
/*
 * Intentionally a no-op: all teardown is performed by the devm actions
 * registered in acpi_nfit_add() (see acpi_nfit_shutdown).
 */
static int acpi_nfit_remove(struct acpi_device *adev)
{
	return 0;
}
3768*4882a593Smuzhiyun 
/*
 * Handle an NFIT_NOTIFY_UPDATE event: re-evaluate _FIT and merge any new
 * NFIT structures (e.g. after NVDIMM hotplug) into the existing bus
 * description via acpi_nfit_init().
 */
static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "no driver found for dev\n");
		return;
	}

	if (!acpi_desc) {
		/*
		 * No descriptor yet (probe found no static NFIT table);
		 * allocate one now so the _FIT data has somewhere to land.
		 * NOTE(review): drvdata is presumably set inside
		 * acpi_nfit_init() — confirm, otherwise this allocation
		 * would repeat on every notification.
		 */
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	/* _FIT must return a buffer of NFIT structures */
	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}
3813*4882a593Smuzhiyun 
/*
 * Handle an NFIT_NOTIFY_UC_MEMORY_ERROR event by kicking off an address
 * range scrub: a long scrub when HW_ERROR_SCRUB_ON is configured, a
 * short one otherwise.
 */
static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);

	acpi_nfit_ars_rescan(acpi_desc,
			acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON
			? ARS_REQ_LONG : ARS_REQ_SHORT);
}
3823*4882a593Smuzhiyun 
/*
 * Common ACPI notification dispatcher, shared with the nfit_test
 * harness via EXPORT_SYMBOL_GPL.  Unrecognized events are ignored.
 */
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	dev_dbg(dev, "event: 0x%x\n", event);

	if (event == NFIT_NOTIFY_UPDATE)
		acpi_nfit_update_notify(dev, handle);
	else if (event == NFIT_NOTIFY_UC_MEMORY_ERROR)
		acpi_nfit_uc_error_notify(dev, handle);
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
3837*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
3838*4882a593Smuzhiyun 
/* ACPI-core notify callback: dispatch under the nfit device lock */
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	struct device *dev = &adev->dev;

	nfit_device_lock(dev);
	__acpi_nfit_notify(dev, adev->handle, event);
	nfit_device_unlock(dev);
}
3845*4882a593Smuzhiyun 
3846*4882a593Smuzhiyun static const struct acpi_device_id acpi_nfit_ids[] = {
3847*4882a593Smuzhiyun 	{ "ACPI0012", 0 },
3848*4882a593Smuzhiyun 	{ "", 0 },
3849*4882a593Smuzhiyun };
3850*4882a593Smuzhiyun MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
3851*4882a593Smuzhiyun 
3852*4882a593Smuzhiyun static struct acpi_driver acpi_nfit_driver = {
3853*4882a593Smuzhiyun 	.name = KBUILD_MODNAME,
3854*4882a593Smuzhiyun 	.ids = acpi_nfit_ids,
3855*4882a593Smuzhiyun 	.ops = {
3856*4882a593Smuzhiyun 		.add = acpi_nfit_add,
3857*4882a593Smuzhiyun 		.remove = acpi_nfit_remove,
3858*4882a593Smuzhiyun 		.notify = acpi_nfit_notify,
3859*4882a593Smuzhiyun 	},
3860*4882a593Smuzhiyun };
3861*4882a593Smuzhiyun 
nfit_init(void)3862*4882a593Smuzhiyun static __init int nfit_init(void)
3863*4882a593Smuzhiyun {
3864*4882a593Smuzhiyun 	int ret;
3865*4882a593Smuzhiyun 
3866*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
3867*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
3868*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
3869*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
3870*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
3871*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
3872*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
3873*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);
3874*4882a593Smuzhiyun 
3875*4882a593Smuzhiyun 	guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
3876*4882a593Smuzhiyun 	guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
3877*4882a593Smuzhiyun 	guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
3878*4882a593Smuzhiyun 	guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
3879*4882a593Smuzhiyun 	guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
3880*4882a593Smuzhiyun 	guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
3881*4882a593Smuzhiyun 	guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
3882*4882a593Smuzhiyun 	guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
3883*4882a593Smuzhiyun 	guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
3884*4882a593Smuzhiyun 	guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
3885*4882a593Smuzhiyun 	guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
3886*4882a593Smuzhiyun 	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
3887*4882a593Smuzhiyun 	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
3888*4882a593Smuzhiyun 	guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]);
3889*4882a593Smuzhiyun 	guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]);
3890*4882a593Smuzhiyun 
3891*4882a593Smuzhiyun 	nfit_wq = create_singlethread_workqueue("nfit");
3892*4882a593Smuzhiyun 	if (!nfit_wq)
3893*4882a593Smuzhiyun 		return -ENOMEM;
3894*4882a593Smuzhiyun 
3895*4882a593Smuzhiyun 	nfit_mce_register();
3896*4882a593Smuzhiyun 	ret = acpi_bus_register_driver(&acpi_nfit_driver);
3897*4882a593Smuzhiyun 	if (ret) {
3898*4882a593Smuzhiyun 		nfit_mce_unregister();
3899*4882a593Smuzhiyun 		destroy_workqueue(nfit_wq);
3900*4882a593Smuzhiyun 	}
3901*4882a593Smuzhiyun 
3902*4882a593Smuzhiyun 	return ret;
3903*4882a593Smuzhiyun 
3904*4882a593Smuzhiyun }
3905*4882a593Smuzhiyun 
/*
 * Module exit: tear down in the reverse order of nfit_init().  By the
 * time the driver is unregistered every descriptor should have removed
 * itself from acpi_descs (via acpi_nfit_shutdown), hence the WARN_ON.
 */
static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}
3913*4882a593Smuzhiyun 
3914*4882a593Smuzhiyun module_init(nfit_init);
3915*4882a593Smuzhiyun module_exit(nfit_exit);
3916*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
3917*4882a593Smuzhiyun MODULE_AUTHOR("Intel Corporation");
3918