xref: /OK3568_Linux_fs/kernel/drivers/acpi/nfit/intel.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright(c) 2018 Intel Corporation. All rights reserved. */
3*4882a593Smuzhiyun #include <linux/libnvdimm.h>
4*4882a593Smuzhiyun #include <linux/ndctl.h>
5*4882a593Smuzhiyun #include <linux/acpi.h>
6*4882a593Smuzhiyun #include <asm/smp.h>
7*4882a593Smuzhiyun #include "intel.h"
8*4882a593Smuzhiyun #include "nfit.h"
9*4882a593Smuzhiyun 
firmware_activate_noidle_show(struct device * dev,struct device_attribute * attr,char * buf)10*4882a593Smuzhiyun static ssize_t firmware_activate_noidle_show(struct device *dev,
11*4882a593Smuzhiyun 		struct device_attribute *attr, char *buf)
12*4882a593Smuzhiyun {
13*4882a593Smuzhiyun 	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
14*4882a593Smuzhiyun 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
15*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun 	return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? "Y" : "N");
18*4882a593Smuzhiyun }
19*4882a593Smuzhiyun 
/*
 * sysfs write: update the fwa_noidle policy.  Accepts any kstrtobool()
 * spelling ("Y"/"N", "1"/"0", "on"/"off", ...).  Changing the policy
 * invalidates the cached firmware-activate capability so it is
 * re-evaluated on the next query.
 */
static ssize_t firmware_activate_noidle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	ssize_t rc;
	bool val;

	rc = kstrtobool(buf, &val);
	if (rc)
		return rc;
	/* reported capability depends on the quiesce policy; force refresh */
	if (val != acpi_desc->fwa_noidle)
		acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID;
	acpi_desc->fwa_noidle = val;
	return size;
}
DEVICE_ATTR_RW(firmware_activate_noidle);
38*4882a593Smuzhiyun 
/*
 * Firmware activation is only claimed when the Intel bus family is
 * advertised AND the platform implements the complete
 * NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK command set (exact match, not a
 * subset).
 */
bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	unsigned long *mask;

	if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask))
		return false;

	mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL];
	return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK;
}
51*4882a593Smuzhiyun 
/*
 * Translate the Intel "get security state" DSM payload into the generic
 * NVDIMM_SECURITY_* flag set, for either the user or master passphrase
 * view.  Returns 0 (no flags) when the DSM is unsupported, the call
 * fails, or firmware reports security as unsupported.
 */
static unsigned long intel_security_flags(struct nvdimm *nvdimm,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	unsigned long security_flags = 0;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_get_security_state cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_out =
				sizeof(struct nd_intel_get_security_state),
			.nd_fw_size =
				sizeof(struct nd_intel_get_security_state),
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_GET_SECURITY_STATE, &nfit_mem->dsm_mask))
		return 0;

	/*
	 * Short circuit the state retrieval while we are doing overwrite.
	 * The DSM spec states that the security state is indeterminate
	 * until the overwrite DSM completes.
	 */
	if (nvdimm_in_overwrite(nvdimm) && ptype == NVDIMM_USER)
		return BIT(NVDIMM_SECURITY_OVERWRITE);

	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0 || nd_cmd.cmd.status) {
		pr_err("%s: security state retrieval failed (%d:%#x)\n",
				nvdimm_name(nvdimm), rc, nd_cmd.cmd.status);
		return 0;
	}

	/* check and see if security is enabled and locked */
	if (ptype == NVDIMM_MASTER) {
		/*
		 * The master-passphrase view is decoded from
		 * extended_state: "enabled" reports as unlocked, otherwise
		 * disabled; the passphrase-limit bit reports as frozen.
		 */
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_ENABLED)
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);
		if (nd_cmd.cmd.extended_state & ND_INTEL_SEC_ESTATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);
		return security_flags;
	}

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_UNSUPPORTED)
		return 0;

	if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_ENABLED) {
		/* both "frozen" and "passphrase limit" report as frozen */
		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_FROZEN ||
		    nd_cmd.cmd.state & ND_INTEL_SEC_STATE_PLIMIT)
			set_bit(NVDIMM_SECURITY_FROZEN, &security_flags);

		if (nd_cmd.cmd.state & ND_INTEL_SEC_STATE_LOCKED)
			set_bit(NVDIMM_SECURITY_LOCKED, &security_flags);
		else
			set_bit(NVDIMM_SECURITY_UNLOCKED, &security_flags);
	} else
		set_bit(NVDIMM_SECURITY_DISABLED, &security_flags);

	return security_flags;
}
118*4882a593Smuzhiyun 
intel_security_freeze(struct nvdimm * nvdimm)119*4882a593Smuzhiyun static int intel_security_freeze(struct nvdimm *nvdimm)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
122*4882a593Smuzhiyun 	struct {
123*4882a593Smuzhiyun 		struct nd_cmd_pkg pkg;
124*4882a593Smuzhiyun 		struct nd_intel_freeze_lock cmd;
125*4882a593Smuzhiyun 	} nd_cmd = {
126*4882a593Smuzhiyun 		.pkg = {
127*4882a593Smuzhiyun 			.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
128*4882a593Smuzhiyun 			.nd_family = NVDIMM_FAMILY_INTEL,
129*4882a593Smuzhiyun 			.nd_size_out = ND_INTEL_STATUS_SIZE,
130*4882a593Smuzhiyun 			.nd_fw_size = ND_INTEL_STATUS_SIZE,
131*4882a593Smuzhiyun 		},
132*4882a593Smuzhiyun 	};
133*4882a593Smuzhiyun 	int rc;
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun 	if (!test_bit(NVDIMM_INTEL_FREEZE_LOCK, &nfit_mem->dsm_mask))
136*4882a593Smuzhiyun 		return -ENOTTY;
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
139*4882a593Smuzhiyun 	if (rc < 0)
140*4882a593Smuzhiyun 		return rc;
141*4882a593Smuzhiyun 	if (nd_cmd.cmd.status)
142*4882a593Smuzhiyun 		return -EIO;
143*4882a593Smuzhiyun 	return 0;
144*4882a593Smuzhiyun }
145*4882a593Smuzhiyun 
/*
 * Set or change the user or master passphrase.  The input payload
 * carries the old and new passphrases back to back, hence nd_size_in is
 * twice ND_INTEL_PASSPHRASE_SIZE.
 *
 * Returns 0 on success, -ENOTTY if the selected DSM is unsupported,
 * -EINVAL for a bad passphrase, -EOPNOTSUPP when firmware rejects the
 * operation, -EIO for invalid state or any other status.
 */
static int intel_security_change_key(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *old_data,
		const struct nvdimm_key_data *new_data,
		enum nvdimm_passphrase_type ptype)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	/* master and user passphrases are set by different DSM functions */
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
		NVDIMM_INTEL_SET_PASSPHRASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_set_passphrase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};
	int rc;

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.old_pass, old_data->data,
			sizeof(nd_cmd.cmd.old_pass));
	memcpy(nd_cmd.cmd.new_pass, new_data->data,
			sizeof(nd_cmd.cmd.new_pass));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	/* map the firmware status code to an errno */
	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -EIO;
	}
}
192*4882a593Smuzhiyun 
/* Defined at the bottom of the file; real implementation is x86-only. */
static void nvdimm_invalidate_cache(void);

/*
 * Unlock the DIMM with the user passphrase.  Marked __maybe_unused
 * because it is only wired into the ops table under CONFIG_X86, where a
 * working nvdimm_invalidate_cache() exists.
 *
 * Returns 0 on success, -ENOTTY if the DSM is unsupported, -EINVAL for
 * a bad passphrase, -EIO for any other firmware status.
 */
static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key_data)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_unlock_unit cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};
	int rc;

	if (!test_bit(NVDIMM_INTEL_UNLOCK_UNIT, &nfit_mem->dsm_mask))
		return -ENOTTY;

	memcpy(nd_cmd.cmd.passphrase, key_data->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;
	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	default:
		return -EIO;
	}

	/* DIMM unlocked, invalidate all CPU caches before we read it */
	nvdimm_invalidate_cache();

	return 0;
}
235*4882a593Smuzhiyun 
intel_security_disable(struct nvdimm * nvdimm,const struct nvdimm_key_data * key_data)236*4882a593Smuzhiyun static int intel_security_disable(struct nvdimm *nvdimm,
237*4882a593Smuzhiyun 		const struct nvdimm_key_data *key_data)
238*4882a593Smuzhiyun {
239*4882a593Smuzhiyun 	int rc;
240*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
241*4882a593Smuzhiyun 	struct {
242*4882a593Smuzhiyun 		struct nd_cmd_pkg pkg;
243*4882a593Smuzhiyun 		struct nd_intel_disable_passphrase cmd;
244*4882a593Smuzhiyun 	} nd_cmd = {
245*4882a593Smuzhiyun 		.pkg = {
246*4882a593Smuzhiyun 			.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
247*4882a593Smuzhiyun 			.nd_family = NVDIMM_FAMILY_INTEL,
248*4882a593Smuzhiyun 			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
249*4882a593Smuzhiyun 			.nd_size_out = ND_INTEL_STATUS_SIZE,
250*4882a593Smuzhiyun 			.nd_fw_size = ND_INTEL_STATUS_SIZE,
251*4882a593Smuzhiyun 		},
252*4882a593Smuzhiyun 	};
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun 	if (!test_bit(NVDIMM_INTEL_DISABLE_PASSPHRASE, &nfit_mem->dsm_mask))
255*4882a593Smuzhiyun 		return -ENOTTY;
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	memcpy(nd_cmd.cmd.passphrase, key_data->data,
258*4882a593Smuzhiyun 			sizeof(nd_cmd.cmd.passphrase));
259*4882a593Smuzhiyun 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
260*4882a593Smuzhiyun 	if (rc < 0)
261*4882a593Smuzhiyun 		return rc;
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun 	switch (nd_cmd.cmd.status) {
264*4882a593Smuzhiyun 	case 0:
265*4882a593Smuzhiyun 		break;
266*4882a593Smuzhiyun 	case ND_INTEL_STATUS_INVALID_PASS:
267*4882a593Smuzhiyun 		return -EINVAL;
268*4882a593Smuzhiyun 	case ND_INTEL_STATUS_INVALID_STATE:
269*4882a593Smuzhiyun 	default:
270*4882a593Smuzhiyun 		return -ENXIO;
271*4882a593Smuzhiyun 	}
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	return 0;
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun 
/*
 * Secure-erase the DIMM with the user or master passphrase.  CPU caches
 * are invalidated both before the erase (flush dirty lines out) and
 * after (drop now-stale cached contents).  Marked __maybe_unused
 * because it is only wired into the ops table under CONFIG_X86.
 */
static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *key,
		enum nvdimm_passphrase_type ptype)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	/* master and user erase are distinct DSM functions */
	unsigned int cmd = ptype == NVDIMM_MASTER ?
		NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_secure_erase cmd;
	} nd_cmd = {
		.pkg = {
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
			.nd_command = cmd,
		},
	};

	if (!test_bit(cmd, &nfit_mem->dsm_mask))
		return -ENOTTY;

	/* flush all cache before we erase DIMM */
	nvdimm_invalidate_cache();
	memcpy(nd_cmd.cmd.passphrase, key->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	/* map the firmware status code to an errno */
	switch (nd_cmd.cmd.status) {
	case 0:
		break;
	case ND_INTEL_STATUS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}

	/* DIMM erased, invalidate all CPU caches before we read it */
	nvdimm_invalidate_cache();
	return 0;
}
324*4882a593Smuzhiyun 
intel_security_query_overwrite(struct nvdimm * nvdimm)325*4882a593Smuzhiyun static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
326*4882a593Smuzhiyun {
327*4882a593Smuzhiyun 	int rc;
328*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
329*4882a593Smuzhiyun 	struct {
330*4882a593Smuzhiyun 		struct nd_cmd_pkg pkg;
331*4882a593Smuzhiyun 		struct nd_intel_query_overwrite cmd;
332*4882a593Smuzhiyun 	} nd_cmd = {
333*4882a593Smuzhiyun 		.pkg = {
334*4882a593Smuzhiyun 			.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
335*4882a593Smuzhiyun 			.nd_family = NVDIMM_FAMILY_INTEL,
336*4882a593Smuzhiyun 			.nd_size_out = ND_INTEL_STATUS_SIZE,
337*4882a593Smuzhiyun 			.nd_fw_size = ND_INTEL_STATUS_SIZE,
338*4882a593Smuzhiyun 		},
339*4882a593Smuzhiyun 	};
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun 	if (!test_bit(NVDIMM_INTEL_QUERY_OVERWRITE, &nfit_mem->dsm_mask))
342*4882a593Smuzhiyun 		return -ENOTTY;
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
345*4882a593Smuzhiyun 	if (rc < 0)
346*4882a593Smuzhiyun 		return rc;
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 	switch (nd_cmd.cmd.status) {
349*4882a593Smuzhiyun 	case 0:
350*4882a593Smuzhiyun 		break;
351*4882a593Smuzhiyun 	case ND_INTEL_STATUS_OQUERY_INPROGRESS:
352*4882a593Smuzhiyun 		return -EBUSY;
353*4882a593Smuzhiyun 	default:
354*4882a593Smuzhiyun 		return -ENXIO;
355*4882a593Smuzhiyun 	}
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 	/* flush all cache before we make the nvdimms available */
358*4882a593Smuzhiyun 	nvdimm_invalidate_cache();
359*4882a593Smuzhiyun 	return 0;
360*4882a593Smuzhiyun }
361*4882a593Smuzhiyun 
/*
 * Kick off an asynchronous overwrite of the DIMM; completion is polled
 * via intel_security_query_overwrite().  Caches are flushed before the
 * operation starts.  Only wired into the ops table under CONFIG_X86.
 */
static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
		const struct nvdimm_key_data *nkey)
{
	int rc;
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_overwrite cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_INTEL_OVERWRITE,
			.nd_family = NVDIMM_FAMILY_INTEL,
			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
			.nd_size_out = ND_INTEL_STATUS_SIZE,
			.nd_fw_size = ND_INTEL_STATUS_SIZE,
		},
	};

	if (!test_bit(NVDIMM_INTEL_OVERWRITE, &nfit_mem->dsm_mask))
		return -ENOTTY;

	/* flush all cache before we erase DIMM */
	nvdimm_invalidate_cache();
	memcpy(nd_cmd.cmd.passphrase, nkey->data,
			sizeof(nd_cmd.cmd.passphrase));
	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
	if (rc < 0)
		return rc;

	switch (nd_cmd.cmd.status) {
	case 0:
		return 0;
	case ND_INTEL_STATUS_OVERWRITE_UNSUPPORTED:
		/*
		 * NOTE(review): -ENOTSUPP is kernel-internal and
		 * inconsistent with the -EOPNOTSUPP returned by the other
		 * handlers in this file — confirm userspace expectations
		 * before changing it.
		 */
		return -ENOTSUPP;
	case ND_INTEL_STATUS_INVALID_PASS:
		return -EINVAL;
	case ND_INTEL_STATUS_INVALID_STATE:
	default:
		return -ENXIO;
	}
}
403*4882a593Smuzhiyun 
/*
 * TODO: define a cross arch wbinvd equivalent when/if
 * NVDIMM_FAMILY_INTEL command support arrives on another arch.
 */
#ifdef CONFIG_X86
/* Write back and invalidate all CPU caches on every online CPU. */
static void nvdimm_invalidate_cache(void)
{
	wbinvd_on_all_cpus();
}
#else
static void nvdimm_invalidate_cache(void)
{
	/*
	 * WARN_ON_ONCE("string") treats the string literal as an
	 * always-true condition and never prints the text; WARN_ONCE()
	 * emits the intended message with the one-shot splat.
	 */
	WARN_ONCE(1, "cache invalidation required after unlock\n");
}
#endif
419*4882a593Smuzhiyun 
/*
 * Security operations published to libnvdimm.  The handlers that must
 * invalidate CPU caches (unlock, erase, overwrite, query_overwrite) are
 * only wired up on x86, where nvdimm_invalidate_cache() has a real
 * implementation.
 */
static const struct nvdimm_security_ops __intel_security_ops = {
	.get_flags = intel_security_flags,
	.freeze = intel_security_freeze,
	.change_key = intel_security_change_key,
	.disable = intel_security_disable,
#ifdef CONFIG_X86
	.unlock = intel_security_unlock,
	.erase = intel_security_erase,
	.overwrite = intel_security_overwrite,
	.query_overwrite = intel_security_query_overwrite,
#endif
};

const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
434*4882a593Smuzhiyun 
intel_bus_fwa_businfo(struct nvdimm_bus_descriptor * nd_desc,struct nd_intel_bus_fw_activate_businfo * info)435*4882a593Smuzhiyun static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
436*4882a593Smuzhiyun 		struct nd_intel_bus_fw_activate_businfo *info)
437*4882a593Smuzhiyun {
438*4882a593Smuzhiyun 	struct {
439*4882a593Smuzhiyun 		struct nd_cmd_pkg pkg;
440*4882a593Smuzhiyun 		struct nd_intel_bus_fw_activate_businfo cmd;
441*4882a593Smuzhiyun 	} nd_cmd = {
442*4882a593Smuzhiyun 		.pkg = {
443*4882a593Smuzhiyun 			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
444*4882a593Smuzhiyun 			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
445*4882a593Smuzhiyun 			.nd_size_out =
446*4882a593Smuzhiyun 				sizeof(struct nd_intel_bus_fw_activate_businfo),
447*4882a593Smuzhiyun 			.nd_fw_size =
448*4882a593Smuzhiyun 				sizeof(struct nd_intel_bus_fw_activate_businfo),
449*4882a593Smuzhiyun 		},
450*4882a593Smuzhiyun 	};
451*4882a593Smuzhiyun 	int rc;
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
454*4882a593Smuzhiyun 			NULL);
455*4882a593Smuzhiyun 	*info = nd_cmd.cmd;
456*4882a593Smuzhiyun 	return rc;
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun 
/* The fw_ops expect to be called with the nvdimm_bus_lock() held */
/*
 * Return the bus-level firmware-activate state, refreshing the cached
 * value from platform firmware when it is invalid/busy or when the
 * cached capability needs re-reading.  Also latches the (static)
 * capability bits the first time they are seen.
 */
static enum nvdimm_fwa_state intel_bus_fwa_state(
		struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_intel_bus_fw_activate_businfo info;
	struct device *dev = acpi_desc->dev;
	enum nvdimm_fwa_state state;
	int rc;

	/*
	 * It should not be possible for platform firmware to return
	 * busy because activate is a synchronous operation. Treat it
	 * similar to invalid, i.e. always refresh / poll the status.
	 */
	switch (acpi_desc->fwa_state) {
	case NVDIMM_FWA_INVALID:
	case NVDIMM_FWA_BUSY:
		break;
	default:
		/* check if capability needs to be refreshed */
		if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID)
			break;
		return acpi_desc->fwa_state;
	}

	/* Refresh with platform firmware */
	rc = intel_bus_fwa_businfo(nd_desc, &info);
	if (rc)
		return NVDIMM_FWA_INVALID;

	switch (info.state) {
	case ND_INTEL_FWA_IDLE:
		state = NVDIMM_FWA_IDLE;
		break;
	case ND_INTEL_FWA_BUSY:
		state = NVDIMM_FWA_BUSY;
		break;
	case ND_INTEL_FWA_ARMED:
		/*
		 * Armed, but the activation would exceed the maximum
		 * device quiesce time: report as an arm overflow.
		 */
		if (info.activate_tmo > info.max_quiesce_tmo)
			state = NVDIMM_FWA_ARM_OVERFLOW;
		else
			state = NVDIMM_FWA_ARMED;
		break;
	default:
		dev_err_once(dev, "invalid firmware activate state %d\n",
				info.state);
		return NVDIMM_FWA_INVALID;
	}

	/*
	 * Capability data is available in the same payload as state. It
	 * is expected to be static.
	 */
	if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) {
		if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE)
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE;
		else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) {
			/*
			 * Skip hibernate cycle by default if platform
			 * indicates that it does not need devices to be
			 * quiesced.
			 */
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE;
		} else
			acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE;
	}

	acpi_desc->fwa_state = state;

	return state;
}
531*4882a593Smuzhiyun 
intel_bus_fwa_capability(struct nvdimm_bus_descriptor * nd_desc)532*4882a593Smuzhiyun static enum nvdimm_fwa_capability intel_bus_fwa_capability(
533*4882a593Smuzhiyun 		struct nvdimm_bus_descriptor *nd_desc)
534*4882a593Smuzhiyun {
535*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun 	if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID)
538*4882a593Smuzhiyun 		return acpi_desc->fwa_cap;
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun 	if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID)
541*4882a593Smuzhiyun 		return acpi_desc->fwa_cap;
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun 	return NVDIMM_FWA_CAP_INVALID;
544*4882a593Smuzhiyun }
545*4882a593Smuzhiyun 
/*
 * Trigger bus-level firmware activation.  Only proceeds from the armed
 * (or arm-overflow) state; afterwards the cached bus state is
 * invalidated and the activation count bumped so every DIMM re-reads
 * its own state.
 */
static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct {
		struct nd_cmd_pkg pkg;
		struct nd_intel_bus_fw_activate cmd;
	} nd_cmd = {
		.pkg = {
			.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
			.nd_family = NVDIMM_BUS_FAMILY_INTEL,
			.nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
			.nd_size_out =
				sizeof(struct nd_intel_bus_fw_activate),
			.nd_fw_size =
				sizeof(struct nd_intel_bus_fw_activate),
		},
		/*
		 * Even though activate is run from a suspended context,
		 * for safety, still ask platform firmware to force
		 * quiesce devices by default. Let a module
		 * parameter override that policy.
		 */
		.cmd = {
			.iodev_state = acpi_desc->fwa_noidle
				? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
				: ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
		},
	};
	int rc;

	/* refuse to activate unless the bus is armed */
	switch (intel_bus_fwa_state(nd_desc)) {
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		break;
	default:
		return -ENXIO;
	}

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd),
			NULL);

	/*
	 * Whether the command succeeded, or failed, the agent checking
	 * for the result needs to query the DIMMs individually.
	 * Increment the activation count to invalidate all the DIMM
	 * states at once (it's otherwise not possible to take
	 * acpi_desc->init_mutex in this context)
	 */
	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
	acpi_desc->fwa_count++;

	dev_dbg(acpi_desc->dev, "result: %d\n", rc);

	return rc;
}
601*4882a593Smuzhiyun 
/* Bus-scope firmware-activation operations published to libnvdimm. */
static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = {
	.activate_state = intel_bus_fwa_state,
	.capability = intel_bus_fwa_capability,
	.activate = intel_bus_fwa_activate,
};

const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
609*4882a593Smuzhiyun 
intel_fwa_dimminfo(struct nvdimm * nvdimm,struct nd_intel_fw_activate_dimminfo * info)610*4882a593Smuzhiyun static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
611*4882a593Smuzhiyun 		struct nd_intel_fw_activate_dimminfo *info)
612*4882a593Smuzhiyun {
613*4882a593Smuzhiyun 	struct {
614*4882a593Smuzhiyun 		struct nd_cmd_pkg pkg;
615*4882a593Smuzhiyun 		struct nd_intel_fw_activate_dimminfo cmd;
616*4882a593Smuzhiyun 	} nd_cmd = {
617*4882a593Smuzhiyun 		.pkg = {
618*4882a593Smuzhiyun 			.nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
619*4882a593Smuzhiyun 			.nd_family = NVDIMM_FAMILY_INTEL,
620*4882a593Smuzhiyun 			.nd_size_out =
621*4882a593Smuzhiyun 				sizeof(struct nd_intel_fw_activate_dimminfo),
622*4882a593Smuzhiyun 			.nd_fw_size =
623*4882a593Smuzhiyun 				sizeof(struct nd_intel_fw_activate_dimminfo),
624*4882a593Smuzhiyun 		},
625*4882a593Smuzhiyun 	};
626*4882a593Smuzhiyun 	int rc;
627*4882a593Smuzhiyun 
628*4882a593Smuzhiyun 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
629*4882a593Smuzhiyun 	*info = nd_cmd.cmd;
630*4882a593Smuzhiyun 	return rc;
631*4882a593Smuzhiyun }
632*4882a593Smuzhiyun 
intel_fwa_state(struct nvdimm * nvdimm)633*4882a593Smuzhiyun static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm)
634*4882a593Smuzhiyun {
635*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
636*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
637*4882a593Smuzhiyun 	struct nd_intel_fw_activate_dimminfo info;
638*4882a593Smuzhiyun 	int rc;
639*4882a593Smuzhiyun 
640*4882a593Smuzhiyun 	/*
641*4882a593Smuzhiyun 	 * Similar to the bus state, since activate is synchronous the
642*4882a593Smuzhiyun 	 * busy state should resolve within the context of 'activate'.
643*4882a593Smuzhiyun 	 */
644*4882a593Smuzhiyun 	switch (nfit_mem->fwa_state) {
645*4882a593Smuzhiyun 	case NVDIMM_FWA_INVALID:
646*4882a593Smuzhiyun 	case NVDIMM_FWA_BUSY:
647*4882a593Smuzhiyun 		break;
648*4882a593Smuzhiyun 	default:
649*4882a593Smuzhiyun 		/* If no activations occurred the old state is still valid */
650*4882a593Smuzhiyun 		if (nfit_mem->fwa_count == acpi_desc->fwa_count)
651*4882a593Smuzhiyun 			return nfit_mem->fwa_state;
652*4882a593Smuzhiyun 	}
653*4882a593Smuzhiyun 
654*4882a593Smuzhiyun 	rc = intel_fwa_dimminfo(nvdimm, &info);
655*4882a593Smuzhiyun 	if (rc)
656*4882a593Smuzhiyun 		return NVDIMM_FWA_INVALID;
657*4882a593Smuzhiyun 
658*4882a593Smuzhiyun 	switch (info.state) {
659*4882a593Smuzhiyun 	case ND_INTEL_FWA_IDLE:
660*4882a593Smuzhiyun 		nfit_mem->fwa_state = NVDIMM_FWA_IDLE;
661*4882a593Smuzhiyun 		break;
662*4882a593Smuzhiyun 	case ND_INTEL_FWA_BUSY:
663*4882a593Smuzhiyun 		nfit_mem->fwa_state = NVDIMM_FWA_BUSY;
664*4882a593Smuzhiyun 		break;
665*4882a593Smuzhiyun 	case ND_INTEL_FWA_ARMED:
666*4882a593Smuzhiyun 		nfit_mem->fwa_state = NVDIMM_FWA_ARMED;
667*4882a593Smuzhiyun 		break;
668*4882a593Smuzhiyun 	default:
669*4882a593Smuzhiyun 		nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
670*4882a593Smuzhiyun 		break;
671*4882a593Smuzhiyun 	}
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun 	switch (info.result) {
674*4882a593Smuzhiyun 	case ND_INTEL_DIMM_FWA_NONE:
675*4882a593Smuzhiyun 		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE;
676*4882a593Smuzhiyun 		break;
677*4882a593Smuzhiyun 	case ND_INTEL_DIMM_FWA_SUCCESS:
678*4882a593Smuzhiyun 		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS;
679*4882a593Smuzhiyun 		break;
680*4882a593Smuzhiyun 	case ND_INTEL_DIMM_FWA_NOTSTAGED:
681*4882a593Smuzhiyun 		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED;
682*4882a593Smuzhiyun 		break;
683*4882a593Smuzhiyun 	case ND_INTEL_DIMM_FWA_NEEDRESET:
684*4882a593Smuzhiyun 		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET;
685*4882a593Smuzhiyun 		break;
686*4882a593Smuzhiyun 	case ND_INTEL_DIMM_FWA_MEDIAFAILED:
687*4882a593Smuzhiyun 	case ND_INTEL_DIMM_FWA_ABORT:
688*4882a593Smuzhiyun 	case ND_INTEL_DIMM_FWA_NOTSUPP:
689*4882a593Smuzhiyun 	case ND_INTEL_DIMM_FWA_ERROR:
690*4882a593Smuzhiyun 	default:
691*4882a593Smuzhiyun 		nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL;
692*4882a593Smuzhiyun 		break;
693*4882a593Smuzhiyun 	}
694*4882a593Smuzhiyun 
695*4882a593Smuzhiyun 	nfit_mem->fwa_count = acpi_desc->fwa_count;
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun 	return nfit_mem->fwa_state;
698*4882a593Smuzhiyun }
699*4882a593Smuzhiyun 
intel_fwa_result(struct nvdimm * nvdimm)700*4882a593Smuzhiyun static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm)
701*4882a593Smuzhiyun {
702*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
703*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun 	if (nfit_mem->fwa_count == acpi_desc->fwa_count
706*4882a593Smuzhiyun 			&& nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID)
707*4882a593Smuzhiyun 		return nfit_mem->fwa_result;
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 	if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID)
710*4882a593Smuzhiyun 		return nfit_mem->fwa_result;
711*4882a593Smuzhiyun 
712*4882a593Smuzhiyun 	return NVDIMM_FWA_RESULT_INVALID;
713*4882a593Smuzhiyun }
714*4882a593Smuzhiyun 
intel_fwa_arm(struct nvdimm * nvdimm,enum nvdimm_fwa_trigger arm)715*4882a593Smuzhiyun static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
716*4882a593Smuzhiyun {
717*4882a593Smuzhiyun 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
718*4882a593Smuzhiyun 	struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
719*4882a593Smuzhiyun 	struct {
720*4882a593Smuzhiyun 		struct nd_cmd_pkg pkg;
721*4882a593Smuzhiyun 		struct nd_intel_fw_activate_arm cmd;
722*4882a593Smuzhiyun 	} nd_cmd = {
723*4882a593Smuzhiyun 		.pkg = {
724*4882a593Smuzhiyun 			.nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
725*4882a593Smuzhiyun 			.nd_family = NVDIMM_FAMILY_INTEL,
726*4882a593Smuzhiyun 			.nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
727*4882a593Smuzhiyun 			.nd_size_out =
728*4882a593Smuzhiyun 				sizeof(struct nd_intel_fw_activate_arm),
729*4882a593Smuzhiyun 			.nd_fw_size =
730*4882a593Smuzhiyun 				sizeof(struct nd_intel_fw_activate_arm),
731*4882a593Smuzhiyun 		},
732*4882a593Smuzhiyun 		.cmd = {
733*4882a593Smuzhiyun 			.activate_arm = arm == NVDIMM_FWA_ARM
734*4882a593Smuzhiyun 				? ND_INTEL_DIMM_FWA_ARM
735*4882a593Smuzhiyun 				: ND_INTEL_DIMM_FWA_DISARM,
736*4882a593Smuzhiyun 		},
737*4882a593Smuzhiyun 	};
738*4882a593Smuzhiyun 	int rc;
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 	switch (intel_fwa_state(nvdimm)) {
741*4882a593Smuzhiyun 	case NVDIMM_FWA_INVALID:
742*4882a593Smuzhiyun 		return -ENXIO;
743*4882a593Smuzhiyun 	case NVDIMM_FWA_BUSY:
744*4882a593Smuzhiyun 		return -EBUSY;
745*4882a593Smuzhiyun 	case NVDIMM_FWA_IDLE:
746*4882a593Smuzhiyun 		if (arm == NVDIMM_FWA_DISARM)
747*4882a593Smuzhiyun 			return 0;
748*4882a593Smuzhiyun 		break;
749*4882a593Smuzhiyun 	case NVDIMM_FWA_ARMED:
750*4882a593Smuzhiyun 		if (arm == NVDIMM_FWA_ARM)
751*4882a593Smuzhiyun 			return 0;
752*4882a593Smuzhiyun 		break;
753*4882a593Smuzhiyun 	default:
754*4882a593Smuzhiyun 		return -ENXIO;
755*4882a593Smuzhiyun 	}
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 	/*
758*4882a593Smuzhiyun 	 * Invalidate the bus-level state, now that we're committed to
759*4882a593Smuzhiyun 	 * changing the 'arm' state.
760*4882a593Smuzhiyun 	 */
761*4882a593Smuzhiyun 	acpi_desc->fwa_state = NVDIMM_FWA_INVALID;
762*4882a593Smuzhiyun 	nfit_mem->fwa_state = NVDIMM_FWA_INVALID;
763*4882a593Smuzhiyun 
764*4882a593Smuzhiyun 	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
765*4882a593Smuzhiyun 
766*4882a593Smuzhiyun 	dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM
767*4882a593Smuzhiyun 			? "arm" : "disarm", rc);
768*4882a593Smuzhiyun 	return rc;
769*4882a593Smuzhiyun }
770*4882a593Smuzhiyun 
771*4882a593Smuzhiyun static const struct nvdimm_fw_ops __intel_fw_ops = {
772*4882a593Smuzhiyun 	.activate_state = intel_fwa_state,
773*4882a593Smuzhiyun 	.activate_result = intel_fwa_result,
774*4882a593Smuzhiyun 	.arm = intel_fwa_arm,
775*4882a593Smuzhiyun };
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops;
778