// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. All rights reserved. */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cred.h>
#include <linux/key.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include "nd-core.h"
#include "nd.h"

#define NVDIMM_BASE_KEY	0
#define NVDIMM_NEW_KEY	1

static bool key_revalidate = true;
module_param(key_revalidate, bool, 0444);
MODULE_PARM_DESC(key_revalidate, "Require key validation at init.");

static const char zero_key[NVDIMM_PASSPHRASE_LEN];

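/*
 * Payload helpers: key_data() returns the decrypted passphrase of an
 * encrypted-type key and therefore requires the key semaphore to be
 * held for read; nvdimm_put_key() drops both the read lock and the
 * key reference taken by the lookup helpers below.
 */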
static void *key_data(struct key *key)
{
	struct encrypted_key_payload *epayload = dereference_key_locked(key);

	lockdep_assert_held_read(&key->sem);

	return epayload->decrypted_data;
}

static void nvdimm_put_key(struct key *key)
{
	if (!key)
		return;

	up_read(&key->sem);
	key_put(key);
}

/*
 * Retrieve kernel key for DIMM and request from user space if
 * necessary. Returns a key held for read and must be put by
 * nvdimm_put_key() before the usage goes out of scope.
 */
static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
{
	struct key *key = NULL;
	static const char NVDIMM_PREFIX[] = "nvdimm:";
	char desc[NVDIMM_KEY_DESC_LEN + sizeof(NVDIMM_PREFIX)];
	struct device *dev = &nvdimm->dev;

	sprintf(desc, "%s%s", NVDIMM_PREFIX, nvdimm->dimm_id);
	key = request_key(&key_type_encrypted, desc, "");
	if (IS_ERR(key)) {
		if (PTR_ERR(key) == -ENOKEY)
			dev_dbg(dev, "request_key() found no key\n");
		else
			dev_dbg(dev, "request_key() upcall failed\n");
		key = NULL;
	} else {
		struct encrypted_key_payload *epayload;

		down_read(&key->sem);
		epayload = dereference_key_locked(key);
		if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
			up_read(&key->sem);
			key_put(key);
			key = NULL;
		}
	}

	return key;
}

static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
		struct key **key)
{
	*key = nvdimm_request_key(nvdimm);
	if (!*key)
		return zero_key;

	return key_data(*key);
}

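/*
 * Look up a userspace-provided key by serial number and validate that
 * it is an encrypted-type key with a payload of exactly
 * NVDIMM_PASSPHRASE_LEN bytes. On success the key is returned locked
 * for read (with the given lockdep subclass) and must be released with
 * nvdimm_put_key().
 */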
static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
		key_serial_t id, int subclass)
{
	key_ref_t keyref;
	struct key *key;
	struct encrypted_key_payload *epayload;
	struct device *dev = &nvdimm->dev;

	keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
	if (IS_ERR(keyref))
		return NULL;

	key = key_ref_to_ptr(keyref);
	if (key->type != &key_type_encrypted) {
		key_put(key);
		return NULL;
	}

	dev_dbg(dev, "%s: key found: %#x\n", __func__, key_serial(key));

	down_read_nested(&key->sem, subclass);
	epayload = dereference_key_locked(key);
	if (epayload->decrypted_datalen != NVDIMM_PASSPHRASE_LEN) {
		up_read(&key->sem);
		key_put(key);
		key = NULL;
	}
	return key;
}

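/*
 * Resolve a key id passed in from sysfs into a passphrase payload. A
 * key id of 0 selects the all-zeroes default passphrase for the
 * "current key" slot (NVDIMM_BASE_KEY) and is rejected for the "new
 * key" slot.
 */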
static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
		key_serial_t id, int subclass, struct key **key)
{
	*key = NULL;
	if (id == 0) {
		if (subclass == NVDIMM_BASE_KEY)
			return zero_key;
		else
			return NULL;
	}

	*key = nvdimm_lookup_user_key(nvdimm, id, subclass);
	if (!*key)
		return NULL;

	return key_data(*key);
}

static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
{
	struct key *key;
	int rc;
	const void *data;

	if (!nvdimm->sec.ops->change_key)
		return -EOPNOTSUPP;

	data = nvdimm_get_key_payload(nvdimm, &key);

	/*
	 * Send the same key to the hardware as new and old key to
	 * verify that the key is good.
	 */
	rc = nvdimm->sec.ops->change_key(nvdimm, data, data, NVDIMM_USER);
	if (rc < 0) {
		nvdimm_put_key(key);
		return rc;
	}

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return 0;
}

static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key;
	const void *data;
	int rc;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
			|| !nvdimm->sec.flags)
		return -EIO;

	/* No need to go further if security is disabled */
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return 0;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	/*
	 * If the pre-OS has unlocked the DIMM, attempt to send the key
	 * from request_key() to the hardware for verification. Failure
	 * to revalidate the key against the hardware results in a
	 * freeze of the security configuration. I.e. if the OS does not
	 * have the key, security is being managed pre-OS.
	 */
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags)) {
		if (!key_revalidate)
			return 0;

		return nvdimm_key_revalidate(nvdimm);
	} else
		data = nvdimm_get_key_payload(nvdimm, &key);

	rc = nvdimm->sec.ops->unlock(nvdimm, data);
	dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}

int nvdimm_security_unlock(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int rc;

	nvdimm_bus_lock(dev);
	rc = __nvdimm_security_unlock(nvdimm);
	nvdimm_bus_unlock(dev);
	return rc;
}

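/*
 * Common precondition for the sysfs security operations: refuse to
 * proceed while the DIMM security state is frozen or while an
 * overwrite operation is still in flight.
 */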
static int check_security_state(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;

	if (test_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags)) {
		dev_dbg(dev, "Incorrect security state: %#lx\n",
				nvdimm->sec.flags);
		return -EIO;
	}

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_dbg(dev, "Security operation in progress.\n");
		return -EBUSY;
	}

	return 0;
}

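/*
 * The security_* helpers below implement the individual commands
 * parsed by nvdimm_security_store() and are all called with the bus
 * reconfig_mutex held.
 */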
static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->disable(nvdimm, data);
	dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}

static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
		unsigned int new_keyid,
		enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key, *newkey;
	int rc;
	const void *data, *newdata;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
			NVDIMM_NEW_KEY, &newkey);
	if (!newdata) {
		nvdimm_put_key(key);
		return -ENOKEY;
	}

	rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
	dev_dbg(dev, "key: %d %d update%s: %s\n",
			key_serial(key), key_serial(newkey),
			pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(newkey);
	nvdimm_put_key(key);
	if (pass_type == NVDIMM_MASTER)
		nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm,
				NVDIMM_MASTER);
	else
		nvdimm->sec.flags = nvdimm_security_flags(nvdimm,
				NVDIMM_USER);
	return rc;
}

static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
		enum nvdimm_passphrase_type pass_type)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags)
			&& pass_type == NVDIMM_MASTER) {
		dev_dbg(dev,
			"Attempt to secure erase in wrong master state.\n");
		return -EOPNOTSUPP;
	}

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
	dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
			pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	return rc;
}

static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
{
	struct device *dev = &nvdimm->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct key *key = NULL;
	int rc;
	const void *data;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
			|| !nvdimm->sec.flags)
		return -EOPNOTSUPP;

	rc = check_security_state(nvdimm);
	if (rc)
		return rc;

	data = nvdimm_get_user_key_payload(nvdimm, keyid,
			NVDIMM_BASE_KEY, &key);
	if (!data)
		return -ENOKEY;

	rc = nvdimm->sec.ops->overwrite(nvdimm, data);
	dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
			rc == 0 ? "success" : "fail");

	nvdimm_put_key(key);
	if (rc == 0) {
		set_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
		set_bit(NDD_WORK_PENDING, &nvdimm->flags);
		set_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags);
		/*
		 * Make sure we don't lose device while doing overwrite
		 * query.
		 */
		get_device(dev);
		queue_delayed_work(system_wq, &nvdimm->dwork, 0);
	}

	return rc;
}

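/*
 * Poll the DIMM for overwrite completion. While the hardware still
 * reports -EBUSY the work item is re-queued with a delay that grows by
 * 10 seconds per attempt and is capped at roughly 15 minutes. On
 * completion the overwrite/work-pending bits are cleared, the security
 * flags are refreshed, and the registered sysfs dirent is notified so
 * userspace pollers see the state change.
 */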
void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nvdimm->dev);
	int rc;
	unsigned int tmo;

	/* The bus lock should be held at the top level of the call stack */
	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);

	/*
	 * Abort and release device if we no longer have the overwrite
	 * flag set. It means the work has been canceled.
	 */
	if (!test_bit(NDD_WORK_PENDING, &nvdimm->flags))
		return;

	tmo = nvdimm->sec.overwrite_tmo;

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
			|| !nvdimm->sec.flags)
		return;

	rc = nvdimm->sec.ops->query_overwrite(nvdimm);
	if (rc == -EBUSY) {

		/* setup delayed work again */
		tmo += 10;
		queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
		nvdimm->sec.overwrite_tmo = min(15U * 60U, tmo);
		return;
	}

	if (rc < 0)
		dev_dbg(&nvdimm->dev, "overwrite failed\n");
	else
		dev_dbg(&nvdimm->dev, "overwrite completed\n");

	/*
	 * Mark the overwrite work done and update dimm security flags,
	 * then send a sysfs event notification to wake up userspace
	 * poll threads to pick up the changed state.
	 */
	nvdimm->sec.overwrite_tmo = 0;
	clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
	clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	if (nvdimm->sec.overwrite_state)
		sysfs_notify_dirent(nvdimm->sec.overwrite_state);
	put_device(&nvdimm->dev);
}

void nvdimm_security_overwrite_query(struct work_struct *work)
{
	struct nvdimm *nvdimm =
		container_of(work, typeof(*nvdimm), dwork.work);

	nvdimm_bus_lock(&nvdimm->dev);
	__nvdimm_security_overwrite_query(nvdimm);
	nvdimm_bus_unlock(&nvdimm->dev);
}

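/*
 * X-macro table of the supported security commands. Each C(id, name, args)
 * entry is expanded twice: once to generate the nvdimmsec_op_ids enum and
 * once to generate the ops[] table mapping the command string to the number
 * of expected tokens (the command itself plus key ids).
 */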
#define OPS \
	C( OP_FREEZE,		"freeze",		1), \
	C( OP_DISABLE,		"disable",		2), \
	C( OP_UPDATE,		"update",		3), \
	C( OP_ERASE,		"erase",		2), \
	C( OP_OVERWRITE,	"overwrite",		2), \
	C( OP_MASTER_UPDATE,	"master_update",	3), \
	C( OP_MASTER_ERASE,	"master_erase",		2)
#undef C
#define C(a, b, c) a
enum nvdimmsec_op_ids { OPS };
#undef C
#define C(a, b, c) { b, c }
static struct {
	const char *name;
	int args;
} ops[] = { OPS };
#undef C

#define SEC_CMD_SIZE 32
#define KEY_ID_SIZE 10

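/*
 * Parse and dispatch a security command written to the DIMM's sysfs
 * "security" attribute. The expected format is
 * "<command> [<keyid> [<new keyid>]]", where the key ids are the serial
 * numbers of encrypted keys in the kernel keyring, e.g. (path shown for
 * illustration only):
 *
 *	echo "update 42 43" > /sys/bus/nd/devices/nmem0/security
 *
 * Erase and overwrite are rejected with -EBUSY while the DIMM is in
 * active use.
 */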
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	ssize_t rc;
	char cmd[SEC_CMD_SIZE+1], keystr[KEY_ID_SIZE+1],
		nkeystr[KEY_ID_SIZE+1];
	unsigned int key, newkey;
	int i;

	rc = sscanf(buf, "%"__stringify(SEC_CMD_SIZE)"s"
			" %"__stringify(KEY_ID_SIZE)"s"
			" %"__stringify(KEY_ID_SIZE)"s",
			cmd, keystr, nkeystr);
	if (rc < 1)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(ops); i++)
		if (sysfs_streq(cmd, ops[i].name))
			break;
	if (i >= ARRAY_SIZE(ops))
		return -EINVAL;
	if (ops[i].args > 1)
		rc = kstrtouint(keystr, 0, &key);
	if (rc >= 0 && ops[i].args > 2)
		rc = kstrtouint(nkeystr, 0, &newkey);
	if (rc < 0)
		return rc;

	if (i == OP_FREEZE) {
		dev_dbg(dev, "freeze\n");
		rc = nvdimm_security_freeze(nvdimm);
	} else if (i == OP_DISABLE) {
		dev_dbg(dev, "disable %u\n", key);
		rc = security_disable(nvdimm, key);
	} else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
		dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
		rc = security_update(nvdimm, key, newkey, i == OP_UPDATE
				? NVDIMM_USER : NVDIMM_MASTER);
	} else if (i == OP_ERASE || i == OP_MASTER_ERASE) {
		dev_dbg(dev, "%s %u\n", ops[i].name, key);
		if (atomic_read(&nvdimm->busy)) {
			dev_dbg(dev, "Unable to secure erase while DIMM active.\n");
			return -EBUSY;
		}
		rc = security_erase(nvdimm, key, i == OP_ERASE
				? NVDIMM_USER : NVDIMM_MASTER);
	} else if (i == OP_OVERWRITE) {
		dev_dbg(dev, "overwrite %u\n", key);
		if (atomic_read(&nvdimm->busy)) {
			dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
			return -EBUSY;
		}
		rc = security_overwrite(nvdimm, key);
	} else
		return -EINVAL;

	if (rc == 0)
		rc = len;
	return rc;
}