/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __ND_CORE_H__
#define __ND_CORE_H__
#include <linux/libnvdimm.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/nd.h>
#include "nd.h"

extern struct list_head nvdimm_bus_list;
extern struct mutex nvdimm_bus_list_mutex;
extern int nvdimm_major;
extern struct workqueue_struct *nvdimm_wq;

struct nvdimm_bus {
	struct nvdimm_bus_descriptor *nd_desc;
	wait_queue_head_t wait;
	struct list_head list;
	struct device dev;
	int id, probe_active;
	atomic_t ioctl_active;
	struct list_head mapping_list;
	struct mutex reconfig_mutex;
	struct badrange badrange;
};

struct nvdimm {
	unsigned long flags;
	void *provider_data;
	unsigned long cmd_mask;
	struct device dev;
	atomic_t busy;
	int id, num_flush;
	struct resource *flush_wpq;
	const char *dimm_id;
	struct {
		const struct nvdimm_security_ops *ops;
		unsigned long flags;
		unsigned long ext_flags;
		unsigned int overwrite_tmo;
		struct kernfs_node *overwrite_state;
	} sec;
	struct delayed_work dwork;
	const struct nvdimm_fw_ops *fw_ops;
};

static inline unsigned long nvdimm_security_flags(
		struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
{
	u64 flags;
	const u64 state_flags = 1UL << NVDIMM_SECURITY_DISABLED
		| 1UL << NVDIMM_SECURITY_LOCKED
		| 1UL << NVDIMM_SECURITY_UNLOCKED
		| 1UL << NVDIMM_SECURITY_OVERWRITE;

	if (!nvdimm->sec.ops)
		return 0;

	flags = nvdimm->sec.ops->get_flags(nvdimm, ptype);
	/* disabled, locked, unlocked, and overwrite are mutually exclusive */
	dev_WARN_ONCE(&nvdimm->dev, hweight64(flags & state_flags) > 1,
			"reported invalid security state: %#llx\n",
			(unsigned long long) flags);
	return flags;
}
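
/*
 * Usage sketch (illustrative only; the caller and fast path below are
 * hypothetical, not taken from this header): consumers treat the return
 * value as a mask of NVDIMM_SECURITY_* bits and test the single state they
 * care about, for example:
 *
 *	unsigned long flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
 *
 *	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &flags))
 *		return 0;	(hypothetical "nothing to do" fast path)
 */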
int nvdimm_security_freeze(struct nvdimm *nvdimm);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len);
void nvdimm_security_overwrite_query(struct work_struct *work);
#else
static inline ssize_t nvdimm_security_store(struct device *dev,
		const char *buf, size_t len)
{
	return -EOPNOTSUPP;
}
static inline void nvdimm_security_overwrite_query(struct work_struct *work)
{
}
#endif

/**
 * struct blk_alloc_info - tracking info for BLK dpa scanning
 * @nd_mapping: blk region mapping boundaries
 * @available: decremented in alias_dpa_busy as aliased PMEM is scanned
 * @busy: decremented in blk_dpa_busy to account for ranges already
 * 	  handled by alias_dpa_busy
 * @res: alias_dpa_busy interprets this as a free space range that needs to
 * 	 be truncated to the valid BLK allocation starting DPA; blk_dpa_busy
 * 	 treats it as a busy range that needs the aliased PMEM ranges
 * 	 truncated.
 */
struct blk_alloc_info {
	struct nd_mapping *nd_mapping;
	resource_size_t available, busy;
	struct resource *res;
};
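
/*
 * Illustrative sketch (simplified and hedged, not copied from this header)
 * of how a BLK capacity scan might seed this structure and walk the bus
 * with the alias_dpa_busy() callback declared below:
 *
 *	struct blk_alloc_info info = {
 *		.nd_mapping = nd_mapping,
 *		.available = nd_mapping->size,
 *	};
 *
 *	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
 *
 * After the walk, info.available reflects the BLK capacity that is not
 * shadowed by aliased PMEM allocations.
 */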

bool is_nvdimm(struct device *dev);
bool is_nd_pmem(struct device *dev);
bool is_nd_volatile(struct device *dev);
bool is_nd_blk(struct device *dev);
static inline bool is_nd_region(struct device *dev)
{
	return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev);
}
static inline bool is_memory(struct device *dev)
{
	return is_nd_pmem(dev) || is_nd_volatile(dev);
}
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
void nvdimm_devs_exit(void);
struct nd_region;
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev);
void nd_region_create_ns_seed(struct nd_region *nd_region);
void nd_region_create_btt_seed(struct nd_region *nd_region);
void nd_region_create_pfn_seed(struct nd_region *nd_region);
void nd_region_create_dax_seed(struct nd_region *nd_region);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
void nd_synchronize(void);
void __nd_device_register(struct device *dev);
struct nd_label_id;
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags);
bool nd_is_uuid_unique(struct device *dev, u8 *uuid);
struct nd_region;
struct nvdimm_drvdata;
struct nd_mapping;
void nd_mapping_free_labels(struct nd_mapping *nd_mapping);

int __reserve_free_pmem(struct device *dev, void *data);
void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		       struct nd_mapping *nd_mapping);

resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
					   struct nd_mapping *nd_mapping);
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap);
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size);
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id);
int alias_dpa_busy(struct device *dev, void *data);
struct resource *nsblk_add_resource(struct nd_region *nd_region,
		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
		resource_size_t start);
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
void get_ndd(struct nvdimm_drvdata *ndd);
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns);
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns);
ssize_t nd_namespace_store(struct device *dev,
		struct nd_namespace_common **_ndns, const char *buf,
		size_t len);
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
bool is_nvdimm_bus(struct device *dev);

#if IS_ENABLED(CONFIG_ND_CLAIM)
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
		resource_size_t size);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline int devm_nsio_enable(struct device *dev,
		struct nd_namespace_io *nsio, resource_size_t size)
{
	return -ENXIO;
}

static inline void devm_nsio_disable(struct device *dev,
		struct nd_namespace_io *nsio)
{
}
#endif
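
/*
 * Typical caller shape (illustrative sketch; the error handling shown is a
 * plausible pattern, not copied from a specific driver): claim-aware
 * consumers enable the raw namespace I/O window and rely on the -ENXIO
 * stubs above when CONFIG_ND_CLAIM is disabled:
 *
 *	rc = devm_nsio_enable(dev, nsio, size);
 *	if (rc)
 *		return rc;
 *	...
 *	devm_nsio_disable(dev, nsio);	(teardown / error unwind path)
 */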

#ifdef CONFIG_PROVE_LOCKING
extern struct class *nd_class;

enum {
	LOCK_BUS,
	LOCK_NDCTL,
	LOCK_REGION,
	LOCK_DIMM = LOCK_REGION,
	LOCK_NAMESPACE,
	LOCK_CLAIM,
};

static inline void debug_nvdimm_lock(struct device *dev)
{
	if (is_nd_region(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION);
	else if (is_nvdimm(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM);
	else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM);
	else if (dev->parent && (is_nd_region(dev->parent)))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE);
	else if (is_nvdimm_bus(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS);
	else if (dev->class && dev->class == nd_class)
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL);
	else
		dev_WARN(dev, "unknown lock level\n");
}

static inline void debug_nvdimm_unlock(struct device *dev)
{
	mutex_unlock(&dev->lockdep_mutex);
}

static inline void nd_device_lock(struct device *dev)
{
	device_lock(dev);
	debug_nvdimm_lock(dev);
}

static inline void nd_device_unlock(struct device *dev)
{
	debug_nvdimm_unlock(dev);
	device_unlock(dev);
}
#else
static inline void nd_device_lock(struct device *dev)
{
	device_lock(dev);
}

static inline void nd_device_unlock(struct device *dev)
{
	device_unlock(dev);
}

static inline void debug_nvdimm_lock(struct device *dev)
{
}

static inline void debug_nvdimm_unlock(struct device *dev)
{
}
#endif
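
/*
 * Usage sketch (illustrative; some_reconfigure_op() is a hypothetical
 * helper): sysfs attribute and probe/remove paths in this subsystem wrap
 * device_lock() with these helpers so that, under CONFIG_PROVE_LOCKING,
 * lockdep is taught the bus -> region/dimm -> namespace -> claim nesting
 * declared above:
 *
 *	nd_device_lock(dev);
 *	rc = some_reconfigure_op(dev, buf, len);
 *	nd_device_unlock(dev);
 */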
#endif /* __ND_CORE_H__ */