1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * nvmem framework core.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
6*4882a593Smuzhiyun * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/device.h>
10*4882a593Smuzhiyun #include <linux/export.h>
11*4882a593Smuzhiyun #include <linux/fs.h>
12*4882a593Smuzhiyun #include <linux/idr.h>
13*4882a593Smuzhiyun #include <linux/init.h>
14*4882a593Smuzhiyun #include <linux/kref.h>
15*4882a593Smuzhiyun #include <linux/module.h>
16*4882a593Smuzhiyun #include <linux/nvmem-consumer.h>
17*4882a593Smuzhiyun #include <linux/nvmem-provider.h>
18*4882a593Smuzhiyun #include <linux/gpio/consumer.h>
19*4882a593Smuzhiyun #include <linux/of.h>
20*4882a593Smuzhiyun #include <linux/slab.h>
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun struct nvmem_device {
23*4882a593Smuzhiyun struct module *owner;
24*4882a593Smuzhiyun struct device dev;
25*4882a593Smuzhiyun int stride;
26*4882a593Smuzhiyun int word_size;
27*4882a593Smuzhiyun int id;
28*4882a593Smuzhiyun struct kref refcnt;
29*4882a593Smuzhiyun size_t size;
30*4882a593Smuzhiyun bool read_only;
31*4882a593Smuzhiyun bool root_only;
32*4882a593Smuzhiyun int flags;
33*4882a593Smuzhiyun enum nvmem_type type;
34*4882a593Smuzhiyun struct bin_attribute eeprom;
35*4882a593Smuzhiyun struct device *base_dev;
36*4882a593Smuzhiyun struct list_head cells;
37*4882a593Smuzhiyun nvmem_reg_read_t reg_read;
38*4882a593Smuzhiyun nvmem_reg_write_t reg_write;
39*4882a593Smuzhiyun struct gpio_desc *wp_gpio;
40*4882a593Smuzhiyun void *priv;
41*4882a593Smuzhiyun };
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun #define FLAG_COMPAT BIT(0)
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun struct nvmem_cell {
48*4882a593Smuzhiyun const char *name;
49*4882a593Smuzhiyun int offset;
50*4882a593Smuzhiyun int bytes;
51*4882a593Smuzhiyun int bit_offset;
52*4882a593Smuzhiyun int nbits;
53*4882a593Smuzhiyun struct device_node *np;
54*4882a593Smuzhiyun struct nvmem_device *nvmem;
55*4882a593Smuzhiyun struct list_head node;
56*4882a593Smuzhiyun };
57*4882a593Smuzhiyun
/* Protects every nvmem_device::cells list. */
static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

/* Protects nvmem_cell_tables (provider-supplied cell definitions). */
static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

/* Protects nvmem_lookup_list (consumer cell lookups). */
static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

/* Notified on NVMEM_ADD/NVMEM_REMOVE and NVMEM_CELL_ADD/NVMEM_CELL_REMOVE. */
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
68*4882a593Smuzhiyun
/*
 * Read @bytes at @offset through the provider's reg_read callback.
 * Returns 0 on success, -EINVAL if the device has no read callback.
 */
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}
77*4882a593Smuzhiyun
/*
 * Write @bytes at @offset through the provider's reg_write callback,
 * de-asserting the write-protect GPIO (active high) around the access.
 * gpiod_set_value_cansleep() is a no-op for a NULL descriptor, so the
 * GPIO is optional.  Returns 0 on success, -EINVAL without a callback.
 */
static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun #ifdef CONFIG_NVMEM_SYSFS
94*4882a593Smuzhiyun static const char * const nvmem_type_str[] = {
95*4882a593Smuzhiyun [NVMEM_TYPE_UNKNOWN] = "Unknown",
96*4882a593Smuzhiyun [NVMEM_TYPE_EEPROM] = "EEPROM",
97*4882a593Smuzhiyun [NVMEM_TYPE_OTP] = "OTP",
98*4882a593Smuzhiyun [NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
99*4882a593Smuzhiyun };
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_LOCK_ALLOC
102*4882a593Smuzhiyun static struct lock_class_key eeprom_lock_key;
103*4882a593Smuzhiyun #endif
104*4882a593Smuzhiyun
type_show(struct device * dev,struct device_attribute * attr,char * buf)105*4882a593Smuzhiyun static ssize_t type_show(struct device *dev,
106*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun struct nvmem_device *nvmem = to_nvmem_device(dev);
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun static DEVICE_ATTR_RO(type);
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun static struct attribute *nvmem_attrs[] = {
116*4882a593Smuzhiyun &dev_attr_type.attr,
117*4882a593Smuzhiyun NULL,
118*4882a593Smuzhiyun };
119*4882a593Smuzhiyun
bin_attr_nvmem_read(struct file * filp,struct kobject * kobj,struct bin_attribute * attr,char * buf,loff_t pos,size_t count)120*4882a593Smuzhiyun static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
121*4882a593Smuzhiyun struct bin_attribute *attr, char *buf,
122*4882a593Smuzhiyun loff_t pos, size_t count)
123*4882a593Smuzhiyun {
124*4882a593Smuzhiyun struct device *dev;
125*4882a593Smuzhiyun struct nvmem_device *nvmem;
126*4882a593Smuzhiyun int rc;
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun if (attr->private)
129*4882a593Smuzhiyun dev = attr->private;
130*4882a593Smuzhiyun else
131*4882a593Smuzhiyun dev = kobj_to_dev(kobj);
132*4882a593Smuzhiyun nvmem = to_nvmem_device(dev);
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun /* Stop the user from reading */
135*4882a593Smuzhiyun if (pos >= nvmem->size)
136*4882a593Smuzhiyun return 0;
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun if (!IS_ALIGNED(pos, nvmem->stride))
139*4882a593Smuzhiyun return -EINVAL;
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun if (count < nvmem->word_size)
142*4882a593Smuzhiyun return -EINVAL;
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun if (pos + count > nvmem->size)
145*4882a593Smuzhiyun count = nvmem->size - pos;
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun count = round_down(count, nvmem->word_size);
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun if (!nvmem->reg_read)
150*4882a593Smuzhiyun return -EPERM;
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun rc = nvmem_reg_read(nvmem, pos, buf, count);
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun if (rc)
155*4882a593Smuzhiyun return rc;
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun return count;
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun
bin_attr_nvmem_write(struct file * filp,struct kobject * kobj,struct bin_attribute * attr,char * buf,loff_t pos,size_t count)160*4882a593Smuzhiyun static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
161*4882a593Smuzhiyun struct bin_attribute *attr, char *buf,
162*4882a593Smuzhiyun loff_t pos, size_t count)
163*4882a593Smuzhiyun {
164*4882a593Smuzhiyun struct device *dev;
165*4882a593Smuzhiyun struct nvmem_device *nvmem;
166*4882a593Smuzhiyun int rc;
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun if (attr->private)
169*4882a593Smuzhiyun dev = attr->private;
170*4882a593Smuzhiyun else
171*4882a593Smuzhiyun dev = kobj_to_dev(kobj);
172*4882a593Smuzhiyun nvmem = to_nvmem_device(dev);
173*4882a593Smuzhiyun
174*4882a593Smuzhiyun /* Stop the user from writing */
175*4882a593Smuzhiyun if (pos >= nvmem->size)
176*4882a593Smuzhiyun return -EFBIG;
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun if (!IS_ALIGNED(pos, nvmem->stride))
179*4882a593Smuzhiyun return -EINVAL;
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun if (count < nvmem->word_size)
182*4882a593Smuzhiyun return -EINVAL;
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun if (pos + count > nvmem->size)
185*4882a593Smuzhiyun count = nvmem->size - pos;
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun count = round_down(count, nvmem->word_size);
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun if (!nvmem->reg_write)
190*4882a593Smuzhiyun return -EPERM;
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun rc = nvmem_reg_write(nvmem, pos, buf, count);
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun if (rc)
195*4882a593Smuzhiyun return rc;
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun return count;
198*4882a593Smuzhiyun }
199*4882a593Smuzhiyun
nvmem_bin_attr_get_umode(struct nvmem_device * nvmem)200*4882a593Smuzhiyun static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun umode_t mode = 0400;
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun if (!nvmem->root_only)
205*4882a593Smuzhiyun mode |= 0044;
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun if (!nvmem->read_only)
208*4882a593Smuzhiyun mode |= 0200;
209*4882a593Smuzhiyun
210*4882a593Smuzhiyun if (!nvmem->reg_write)
211*4882a593Smuzhiyun mode &= ~0200;
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun if (!nvmem->reg_read)
214*4882a593Smuzhiyun mode &= ~0444;
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun return mode;
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun
nvmem_bin_attr_is_visible(struct kobject * kobj,struct bin_attribute * attr,int i)219*4882a593Smuzhiyun static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
220*4882a593Smuzhiyun struct bin_attribute *attr, int i)
221*4882a593Smuzhiyun {
222*4882a593Smuzhiyun struct device *dev = kobj_to_dev(kobj);
223*4882a593Smuzhiyun struct nvmem_device *nvmem = to_nvmem_device(dev);
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun attr->size = nvmem->size;
226*4882a593Smuzhiyun
227*4882a593Smuzhiyun return nvmem_bin_attr_get_umode(nvmem);
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun /* default read/write permissions */
231*4882a593Smuzhiyun static struct bin_attribute bin_attr_rw_nvmem = {
232*4882a593Smuzhiyun .attr = {
233*4882a593Smuzhiyun .name = "nvmem",
234*4882a593Smuzhiyun .mode = 0644,
235*4882a593Smuzhiyun },
236*4882a593Smuzhiyun .read = bin_attr_nvmem_read,
237*4882a593Smuzhiyun .write = bin_attr_nvmem_write,
238*4882a593Smuzhiyun };
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun static struct bin_attribute *nvmem_bin_attributes[] = {
241*4882a593Smuzhiyun &bin_attr_rw_nvmem,
242*4882a593Smuzhiyun NULL,
243*4882a593Smuzhiyun };
244*4882a593Smuzhiyun
245*4882a593Smuzhiyun static const struct attribute_group nvmem_bin_group = {
246*4882a593Smuzhiyun .bin_attrs = nvmem_bin_attributes,
247*4882a593Smuzhiyun .attrs = nvmem_attrs,
248*4882a593Smuzhiyun .is_bin_visible = nvmem_bin_attr_is_visible,
249*4882a593Smuzhiyun };
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun static const struct attribute_group *nvmem_dev_groups[] = {
252*4882a593Smuzhiyun &nvmem_bin_group,
253*4882a593Smuzhiyun NULL,
254*4882a593Smuzhiyun };
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
257*4882a593Smuzhiyun .attr = {
258*4882a593Smuzhiyun .name = "eeprom",
259*4882a593Smuzhiyun },
260*4882a593Smuzhiyun .read = bin_attr_nvmem_read,
261*4882a593Smuzhiyun .write = bin_attr_nvmem_write,
262*4882a593Smuzhiyun };
263*4882a593Smuzhiyun
264*4882a593Smuzhiyun /*
265*4882a593Smuzhiyun * nvmem_setup_compat() - Create an additional binary entry in
266*4882a593Smuzhiyun * drivers sys directory, to be backwards compatible with the older
267*4882a593Smuzhiyun * drivers/misc/eeprom drivers.
268*4882a593Smuzhiyun */
nvmem_sysfs_setup_compat(struct nvmem_device * nvmem,const struct nvmem_config * config)269*4882a593Smuzhiyun static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
270*4882a593Smuzhiyun const struct nvmem_config *config)
271*4882a593Smuzhiyun {
272*4882a593Smuzhiyun int rval;
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun if (!config->compat)
275*4882a593Smuzhiyun return 0;
276*4882a593Smuzhiyun
277*4882a593Smuzhiyun if (!config->base_dev)
278*4882a593Smuzhiyun return -EINVAL;
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
281*4882a593Smuzhiyun nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
282*4882a593Smuzhiyun nvmem->eeprom.size = nvmem->size;
283*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_LOCK_ALLOC
284*4882a593Smuzhiyun nvmem->eeprom.attr.key = &eeprom_lock_key;
285*4882a593Smuzhiyun #endif
286*4882a593Smuzhiyun nvmem->eeprom.private = &nvmem->dev;
287*4882a593Smuzhiyun nvmem->base_dev = config->base_dev;
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
290*4882a593Smuzhiyun if (rval) {
291*4882a593Smuzhiyun dev_err(&nvmem->dev,
292*4882a593Smuzhiyun "Failed to create eeprom binary file %d\n", rval);
293*4882a593Smuzhiyun return rval;
294*4882a593Smuzhiyun }
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun nvmem->flags |= FLAG_COMPAT;
297*4882a593Smuzhiyun
298*4882a593Smuzhiyun return 0;
299*4882a593Smuzhiyun }
300*4882a593Smuzhiyun
nvmem_sysfs_remove_compat(struct nvmem_device * nvmem,const struct nvmem_config * config)301*4882a593Smuzhiyun static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
302*4882a593Smuzhiyun const struct nvmem_config *config)
303*4882a593Smuzhiyun {
304*4882a593Smuzhiyun if (config->compat)
305*4882a593Smuzhiyun device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun #else /* CONFIG_NVMEM_SYSFS */
309*4882a593Smuzhiyun
nvmem_sysfs_setup_compat(struct nvmem_device * nvmem,const struct nvmem_config * config)310*4882a593Smuzhiyun static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
311*4882a593Smuzhiyun const struct nvmem_config *config)
312*4882a593Smuzhiyun {
313*4882a593Smuzhiyun return -ENOSYS;
314*4882a593Smuzhiyun }
nvmem_sysfs_remove_compat(struct nvmem_device * nvmem,const struct nvmem_config * config)315*4882a593Smuzhiyun static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
316*4882a593Smuzhiyun const struct nvmem_config *config)
317*4882a593Smuzhiyun {
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun
320*4882a593Smuzhiyun #endif /* CONFIG_NVMEM_SYSFS */
321*4882a593Smuzhiyun
nvmem_release(struct device * dev)322*4882a593Smuzhiyun static void nvmem_release(struct device *dev)
323*4882a593Smuzhiyun {
324*4882a593Smuzhiyun struct nvmem_device *nvmem = to_nvmem_device(dev);
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun ida_free(&nvmem_ida, nvmem->id);
327*4882a593Smuzhiyun gpiod_put(nvmem->wp_gpio);
328*4882a593Smuzhiyun kfree(nvmem);
329*4882a593Smuzhiyun }
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun static const struct device_type nvmem_provider_type = {
332*4882a593Smuzhiyun .release = nvmem_release,
333*4882a593Smuzhiyun };
334*4882a593Smuzhiyun
335*4882a593Smuzhiyun static struct bus_type nvmem_bus_type = {
336*4882a593Smuzhiyun .name = "nvmem",
337*4882a593Smuzhiyun };
338*4882a593Smuzhiyun
nvmem_cell_drop(struct nvmem_cell * cell)339*4882a593Smuzhiyun static void nvmem_cell_drop(struct nvmem_cell *cell)
340*4882a593Smuzhiyun {
341*4882a593Smuzhiyun blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
342*4882a593Smuzhiyun mutex_lock(&nvmem_mutex);
343*4882a593Smuzhiyun list_del(&cell->node);
344*4882a593Smuzhiyun mutex_unlock(&nvmem_mutex);
345*4882a593Smuzhiyun of_node_put(cell->np);
346*4882a593Smuzhiyun kfree_const(cell->name);
347*4882a593Smuzhiyun kfree(cell);
348*4882a593Smuzhiyun }
349*4882a593Smuzhiyun
nvmem_device_remove_all_cells(const struct nvmem_device * nvmem)350*4882a593Smuzhiyun static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
351*4882a593Smuzhiyun {
352*4882a593Smuzhiyun struct nvmem_cell *cell, *p;
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun list_for_each_entry_safe(cell, p, &nvmem->cells, node)
355*4882a593Smuzhiyun nvmem_cell_drop(cell);
356*4882a593Smuzhiyun }
357*4882a593Smuzhiyun
nvmem_cell_add(struct nvmem_cell * cell)358*4882a593Smuzhiyun static void nvmem_cell_add(struct nvmem_cell *cell)
359*4882a593Smuzhiyun {
360*4882a593Smuzhiyun mutex_lock(&nvmem_mutex);
361*4882a593Smuzhiyun list_add_tail(&cell->node, &cell->nvmem->cells);
362*4882a593Smuzhiyun mutex_unlock(&nvmem_mutex);
363*4882a593Smuzhiyun blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun
nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device * nvmem,const struct nvmem_cell_info * info,struct nvmem_cell * cell)366*4882a593Smuzhiyun static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
367*4882a593Smuzhiyun const struct nvmem_cell_info *info,
368*4882a593Smuzhiyun struct nvmem_cell *cell)
369*4882a593Smuzhiyun {
370*4882a593Smuzhiyun cell->nvmem = nvmem;
371*4882a593Smuzhiyun cell->offset = info->offset;
372*4882a593Smuzhiyun cell->bytes = info->bytes;
373*4882a593Smuzhiyun cell->name = info->name;
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun cell->bit_offset = info->bit_offset;
376*4882a593Smuzhiyun cell->nbits = info->nbits;
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun if (cell->nbits)
379*4882a593Smuzhiyun cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
380*4882a593Smuzhiyun BITS_PER_BYTE);
381*4882a593Smuzhiyun
382*4882a593Smuzhiyun if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
383*4882a593Smuzhiyun dev_err(&nvmem->dev,
384*4882a593Smuzhiyun "cell %s unaligned to nvmem stride %d\n",
385*4882a593Smuzhiyun cell->name ?: "<unknown>", nvmem->stride);
386*4882a593Smuzhiyun return -EINVAL;
387*4882a593Smuzhiyun }
388*4882a593Smuzhiyun
389*4882a593Smuzhiyun return 0;
390*4882a593Smuzhiyun }
391*4882a593Smuzhiyun
nvmem_cell_info_to_nvmem_cell(struct nvmem_device * nvmem,const struct nvmem_cell_info * info,struct nvmem_cell * cell)392*4882a593Smuzhiyun static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
393*4882a593Smuzhiyun const struct nvmem_cell_info *info,
394*4882a593Smuzhiyun struct nvmem_cell *cell)
395*4882a593Smuzhiyun {
396*4882a593Smuzhiyun int err;
397*4882a593Smuzhiyun
398*4882a593Smuzhiyun err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
399*4882a593Smuzhiyun if (err)
400*4882a593Smuzhiyun return err;
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun cell->name = kstrdup_const(info->name, GFP_KERNEL);
403*4882a593Smuzhiyun if (!cell->name)
404*4882a593Smuzhiyun return -ENOMEM;
405*4882a593Smuzhiyun
406*4882a593Smuzhiyun return 0;
407*4882a593Smuzhiyun }
408*4882a593Smuzhiyun
409*4882a593Smuzhiyun /**
410*4882a593Smuzhiyun * nvmem_add_cells() - Add cell information to an nvmem device
411*4882a593Smuzhiyun *
412*4882a593Smuzhiyun * @nvmem: nvmem device to add cells to.
413*4882a593Smuzhiyun * @info: nvmem cell info to add to the device
414*4882a593Smuzhiyun * @ncells: number of cells in info
415*4882a593Smuzhiyun *
416*4882a593Smuzhiyun * Return: 0 or negative error code on failure.
417*4882a593Smuzhiyun */
nvmem_add_cells(struct nvmem_device * nvmem,const struct nvmem_cell_info * info,int ncells)418*4882a593Smuzhiyun static int nvmem_add_cells(struct nvmem_device *nvmem,
419*4882a593Smuzhiyun const struct nvmem_cell_info *info,
420*4882a593Smuzhiyun int ncells)
421*4882a593Smuzhiyun {
422*4882a593Smuzhiyun struct nvmem_cell **cells;
423*4882a593Smuzhiyun int i, rval;
424*4882a593Smuzhiyun
425*4882a593Smuzhiyun cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
426*4882a593Smuzhiyun if (!cells)
427*4882a593Smuzhiyun return -ENOMEM;
428*4882a593Smuzhiyun
429*4882a593Smuzhiyun for (i = 0; i < ncells; i++) {
430*4882a593Smuzhiyun cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
431*4882a593Smuzhiyun if (!cells[i]) {
432*4882a593Smuzhiyun rval = -ENOMEM;
433*4882a593Smuzhiyun goto err;
434*4882a593Smuzhiyun }
435*4882a593Smuzhiyun
436*4882a593Smuzhiyun rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
437*4882a593Smuzhiyun if (rval) {
438*4882a593Smuzhiyun kfree(cells[i]);
439*4882a593Smuzhiyun goto err;
440*4882a593Smuzhiyun }
441*4882a593Smuzhiyun
442*4882a593Smuzhiyun nvmem_cell_add(cells[i]);
443*4882a593Smuzhiyun }
444*4882a593Smuzhiyun
445*4882a593Smuzhiyun /* remove tmp array */
446*4882a593Smuzhiyun kfree(cells);
447*4882a593Smuzhiyun
448*4882a593Smuzhiyun return 0;
449*4882a593Smuzhiyun err:
450*4882a593Smuzhiyun while (i--)
451*4882a593Smuzhiyun nvmem_cell_drop(cells[i]);
452*4882a593Smuzhiyun
453*4882a593Smuzhiyun kfree(cells);
454*4882a593Smuzhiyun
455*4882a593Smuzhiyun return rval;
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun
458*4882a593Smuzhiyun /**
459*4882a593Smuzhiyun * nvmem_register_notifier() - Register a notifier block for nvmem events.
460*4882a593Smuzhiyun *
461*4882a593Smuzhiyun * @nb: notifier block to be called on nvmem events.
462*4882a593Smuzhiyun *
463*4882a593Smuzhiyun * Return: 0 on success, negative error number on failure.
464*4882a593Smuzhiyun */
nvmem_register_notifier(struct notifier_block * nb)465*4882a593Smuzhiyun int nvmem_register_notifier(struct notifier_block *nb)
466*4882a593Smuzhiyun {
467*4882a593Smuzhiyun return blocking_notifier_chain_register(&nvmem_notifier, nb);
468*4882a593Smuzhiyun }
469*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_register_notifier);
470*4882a593Smuzhiyun
471*4882a593Smuzhiyun /**
472*4882a593Smuzhiyun * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
473*4882a593Smuzhiyun *
474*4882a593Smuzhiyun * @nb: notifier block to be unregistered.
475*4882a593Smuzhiyun *
476*4882a593Smuzhiyun * Return: 0 on success, negative error number on failure.
477*4882a593Smuzhiyun */
nvmem_unregister_notifier(struct notifier_block * nb)478*4882a593Smuzhiyun int nvmem_unregister_notifier(struct notifier_block *nb)
479*4882a593Smuzhiyun {
480*4882a593Smuzhiyun return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
481*4882a593Smuzhiyun }
482*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
483*4882a593Smuzhiyun
nvmem_add_cells_from_table(struct nvmem_device * nvmem)484*4882a593Smuzhiyun static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
485*4882a593Smuzhiyun {
486*4882a593Smuzhiyun const struct nvmem_cell_info *info;
487*4882a593Smuzhiyun struct nvmem_cell_table *table;
488*4882a593Smuzhiyun struct nvmem_cell *cell;
489*4882a593Smuzhiyun int rval = 0, i;
490*4882a593Smuzhiyun
491*4882a593Smuzhiyun mutex_lock(&nvmem_cell_mutex);
492*4882a593Smuzhiyun list_for_each_entry(table, &nvmem_cell_tables, node) {
493*4882a593Smuzhiyun if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
494*4882a593Smuzhiyun for (i = 0; i < table->ncells; i++) {
495*4882a593Smuzhiyun info = &table->cells[i];
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun cell = kzalloc(sizeof(*cell), GFP_KERNEL);
498*4882a593Smuzhiyun if (!cell) {
499*4882a593Smuzhiyun rval = -ENOMEM;
500*4882a593Smuzhiyun goto out;
501*4882a593Smuzhiyun }
502*4882a593Smuzhiyun
503*4882a593Smuzhiyun rval = nvmem_cell_info_to_nvmem_cell(nvmem,
504*4882a593Smuzhiyun info,
505*4882a593Smuzhiyun cell);
506*4882a593Smuzhiyun if (rval) {
507*4882a593Smuzhiyun kfree(cell);
508*4882a593Smuzhiyun goto out;
509*4882a593Smuzhiyun }
510*4882a593Smuzhiyun
511*4882a593Smuzhiyun nvmem_cell_add(cell);
512*4882a593Smuzhiyun }
513*4882a593Smuzhiyun }
514*4882a593Smuzhiyun }
515*4882a593Smuzhiyun
516*4882a593Smuzhiyun out:
517*4882a593Smuzhiyun mutex_unlock(&nvmem_cell_mutex);
518*4882a593Smuzhiyun return rval;
519*4882a593Smuzhiyun }
520*4882a593Smuzhiyun
521*4882a593Smuzhiyun static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device * nvmem,const char * cell_id)522*4882a593Smuzhiyun nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
523*4882a593Smuzhiyun {
524*4882a593Smuzhiyun struct nvmem_cell *iter, *cell = NULL;
525*4882a593Smuzhiyun
526*4882a593Smuzhiyun mutex_lock(&nvmem_mutex);
527*4882a593Smuzhiyun list_for_each_entry(iter, &nvmem->cells, node) {
528*4882a593Smuzhiyun if (strcmp(cell_id, iter->name) == 0) {
529*4882a593Smuzhiyun cell = iter;
530*4882a593Smuzhiyun break;
531*4882a593Smuzhiyun }
532*4882a593Smuzhiyun }
533*4882a593Smuzhiyun mutex_unlock(&nvmem_mutex);
534*4882a593Smuzhiyun
535*4882a593Smuzhiyun return cell;
536*4882a593Smuzhiyun }
537*4882a593Smuzhiyun
nvmem_add_cells_from_of(struct nvmem_device * nvmem)538*4882a593Smuzhiyun static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
539*4882a593Smuzhiyun {
540*4882a593Smuzhiyun struct device_node *parent, *child;
541*4882a593Smuzhiyun struct device *dev = &nvmem->dev;
542*4882a593Smuzhiyun struct nvmem_cell *cell;
543*4882a593Smuzhiyun const __be32 *addr;
544*4882a593Smuzhiyun int len;
545*4882a593Smuzhiyun
546*4882a593Smuzhiyun parent = dev->of_node;
547*4882a593Smuzhiyun
548*4882a593Smuzhiyun for_each_child_of_node(parent, child) {
549*4882a593Smuzhiyun addr = of_get_property(child, "reg", &len);
550*4882a593Smuzhiyun if (!addr)
551*4882a593Smuzhiyun continue;
552*4882a593Smuzhiyun if (len < 2 * sizeof(u32)) {
553*4882a593Smuzhiyun dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
554*4882a593Smuzhiyun of_node_put(child);
555*4882a593Smuzhiyun return -EINVAL;
556*4882a593Smuzhiyun }
557*4882a593Smuzhiyun
558*4882a593Smuzhiyun cell = kzalloc(sizeof(*cell), GFP_KERNEL);
559*4882a593Smuzhiyun if (!cell) {
560*4882a593Smuzhiyun of_node_put(child);
561*4882a593Smuzhiyun return -ENOMEM;
562*4882a593Smuzhiyun }
563*4882a593Smuzhiyun
564*4882a593Smuzhiyun cell->nvmem = nvmem;
565*4882a593Smuzhiyun cell->offset = be32_to_cpup(addr++);
566*4882a593Smuzhiyun cell->bytes = be32_to_cpup(addr);
567*4882a593Smuzhiyun cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
568*4882a593Smuzhiyun
569*4882a593Smuzhiyun addr = of_get_property(child, "bits", &len);
570*4882a593Smuzhiyun if (addr && len == (2 * sizeof(u32))) {
571*4882a593Smuzhiyun cell->bit_offset = be32_to_cpup(addr++);
572*4882a593Smuzhiyun cell->nbits = be32_to_cpup(addr);
573*4882a593Smuzhiyun }
574*4882a593Smuzhiyun
575*4882a593Smuzhiyun if (cell->nbits)
576*4882a593Smuzhiyun cell->bytes = DIV_ROUND_UP(
577*4882a593Smuzhiyun cell->nbits + cell->bit_offset,
578*4882a593Smuzhiyun BITS_PER_BYTE);
579*4882a593Smuzhiyun
580*4882a593Smuzhiyun if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
581*4882a593Smuzhiyun dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
582*4882a593Smuzhiyun cell->name, nvmem->stride);
583*4882a593Smuzhiyun /* Cells already added will be freed later. */
584*4882a593Smuzhiyun kfree_const(cell->name);
585*4882a593Smuzhiyun kfree(cell);
586*4882a593Smuzhiyun of_node_put(child);
587*4882a593Smuzhiyun return -EINVAL;
588*4882a593Smuzhiyun }
589*4882a593Smuzhiyun
590*4882a593Smuzhiyun cell->np = of_node_get(child);
591*4882a593Smuzhiyun nvmem_cell_add(cell);
592*4882a593Smuzhiyun }
593*4882a593Smuzhiyun
594*4882a593Smuzhiyun return 0;
595*4882a593Smuzhiyun }
596*4882a593Smuzhiyun
597*4882a593Smuzhiyun /**
598*4882a593Smuzhiyun * nvmem_register() - Register a nvmem device for given nvmem_config.
599*4882a593Smuzhiyun * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
600*4882a593Smuzhiyun *
601*4882a593Smuzhiyun * @config: nvmem device configuration with which nvmem device is created.
602*4882a593Smuzhiyun *
603*4882a593Smuzhiyun * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
604*4882a593Smuzhiyun * on success.
605*4882a593Smuzhiyun */
606*4882a593Smuzhiyun
nvmem_register(const struct nvmem_config * config)607*4882a593Smuzhiyun struct nvmem_device *nvmem_register(const struct nvmem_config *config)
608*4882a593Smuzhiyun {
609*4882a593Smuzhiyun struct nvmem_device *nvmem;
610*4882a593Smuzhiyun int rval;
611*4882a593Smuzhiyun
612*4882a593Smuzhiyun if (!config->dev)
613*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
614*4882a593Smuzhiyun
615*4882a593Smuzhiyun if (!config->reg_read && !config->reg_write)
616*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
617*4882a593Smuzhiyun
618*4882a593Smuzhiyun nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
619*4882a593Smuzhiyun if (!nvmem)
620*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
621*4882a593Smuzhiyun
622*4882a593Smuzhiyun rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
623*4882a593Smuzhiyun if (rval < 0) {
624*4882a593Smuzhiyun kfree(nvmem);
625*4882a593Smuzhiyun return ERR_PTR(rval);
626*4882a593Smuzhiyun }
627*4882a593Smuzhiyun
628*4882a593Smuzhiyun if (config->wp_gpio)
629*4882a593Smuzhiyun nvmem->wp_gpio = config->wp_gpio;
630*4882a593Smuzhiyun else
631*4882a593Smuzhiyun nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
632*4882a593Smuzhiyun GPIOD_OUT_HIGH);
633*4882a593Smuzhiyun if (IS_ERR(nvmem->wp_gpio)) {
634*4882a593Smuzhiyun ida_free(&nvmem_ida, nvmem->id);
635*4882a593Smuzhiyun rval = PTR_ERR(nvmem->wp_gpio);
636*4882a593Smuzhiyun kfree(nvmem);
637*4882a593Smuzhiyun return ERR_PTR(rval);
638*4882a593Smuzhiyun }
639*4882a593Smuzhiyun
640*4882a593Smuzhiyun kref_init(&nvmem->refcnt);
641*4882a593Smuzhiyun INIT_LIST_HEAD(&nvmem->cells);
642*4882a593Smuzhiyun
643*4882a593Smuzhiyun nvmem->id = rval;
644*4882a593Smuzhiyun nvmem->owner = config->owner;
645*4882a593Smuzhiyun if (!nvmem->owner && config->dev->driver)
646*4882a593Smuzhiyun nvmem->owner = config->dev->driver->owner;
647*4882a593Smuzhiyun nvmem->stride = config->stride ?: 1;
648*4882a593Smuzhiyun nvmem->word_size = config->word_size ?: 1;
649*4882a593Smuzhiyun nvmem->size = config->size;
650*4882a593Smuzhiyun nvmem->dev.type = &nvmem_provider_type;
651*4882a593Smuzhiyun nvmem->dev.bus = &nvmem_bus_type;
652*4882a593Smuzhiyun nvmem->dev.parent = config->dev;
653*4882a593Smuzhiyun nvmem->root_only = config->root_only;
654*4882a593Smuzhiyun nvmem->priv = config->priv;
655*4882a593Smuzhiyun nvmem->type = config->type;
656*4882a593Smuzhiyun nvmem->reg_read = config->reg_read;
657*4882a593Smuzhiyun nvmem->reg_write = config->reg_write;
658*4882a593Smuzhiyun if (!config->no_of_node)
659*4882a593Smuzhiyun nvmem->dev.of_node = config->dev->of_node;
660*4882a593Smuzhiyun
661*4882a593Smuzhiyun switch (config->id) {
662*4882a593Smuzhiyun case NVMEM_DEVID_NONE:
663*4882a593Smuzhiyun dev_set_name(&nvmem->dev, "%s", config->name);
664*4882a593Smuzhiyun break;
665*4882a593Smuzhiyun case NVMEM_DEVID_AUTO:
666*4882a593Smuzhiyun dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
667*4882a593Smuzhiyun break;
668*4882a593Smuzhiyun default:
669*4882a593Smuzhiyun dev_set_name(&nvmem->dev, "%s%d",
670*4882a593Smuzhiyun config->name ? : "nvmem",
671*4882a593Smuzhiyun config->name ? config->id : nvmem->id);
672*4882a593Smuzhiyun break;
673*4882a593Smuzhiyun }
674*4882a593Smuzhiyun
675*4882a593Smuzhiyun nvmem->read_only = device_property_present(config->dev, "read-only") ||
676*4882a593Smuzhiyun config->read_only || !nvmem->reg_write;
677*4882a593Smuzhiyun
678*4882a593Smuzhiyun #ifdef CONFIG_NVMEM_SYSFS
679*4882a593Smuzhiyun nvmem->dev.groups = nvmem_dev_groups;
680*4882a593Smuzhiyun #endif
681*4882a593Smuzhiyun
682*4882a593Smuzhiyun dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
683*4882a593Smuzhiyun
684*4882a593Smuzhiyun rval = device_register(&nvmem->dev);
685*4882a593Smuzhiyun if (rval)
686*4882a593Smuzhiyun goto err_put_device;
687*4882a593Smuzhiyun
688*4882a593Smuzhiyun if (config->compat) {
689*4882a593Smuzhiyun rval = nvmem_sysfs_setup_compat(nvmem, config);
690*4882a593Smuzhiyun if (rval)
691*4882a593Smuzhiyun goto err_device_del;
692*4882a593Smuzhiyun }
693*4882a593Smuzhiyun
694*4882a593Smuzhiyun if (config->cells) {
695*4882a593Smuzhiyun rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
696*4882a593Smuzhiyun if (rval)
697*4882a593Smuzhiyun goto err_teardown_compat;
698*4882a593Smuzhiyun }
699*4882a593Smuzhiyun
700*4882a593Smuzhiyun rval = nvmem_add_cells_from_table(nvmem);
701*4882a593Smuzhiyun if (rval)
702*4882a593Smuzhiyun goto err_remove_cells;
703*4882a593Smuzhiyun
704*4882a593Smuzhiyun rval = nvmem_add_cells_from_of(nvmem);
705*4882a593Smuzhiyun if (rval)
706*4882a593Smuzhiyun goto err_remove_cells;
707*4882a593Smuzhiyun
708*4882a593Smuzhiyun blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
709*4882a593Smuzhiyun
710*4882a593Smuzhiyun return nvmem;
711*4882a593Smuzhiyun
712*4882a593Smuzhiyun err_remove_cells:
713*4882a593Smuzhiyun nvmem_device_remove_all_cells(nvmem);
714*4882a593Smuzhiyun err_teardown_compat:
715*4882a593Smuzhiyun if (config->compat)
716*4882a593Smuzhiyun nvmem_sysfs_remove_compat(nvmem, config);
717*4882a593Smuzhiyun err_device_del:
718*4882a593Smuzhiyun device_del(&nvmem->dev);
719*4882a593Smuzhiyun err_put_device:
720*4882a593Smuzhiyun put_device(&nvmem->dev);
721*4882a593Smuzhiyun
722*4882a593Smuzhiyun return ERR_PTR(rval);
723*4882a593Smuzhiyun }
724*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_register);
725*4882a593Smuzhiyun
/*
 * Final teardown of an nvmem device, invoked by kref_put() when the last
 * reference (the registration one plus any consumer references) is dropped.
 */
static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	/* Tell listeners the device is going away before tearing it down. */
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	/* Remove the legacy "eeprom" attribute if compat mode was set up. */
	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}
740*4882a593Smuzhiyun
741*4882a593Smuzhiyun /**
742*4882a593Smuzhiyun * nvmem_unregister() - Unregister previously registered nvmem device
743*4882a593Smuzhiyun *
744*4882a593Smuzhiyun * @nvmem: Pointer to previously registered nvmem device.
745*4882a593Smuzhiyun */
nvmem_unregister(struct nvmem_device * nvmem)746*4882a593Smuzhiyun void nvmem_unregister(struct nvmem_device *nvmem)
747*4882a593Smuzhiyun {
748*4882a593Smuzhiyun kref_put(&nvmem->refcnt, nvmem_device_release);
749*4882a593Smuzhiyun }
750*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_unregister);
751*4882a593Smuzhiyun
/* devres release callback: unregister the managed nvmem device. */
static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}
756*4882a593Smuzhiyun
757*4882a593Smuzhiyun /**
758*4882a593Smuzhiyun * devm_nvmem_register() - Register a managed nvmem device for given
759*4882a593Smuzhiyun * nvmem_config.
760*4882a593Smuzhiyun * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
761*4882a593Smuzhiyun *
762*4882a593Smuzhiyun * @dev: Device that uses the nvmem device.
763*4882a593Smuzhiyun * @config: nvmem device configuration with which nvmem device is created.
764*4882a593Smuzhiyun *
765*4882a593Smuzhiyun * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
766*4882a593Smuzhiyun * on success.
767*4882a593Smuzhiyun */
devm_nvmem_register(struct device * dev,const struct nvmem_config * config)768*4882a593Smuzhiyun struct nvmem_device *devm_nvmem_register(struct device *dev,
769*4882a593Smuzhiyun const struct nvmem_config *config)
770*4882a593Smuzhiyun {
771*4882a593Smuzhiyun struct nvmem_device **ptr, *nvmem;
772*4882a593Smuzhiyun
773*4882a593Smuzhiyun ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
774*4882a593Smuzhiyun if (!ptr)
775*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
776*4882a593Smuzhiyun
777*4882a593Smuzhiyun nvmem = nvmem_register(config);
778*4882a593Smuzhiyun
779*4882a593Smuzhiyun if (!IS_ERR(nvmem)) {
780*4882a593Smuzhiyun *ptr = nvmem;
781*4882a593Smuzhiyun devres_add(dev, ptr);
782*4882a593Smuzhiyun } else {
783*4882a593Smuzhiyun devres_free(ptr);
784*4882a593Smuzhiyun }
785*4882a593Smuzhiyun
786*4882a593Smuzhiyun return nvmem;
787*4882a593Smuzhiyun }
788*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_nvmem_register);
789*4882a593Smuzhiyun
/* devres match callback: true when @res holds the nvmem device @data. */
static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	return *(struct nvmem_device **)res == data;
}
796*4882a593Smuzhiyun
797*4882a593Smuzhiyun /**
798*4882a593Smuzhiyun * devm_nvmem_unregister() - Unregister previously registered managed nvmem
799*4882a593Smuzhiyun * device.
800*4882a593Smuzhiyun *
801*4882a593Smuzhiyun * @dev: Device that uses the nvmem device.
802*4882a593Smuzhiyun * @nvmem: Pointer to previously registered nvmem device.
803*4882a593Smuzhiyun *
804*4882a593Smuzhiyun * Return: Will be negative on error or zero on success.
805*4882a593Smuzhiyun */
devm_nvmem_unregister(struct device * dev,struct nvmem_device * nvmem)806*4882a593Smuzhiyun int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
807*4882a593Smuzhiyun {
808*4882a593Smuzhiyun return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
809*4882a593Smuzhiyun }
810*4882a593Smuzhiyun EXPORT_SYMBOL(devm_nvmem_unregister);
811*4882a593Smuzhiyun
__nvmem_device_get(void * data,int (* match)(struct device * dev,const void * data))812*4882a593Smuzhiyun static struct nvmem_device *__nvmem_device_get(void *data,
813*4882a593Smuzhiyun int (*match)(struct device *dev, const void *data))
814*4882a593Smuzhiyun {
815*4882a593Smuzhiyun struct nvmem_device *nvmem = NULL;
816*4882a593Smuzhiyun struct device *dev;
817*4882a593Smuzhiyun
818*4882a593Smuzhiyun mutex_lock(&nvmem_mutex);
819*4882a593Smuzhiyun dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
820*4882a593Smuzhiyun if (dev)
821*4882a593Smuzhiyun nvmem = to_nvmem_device(dev);
822*4882a593Smuzhiyun mutex_unlock(&nvmem_mutex);
823*4882a593Smuzhiyun if (!nvmem)
824*4882a593Smuzhiyun return ERR_PTR(-EPROBE_DEFER);
825*4882a593Smuzhiyun
826*4882a593Smuzhiyun if (!try_module_get(nvmem->owner)) {
827*4882a593Smuzhiyun dev_err(&nvmem->dev,
828*4882a593Smuzhiyun "could not increase module refcount for cell %s\n",
829*4882a593Smuzhiyun nvmem_dev_name(nvmem));
830*4882a593Smuzhiyun
831*4882a593Smuzhiyun put_device(&nvmem->dev);
832*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
833*4882a593Smuzhiyun }
834*4882a593Smuzhiyun
835*4882a593Smuzhiyun kref_get(&nvmem->refcnt);
836*4882a593Smuzhiyun
837*4882a593Smuzhiyun return nvmem;
838*4882a593Smuzhiyun }
839*4882a593Smuzhiyun
/*
 * Drop the three references taken by __nvmem_device_get(): the struct
 * device reference, the provider module reference and the nvmem kref.
 */
static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
846*4882a593Smuzhiyun
847*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_OF)
848*4882a593Smuzhiyun /**
849*4882a593Smuzhiyun * of_nvmem_device_get() - Get nvmem device from a given id
850*4882a593Smuzhiyun *
851*4882a593Smuzhiyun * @np: Device tree node that uses the nvmem device.
852*4882a593Smuzhiyun * @id: nvmem name from nvmem-names property.
853*4882a593Smuzhiyun *
854*4882a593Smuzhiyun * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
855*4882a593Smuzhiyun * on success.
856*4882a593Smuzhiyun */
of_nvmem_device_get(struct device_node * np,const char * id)857*4882a593Smuzhiyun struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
858*4882a593Smuzhiyun {
859*4882a593Smuzhiyun
860*4882a593Smuzhiyun struct device_node *nvmem_np;
861*4882a593Smuzhiyun struct nvmem_device *nvmem;
862*4882a593Smuzhiyun int index = 0;
863*4882a593Smuzhiyun
864*4882a593Smuzhiyun if (id)
865*4882a593Smuzhiyun index = of_property_match_string(np, "nvmem-names", id);
866*4882a593Smuzhiyun
867*4882a593Smuzhiyun nvmem_np = of_parse_phandle(np, "nvmem", index);
868*4882a593Smuzhiyun if (!nvmem_np)
869*4882a593Smuzhiyun return ERR_PTR(-ENOENT);
870*4882a593Smuzhiyun
871*4882a593Smuzhiyun nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
872*4882a593Smuzhiyun of_node_put(nvmem_np);
873*4882a593Smuzhiyun return nvmem;
874*4882a593Smuzhiyun }
875*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_nvmem_device_get);
876*4882a593Smuzhiyun #endif
877*4882a593Smuzhiyun
878*4882a593Smuzhiyun /**
879*4882a593Smuzhiyun * nvmem_device_get() - Get nvmem device from a given id
880*4882a593Smuzhiyun *
881*4882a593Smuzhiyun * @dev: Device that uses the nvmem device.
882*4882a593Smuzhiyun * @dev_name: name of the requested nvmem device.
883*4882a593Smuzhiyun *
884*4882a593Smuzhiyun * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
885*4882a593Smuzhiyun * on success.
886*4882a593Smuzhiyun */
nvmem_device_get(struct device * dev,const char * dev_name)887*4882a593Smuzhiyun struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
888*4882a593Smuzhiyun {
889*4882a593Smuzhiyun if (dev->of_node) { /* try dt first */
890*4882a593Smuzhiyun struct nvmem_device *nvmem;
891*4882a593Smuzhiyun
892*4882a593Smuzhiyun nvmem = of_nvmem_device_get(dev->of_node, dev_name);
893*4882a593Smuzhiyun
894*4882a593Smuzhiyun if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
895*4882a593Smuzhiyun return nvmem;
896*4882a593Smuzhiyun
897*4882a593Smuzhiyun }
898*4882a593Smuzhiyun
899*4882a593Smuzhiyun return __nvmem_device_get((void *)dev_name, device_match_name);
900*4882a593Smuzhiyun }
901*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_device_get);
902*4882a593Smuzhiyun
903*4882a593Smuzhiyun /**
904*4882a593Smuzhiyun * nvmem_device_find() - Find nvmem device with matching function
905*4882a593Smuzhiyun *
906*4882a593Smuzhiyun * @data: Data to pass to match function
907*4882a593Smuzhiyun * @match: Callback function to check device
908*4882a593Smuzhiyun *
909*4882a593Smuzhiyun * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
910*4882a593Smuzhiyun * on success.
911*4882a593Smuzhiyun */
nvmem_device_find(void * data,int (* match)(struct device * dev,const void * data))912*4882a593Smuzhiyun struct nvmem_device *nvmem_device_find(void *data,
913*4882a593Smuzhiyun int (*match)(struct device *dev, const void *data))
914*4882a593Smuzhiyun {
915*4882a593Smuzhiyun return __nvmem_device_get(data, match);
916*4882a593Smuzhiyun }
917*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_device_find);
918*4882a593Smuzhiyun
/* devres match callback: true when @res holds the nvmem device @data. */
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **slot = res;

	if (WARN_ON(!slot || !*slot))
		return 0;

	return *slot == data;
}
928*4882a593Smuzhiyun
/* devres release callback: drop the reference taken by devm_nvmem_device_get(). */
static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}
933*4882a593Smuzhiyun
934*4882a593Smuzhiyun /**
935*4882a593Smuzhiyun * devm_nvmem_device_put() - put alredy got nvmem device
936*4882a593Smuzhiyun *
937*4882a593Smuzhiyun * @dev: Device that uses the nvmem device.
938*4882a593Smuzhiyun * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(),
939*4882a593Smuzhiyun * that needs to be released.
940*4882a593Smuzhiyun */
devm_nvmem_device_put(struct device * dev,struct nvmem_device * nvmem)941*4882a593Smuzhiyun void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
942*4882a593Smuzhiyun {
943*4882a593Smuzhiyun int ret;
944*4882a593Smuzhiyun
945*4882a593Smuzhiyun ret = devres_release(dev, devm_nvmem_device_release,
946*4882a593Smuzhiyun devm_nvmem_device_match, nvmem);
947*4882a593Smuzhiyun
948*4882a593Smuzhiyun WARN_ON(ret);
949*4882a593Smuzhiyun }
950*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
951*4882a593Smuzhiyun
952*4882a593Smuzhiyun /**
953*4882a593Smuzhiyun * nvmem_device_put() - put alredy got nvmem device
954*4882a593Smuzhiyun *
955*4882a593Smuzhiyun * @nvmem: pointer to nvmem device that needs to be released.
956*4882a593Smuzhiyun */
nvmem_device_put(struct nvmem_device * nvmem)957*4882a593Smuzhiyun void nvmem_device_put(struct nvmem_device *nvmem)
958*4882a593Smuzhiyun {
959*4882a593Smuzhiyun __nvmem_device_put(nvmem);
960*4882a593Smuzhiyun }
961*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_device_put);
962*4882a593Smuzhiyun
963*4882a593Smuzhiyun /**
964*4882a593Smuzhiyun * devm_nvmem_device_get() - Get nvmem cell of device form a given id
965*4882a593Smuzhiyun *
966*4882a593Smuzhiyun * @dev: Device that requests the nvmem device.
967*4882a593Smuzhiyun * @id: name id for the requested nvmem device.
968*4882a593Smuzhiyun *
969*4882a593Smuzhiyun * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
970*4882a593Smuzhiyun * on success. The nvmem_cell will be freed by the automatically once the
971*4882a593Smuzhiyun * device is freed.
972*4882a593Smuzhiyun */
devm_nvmem_device_get(struct device * dev,const char * id)973*4882a593Smuzhiyun struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
974*4882a593Smuzhiyun {
975*4882a593Smuzhiyun struct nvmem_device **ptr, *nvmem;
976*4882a593Smuzhiyun
977*4882a593Smuzhiyun ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
978*4882a593Smuzhiyun if (!ptr)
979*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
980*4882a593Smuzhiyun
981*4882a593Smuzhiyun nvmem = nvmem_device_get(dev, id);
982*4882a593Smuzhiyun if (!IS_ERR(nvmem)) {
983*4882a593Smuzhiyun *ptr = nvmem;
984*4882a593Smuzhiyun devres_add(dev, ptr);
985*4882a593Smuzhiyun } else {
986*4882a593Smuzhiyun devres_free(ptr);
987*4882a593Smuzhiyun }
988*4882a593Smuzhiyun
989*4882a593Smuzhiyun return nvmem;
990*4882a593Smuzhiyun }
991*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
992*4882a593Smuzhiyun
993*4882a593Smuzhiyun static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device * dev,const char * con_id)994*4882a593Smuzhiyun nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
995*4882a593Smuzhiyun {
996*4882a593Smuzhiyun struct nvmem_cell *cell = ERR_PTR(-ENOENT);
997*4882a593Smuzhiyun struct nvmem_cell_lookup *lookup;
998*4882a593Smuzhiyun struct nvmem_device *nvmem;
999*4882a593Smuzhiyun const char *dev_id;
1000*4882a593Smuzhiyun
1001*4882a593Smuzhiyun if (!dev)
1002*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
1003*4882a593Smuzhiyun
1004*4882a593Smuzhiyun dev_id = dev_name(dev);
1005*4882a593Smuzhiyun
1006*4882a593Smuzhiyun mutex_lock(&nvmem_lookup_mutex);
1007*4882a593Smuzhiyun
1008*4882a593Smuzhiyun list_for_each_entry(lookup, &nvmem_lookup_list, node) {
1009*4882a593Smuzhiyun if ((strcmp(lookup->dev_id, dev_id) == 0) &&
1010*4882a593Smuzhiyun (strcmp(lookup->con_id, con_id) == 0)) {
1011*4882a593Smuzhiyun /* This is the right entry. */
1012*4882a593Smuzhiyun nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
1013*4882a593Smuzhiyun device_match_name);
1014*4882a593Smuzhiyun if (IS_ERR(nvmem)) {
1015*4882a593Smuzhiyun /* Provider may not be registered yet. */
1016*4882a593Smuzhiyun cell = ERR_CAST(nvmem);
1017*4882a593Smuzhiyun break;
1018*4882a593Smuzhiyun }
1019*4882a593Smuzhiyun
1020*4882a593Smuzhiyun cell = nvmem_find_cell_by_name(nvmem,
1021*4882a593Smuzhiyun lookup->cell_name);
1022*4882a593Smuzhiyun if (!cell) {
1023*4882a593Smuzhiyun __nvmem_device_put(nvmem);
1024*4882a593Smuzhiyun cell = ERR_PTR(-ENOENT);
1025*4882a593Smuzhiyun }
1026*4882a593Smuzhiyun break;
1027*4882a593Smuzhiyun }
1028*4882a593Smuzhiyun }
1029*4882a593Smuzhiyun
1030*4882a593Smuzhiyun mutex_unlock(&nvmem_lookup_mutex);
1031*4882a593Smuzhiyun return cell;
1032*4882a593Smuzhiyun }
1033*4882a593Smuzhiyun
1034*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_OF)
1035*4882a593Smuzhiyun static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device * nvmem,struct device_node * np)1036*4882a593Smuzhiyun nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
1037*4882a593Smuzhiyun {
1038*4882a593Smuzhiyun struct nvmem_cell *iter, *cell = NULL;
1039*4882a593Smuzhiyun
1040*4882a593Smuzhiyun mutex_lock(&nvmem_mutex);
1041*4882a593Smuzhiyun list_for_each_entry(iter, &nvmem->cells, node) {
1042*4882a593Smuzhiyun if (np == iter->np) {
1043*4882a593Smuzhiyun cell = iter;
1044*4882a593Smuzhiyun break;
1045*4882a593Smuzhiyun }
1046*4882a593Smuzhiyun }
1047*4882a593Smuzhiyun mutex_unlock(&nvmem_mutex);
1048*4882a593Smuzhiyun
1049*4882a593Smuzhiyun return cell;
1050*4882a593Smuzhiyun }
1051*4882a593Smuzhiyun
1052*4882a593Smuzhiyun /**
1053*4882a593Smuzhiyun * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
1054*4882a593Smuzhiyun *
1055*4882a593Smuzhiyun * @np: Device tree node that uses the nvmem cell.
1056*4882a593Smuzhiyun * @id: nvmem cell name from nvmem-cell-names property, or NULL
1057*4882a593Smuzhiyun * for the cell at index 0 (the lone cell with no accompanying
1058*4882a593Smuzhiyun * nvmem-cell-names property).
1059*4882a593Smuzhiyun *
1060*4882a593Smuzhiyun * Return: Will be an ERR_PTR() on error or a valid pointer
1061*4882a593Smuzhiyun * to a struct nvmem_cell. The nvmem_cell will be freed by the
1062*4882a593Smuzhiyun * nvmem_cell_put().
1063*4882a593Smuzhiyun */
of_nvmem_cell_get(struct device_node * np,const char * id)1064*4882a593Smuzhiyun struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
1065*4882a593Smuzhiyun {
1066*4882a593Smuzhiyun struct device_node *cell_np, *nvmem_np;
1067*4882a593Smuzhiyun struct nvmem_device *nvmem;
1068*4882a593Smuzhiyun struct nvmem_cell *cell;
1069*4882a593Smuzhiyun int index = 0;
1070*4882a593Smuzhiyun
1071*4882a593Smuzhiyun /* if cell name exists, find index to the name */
1072*4882a593Smuzhiyun if (id)
1073*4882a593Smuzhiyun index = of_property_match_string(np, "nvmem-cell-names", id);
1074*4882a593Smuzhiyun
1075*4882a593Smuzhiyun cell_np = of_parse_phandle(np, "nvmem-cells", index);
1076*4882a593Smuzhiyun if (!cell_np)
1077*4882a593Smuzhiyun return ERR_PTR(-ENOENT);
1078*4882a593Smuzhiyun
1079*4882a593Smuzhiyun nvmem_np = of_get_next_parent(cell_np);
1080*4882a593Smuzhiyun if (!nvmem_np)
1081*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
1082*4882a593Smuzhiyun
1083*4882a593Smuzhiyun nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
1084*4882a593Smuzhiyun of_node_put(nvmem_np);
1085*4882a593Smuzhiyun if (IS_ERR(nvmem))
1086*4882a593Smuzhiyun return ERR_CAST(nvmem);
1087*4882a593Smuzhiyun
1088*4882a593Smuzhiyun cell = nvmem_find_cell_by_node(nvmem, cell_np);
1089*4882a593Smuzhiyun if (!cell) {
1090*4882a593Smuzhiyun __nvmem_device_put(nvmem);
1091*4882a593Smuzhiyun return ERR_PTR(-ENOENT);
1092*4882a593Smuzhiyun }
1093*4882a593Smuzhiyun
1094*4882a593Smuzhiyun return cell;
1095*4882a593Smuzhiyun }
1096*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
1097*4882a593Smuzhiyun #endif
1098*4882a593Smuzhiyun
1099*4882a593Smuzhiyun /**
1100*4882a593Smuzhiyun * nvmem_cell_get() - Get nvmem cell of device form a given cell name
1101*4882a593Smuzhiyun *
1102*4882a593Smuzhiyun * @dev: Device that requests the nvmem cell.
1103*4882a593Smuzhiyun * @id: nvmem cell name to get (this corresponds with the name from the
1104*4882a593Smuzhiyun * nvmem-cell-names property for DT systems and with the con_id from
1105*4882a593Smuzhiyun * the lookup entry for non-DT systems).
1106*4882a593Smuzhiyun *
1107*4882a593Smuzhiyun * Return: Will be an ERR_PTR() on error or a valid pointer
1108*4882a593Smuzhiyun * to a struct nvmem_cell. The nvmem_cell will be freed by the
1109*4882a593Smuzhiyun * nvmem_cell_put().
1110*4882a593Smuzhiyun */
nvmem_cell_get(struct device * dev,const char * id)1111*4882a593Smuzhiyun struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
1112*4882a593Smuzhiyun {
1113*4882a593Smuzhiyun struct nvmem_cell *cell;
1114*4882a593Smuzhiyun
1115*4882a593Smuzhiyun if (dev->of_node) { /* try dt first */
1116*4882a593Smuzhiyun cell = of_nvmem_cell_get(dev->of_node, id);
1117*4882a593Smuzhiyun if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
1118*4882a593Smuzhiyun return cell;
1119*4882a593Smuzhiyun }
1120*4882a593Smuzhiyun
1121*4882a593Smuzhiyun /* NULL cell id only allowed for device tree; invalid otherwise */
1122*4882a593Smuzhiyun if (!id)
1123*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun return nvmem_cell_get_from_lookup(dev, id);
1126*4882a593Smuzhiyun }
1127*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_cell_get);
1128*4882a593Smuzhiyun
/* devres release callback: drop the cell reference taken by devm_nvmem_cell_get(). */
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}
1133*4882a593Smuzhiyun
1134*4882a593Smuzhiyun /**
1135*4882a593Smuzhiyun * devm_nvmem_cell_get() - Get nvmem cell of device form a given id
1136*4882a593Smuzhiyun *
1137*4882a593Smuzhiyun * @dev: Device that requests the nvmem cell.
1138*4882a593Smuzhiyun * @id: nvmem cell name id to get.
1139*4882a593Smuzhiyun *
1140*4882a593Smuzhiyun * Return: Will be an ERR_PTR() on error or a valid pointer
1141*4882a593Smuzhiyun * to a struct nvmem_cell. The nvmem_cell will be freed by the
1142*4882a593Smuzhiyun * automatically once the device is freed.
1143*4882a593Smuzhiyun */
devm_nvmem_cell_get(struct device * dev,const char * id)1144*4882a593Smuzhiyun struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
1145*4882a593Smuzhiyun {
1146*4882a593Smuzhiyun struct nvmem_cell **ptr, *cell;
1147*4882a593Smuzhiyun
1148*4882a593Smuzhiyun ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
1149*4882a593Smuzhiyun if (!ptr)
1150*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
1151*4882a593Smuzhiyun
1152*4882a593Smuzhiyun cell = nvmem_cell_get(dev, id);
1153*4882a593Smuzhiyun if (!IS_ERR(cell)) {
1154*4882a593Smuzhiyun *ptr = cell;
1155*4882a593Smuzhiyun devres_add(dev, ptr);
1156*4882a593Smuzhiyun } else {
1157*4882a593Smuzhiyun devres_free(ptr);
1158*4882a593Smuzhiyun }
1159*4882a593Smuzhiyun
1160*4882a593Smuzhiyun return cell;
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
1163*4882a593Smuzhiyun
/* devres match callback: true when @res holds the nvmem cell @data. */
static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **slot = res;

	if (WARN_ON(!slot || !*slot))
		return 0;

	return *slot == data;
}
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun /**
1175*4882a593Smuzhiyun * devm_nvmem_cell_put() - Release previously allocated nvmem cell
1176*4882a593Smuzhiyun * from devm_nvmem_cell_get.
1177*4882a593Smuzhiyun *
1178*4882a593Smuzhiyun * @dev: Device that requests the nvmem cell.
1179*4882a593Smuzhiyun * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
1180*4882a593Smuzhiyun */
devm_nvmem_cell_put(struct device * dev,struct nvmem_cell * cell)1181*4882a593Smuzhiyun void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
1182*4882a593Smuzhiyun {
1183*4882a593Smuzhiyun int ret;
1184*4882a593Smuzhiyun
1185*4882a593Smuzhiyun ret = devres_release(dev, devm_nvmem_cell_release,
1186*4882a593Smuzhiyun devm_nvmem_cell_match, cell);
1187*4882a593Smuzhiyun
1188*4882a593Smuzhiyun WARN_ON(ret);
1189*4882a593Smuzhiyun }
1190*4882a593Smuzhiyun EXPORT_SYMBOL(devm_nvmem_cell_put);
1191*4882a593Smuzhiyun
1192*4882a593Smuzhiyun /**
1193*4882a593Smuzhiyun * nvmem_cell_put() - Release previously allocated nvmem cell.
1194*4882a593Smuzhiyun *
1195*4882a593Smuzhiyun * @cell: Previously allocated nvmem cell by nvmem_cell_get().
1196*4882a593Smuzhiyun */
nvmem_cell_put(struct nvmem_cell * cell)1197*4882a593Smuzhiyun void nvmem_cell_put(struct nvmem_cell *cell)
1198*4882a593Smuzhiyun {
1199*4882a593Smuzhiyun struct nvmem_device *nvmem = cell->nvmem;
1200*4882a593Smuzhiyun
1201*4882a593Smuzhiyun __nvmem_device_put(nvmem);
1202*4882a593Smuzhiyun }
1203*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_cell_put);
1204*4882a593Smuzhiyun
/*
 * Normalise a raw read of a sub-byte-aligned cell in place: shift the
 * whole buffer right by cell->bit_offset bits, zero any now-unused
 * trailing bytes and mask off leftover high bits in the last used byte,
 * so @buf ends up holding exactly cell->nbits of data starting at bit 0.
 */
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			/* p trails b by one byte through the buffer */
			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
1237*4882a593Smuzhiyun
/*
 * Read cell->bytes from the device into @buf, then normalise sub-byte
 * cells in place.  On success, *len (if non-NULL) is set to cell->bytes.
 */
static int __nvmem_cell_read(struct nvmem_device *nvmem,
		      struct nvmem_cell *cell,
		      void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun /**
1260*4882a593Smuzhiyun * nvmem_cell_read() - Read a given nvmem cell
1261*4882a593Smuzhiyun *
1262*4882a593Smuzhiyun * @cell: nvmem cell to be read.
1263*4882a593Smuzhiyun * @len: pointer to length of cell which will be populated on successful read;
1264*4882a593Smuzhiyun * can be NULL.
1265*4882a593Smuzhiyun *
1266*4882a593Smuzhiyun * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
1267*4882a593Smuzhiyun * buffer should be freed by the consumer with a kfree().
1268*4882a593Smuzhiyun */
nvmem_cell_read(struct nvmem_cell * cell,size_t * len)1269*4882a593Smuzhiyun void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
1270*4882a593Smuzhiyun {
1271*4882a593Smuzhiyun struct nvmem_device *nvmem = cell->nvmem;
1272*4882a593Smuzhiyun u8 *buf;
1273*4882a593Smuzhiyun int rc;
1274*4882a593Smuzhiyun
1275*4882a593Smuzhiyun if (!nvmem)
1276*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
1277*4882a593Smuzhiyun
1278*4882a593Smuzhiyun buf = kzalloc(cell->bytes, GFP_KERNEL);
1279*4882a593Smuzhiyun if (!buf)
1280*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
1281*4882a593Smuzhiyun
1282*4882a593Smuzhiyun rc = __nvmem_cell_read(nvmem, cell, buf, len);
1283*4882a593Smuzhiyun if (rc) {
1284*4882a593Smuzhiyun kfree(buf);
1285*4882a593Smuzhiyun return ERR_PTR(rc);
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun return buf;
1289*4882a593Smuzhiyun }
1290*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_cell_read);
1291*4882a593Smuzhiyun
/*
 * Build a cell->bytes-sized scratch buffer for writing a sub-byte-aligned
 * cell: shift the caller's data left by cell->bit_offset and merge in the
 * surrounding bits read back from the device, so a whole-byte write does
 * not clobber neighbouring data.  Returns a kmalloc'd buffer (caller
 * frees) or an ERR_PTR() if the read-back fails.
 */
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		/* pbyte keeps the pre-shift value so carried-out bits survive */
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;

	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}
1344*4882a593Smuzhiyun
1345*4882a593Smuzhiyun /**
1346*4882a593Smuzhiyun * nvmem_cell_write() - Write to a given nvmem cell
1347*4882a593Smuzhiyun *
1348*4882a593Smuzhiyun * @cell: nvmem cell to be written.
1349*4882a593Smuzhiyun * @buf: Buffer to be written.
1350*4882a593Smuzhiyun * @len: length of buffer to be written to nvmem cell.
1351*4882a593Smuzhiyun *
1352*4882a593Smuzhiyun * Return: length of bytes written or negative on failure.
1353*4882a593Smuzhiyun */
nvmem_cell_write(struct nvmem_cell * cell,void * buf,size_t len)1354*4882a593Smuzhiyun int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
1355*4882a593Smuzhiyun {
1356*4882a593Smuzhiyun struct nvmem_device *nvmem = cell->nvmem;
1357*4882a593Smuzhiyun int rc;
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun if (!nvmem || nvmem->read_only ||
1360*4882a593Smuzhiyun (cell->bit_offset == 0 && len != cell->bytes))
1361*4882a593Smuzhiyun return -EINVAL;
1362*4882a593Smuzhiyun
1363*4882a593Smuzhiyun if (cell->bit_offset || cell->nbits) {
1364*4882a593Smuzhiyun buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
1365*4882a593Smuzhiyun if (IS_ERR(buf))
1366*4882a593Smuzhiyun return PTR_ERR(buf);
1367*4882a593Smuzhiyun }
1368*4882a593Smuzhiyun
1369*4882a593Smuzhiyun rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
1370*4882a593Smuzhiyun
1371*4882a593Smuzhiyun /* free the tmp buffer */
1372*4882a593Smuzhiyun if (cell->bit_offset || cell->nbits)
1373*4882a593Smuzhiyun kfree(buf);
1374*4882a593Smuzhiyun
1375*4882a593Smuzhiyun if (rc)
1376*4882a593Smuzhiyun return rc;
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun return len;
1379*4882a593Smuzhiyun }
1380*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_cell_write);
1381*4882a593Smuzhiyun
/*
 * Fetch the named cell from @dev, read it, and copy exactly @count bytes
 * into @val. Fails with -EINVAL if the cell's length differs from @count.
 */
static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;
	int ret = 0;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_put;
	}

	if (len == count)
		memcpy(val, buf, count);
	else
		ret = -EINVAL;

	kfree(buf);
out_put:
	nvmem_cell_put(cell);
	return ret;
}
1409*4882a593Smuzhiyun
/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno. Fails with -EINVAL if the
 * cell is not exactly one byte long.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	/* Delegate to the common helper, which enforces the exact length */
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
1424*4882a593Smuzhiyun
/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno. Fails with -EINVAL if the
 * cell is not exactly two bytes long.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	/* Delegate to the common helper, which enforces the exact length */
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
1439*4882a593Smuzhiyun
/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno. Fails with -EINVAL if the
 * cell is not exactly four bytes long.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	/* Delegate to the common helper, which enforces the exact length */
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
1454*4882a593Smuzhiyun
/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno. Fails with -EINVAL if the
 * cell is not exactly eight bytes long.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	/* Delegate to the common helper, which enforces the exact length */
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun /**
1471*4882a593Smuzhiyun * nvmem_device_cell_read() - Read a given nvmem device and cell
1472*4882a593Smuzhiyun *
1473*4882a593Smuzhiyun * @nvmem: nvmem device to read from.
1474*4882a593Smuzhiyun * @info: nvmem cell info to be read.
1475*4882a593Smuzhiyun * @buf: buffer pointer which will be populated on successful read.
1476*4882a593Smuzhiyun *
1477*4882a593Smuzhiyun * Return: length of successful bytes read on success and negative
1478*4882a593Smuzhiyun * error code on error.
1479*4882a593Smuzhiyun */
nvmem_device_cell_read(struct nvmem_device * nvmem,struct nvmem_cell_info * info,void * buf)1480*4882a593Smuzhiyun ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
1481*4882a593Smuzhiyun struct nvmem_cell_info *info, void *buf)
1482*4882a593Smuzhiyun {
1483*4882a593Smuzhiyun struct nvmem_cell cell;
1484*4882a593Smuzhiyun int rc;
1485*4882a593Smuzhiyun ssize_t len;
1486*4882a593Smuzhiyun
1487*4882a593Smuzhiyun if (!nvmem)
1488*4882a593Smuzhiyun return -EINVAL;
1489*4882a593Smuzhiyun
1490*4882a593Smuzhiyun rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
1491*4882a593Smuzhiyun if (rc)
1492*4882a593Smuzhiyun return rc;
1493*4882a593Smuzhiyun
1494*4882a593Smuzhiyun rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
1495*4882a593Smuzhiyun if (rc)
1496*4882a593Smuzhiyun return rc;
1497*4882a593Smuzhiyun
1498*4882a593Smuzhiyun return len;
1499*4882a593Smuzhiyun }
1500*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
1501*4882a593Smuzhiyun
1502*4882a593Smuzhiyun /**
1503*4882a593Smuzhiyun * nvmem_device_cell_write() - Write cell to a given nvmem device
1504*4882a593Smuzhiyun *
1505*4882a593Smuzhiyun * @nvmem: nvmem device to be written to.
1506*4882a593Smuzhiyun * @info: nvmem cell info to be written.
1507*4882a593Smuzhiyun * @buf: buffer to be written to cell.
1508*4882a593Smuzhiyun *
1509*4882a593Smuzhiyun * Return: length of bytes written or negative error code on failure.
1510*4882a593Smuzhiyun */
nvmem_device_cell_write(struct nvmem_device * nvmem,struct nvmem_cell_info * info,void * buf)1511*4882a593Smuzhiyun int nvmem_device_cell_write(struct nvmem_device *nvmem,
1512*4882a593Smuzhiyun struct nvmem_cell_info *info, void *buf)
1513*4882a593Smuzhiyun {
1514*4882a593Smuzhiyun struct nvmem_cell cell;
1515*4882a593Smuzhiyun int rc;
1516*4882a593Smuzhiyun
1517*4882a593Smuzhiyun if (!nvmem)
1518*4882a593Smuzhiyun return -EINVAL;
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
1521*4882a593Smuzhiyun if (rc)
1522*4882a593Smuzhiyun return rc;
1523*4882a593Smuzhiyun
1524*4882a593Smuzhiyun return nvmem_cell_write(&cell, buf, cell.bytes);
1525*4882a593Smuzhiyun }
1526*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
1527*4882a593Smuzhiyun
1528*4882a593Smuzhiyun /**
1529*4882a593Smuzhiyun * nvmem_device_read() - Read from a given nvmem device
1530*4882a593Smuzhiyun *
1531*4882a593Smuzhiyun * @nvmem: nvmem device to read from.
1532*4882a593Smuzhiyun * @offset: offset in nvmem device.
1533*4882a593Smuzhiyun * @bytes: number of bytes to read.
1534*4882a593Smuzhiyun * @buf: buffer pointer which will be populated on successful read.
1535*4882a593Smuzhiyun *
1536*4882a593Smuzhiyun * Return: length of successful bytes read on success and negative
1537*4882a593Smuzhiyun * error code on error.
1538*4882a593Smuzhiyun */
nvmem_device_read(struct nvmem_device * nvmem,unsigned int offset,size_t bytes,void * buf)1539*4882a593Smuzhiyun int nvmem_device_read(struct nvmem_device *nvmem,
1540*4882a593Smuzhiyun unsigned int offset,
1541*4882a593Smuzhiyun size_t bytes, void *buf)
1542*4882a593Smuzhiyun {
1543*4882a593Smuzhiyun int rc;
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun if (!nvmem)
1546*4882a593Smuzhiyun return -EINVAL;
1547*4882a593Smuzhiyun
1548*4882a593Smuzhiyun rc = nvmem_reg_read(nvmem, offset, buf, bytes);
1549*4882a593Smuzhiyun
1550*4882a593Smuzhiyun if (rc)
1551*4882a593Smuzhiyun return rc;
1552*4882a593Smuzhiyun
1553*4882a593Smuzhiyun return bytes;
1554*4882a593Smuzhiyun }
1555*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_device_read);
1556*4882a593Smuzhiyun
1557*4882a593Smuzhiyun /**
1558*4882a593Smuzhiyun * nvmem_device_write() - Write cell to a given nvmem device
1559*4882a593Smuzhiyun *
1560*4882a593Smuzhiyun * @nvmem: nvmem device to be written to.
1561*4882a593Smuzhiyun * @offset: offset in nvmem device.
1562*4882a593Smuzhiyun * @bytes: number of bytes to write.
1563*4882a593Smuzhiyun * @buf: buffer to be written.
1564*4882a593Smuzhiyun *
1565*4882a593Smuzhiyun * Return: length of bytes written or negative error code on failure.
1566*4882a593Smuzhiyun */
nvmem_device_write(struct nvmem_device * nvmem,unsigned int offset,size_t bytes,void * buf)1567*4882a593Smuzhiyun int nvmem_device_write(struct nvmem_device *nvmem,
1568*4882a593Smuzhiyun unsigned int offset,
1569*4882a593Smuzhiyun size_t bytes, void *buf)
1570*4882a593Smuzhiyun {
1571*4882a593Smuzhiyun int rc;
1572*4882a593Smuzhiyun
1573*4882a593Smuzhiyun if (!nvmem)
1574*4882a593Smuzhiyun return -EINVAL;
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun rc = nvmem_reg_write(nvmem, offset, buf, bytes);
1577*4882a593Smuzhiyun
1578*4882a593Smuzhiyun if (rc)
1579*4882a593Smuzhiyun return rc;
1580*4882a593Smuzhiyun
1581*4882a593Smuzhiyun
1582*4882a593Smuzhiyun return bytes;
1583*4882a593Smuzhiyun }
1584*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_device_write);
1585*4882a593Smuzhiyun
/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	/* Serialize against concurrent table registration/removal */
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
1598*4882a593Smuzhiyun
/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	/* Serialize against concurrent table registration/removal */
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
1611*4882a593Smuzhiyun
1612*4882a593Smuzhiyun /**
1613*4882a593Smuzhiyun * nvmem_add_cell_lookups() - register a list of cell lookup entries
1614*4882a593Smuzhiyun *
1615*4882a593Smuzhiyun * @entries: array of cell lookup entries
1616*4882a593Smuzhiyun * @nentries: number of cell lookup entries in the array
1617*4882a593Smuzhiyun */
nvmem_add_cell_lookups(struct nvmem_cell_lookup * entries,size_t nentries)1618*4882a593Smuzhiyun void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1619*4882a593Smuzhiyun {
1620*4882a593Smuzhiyun int i;
1621*4882a593Smuzhiyun
1622*4882a593Smuzhiyun mutex_lock(&nvmem_lookup_mutex);
1623*4882a593Smuzhiyun for (i = 0; i < nentries; i++)
1624*4882a593Smuzhiyun list_add_tail(&entries[i].node, &nvmem_lookup_list);
1625*4882a593Smuzhiyun mutex_unlock(&nvmem_lookup_mutex);
1626*4882a593Smuzhiyun }
1627*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
1628*4882a593Smuzhiyun
1629*4882a593Smuzhiyun /**
1630*4882a593Smuzhiyun * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
1631*4882a593Smuzhiyun * entries
1632*4882a593Smuzhiyun *
1633*4882a593Smuzhiyun * @entries: array of cell lookup entries
1634*4882a593Smuzhiyun * @nentries: number of cell lookup entries in the array
1635*4882a593Smuzhiyun */
nvmem_del_cell_lookups(struct nvmem_cell_lookup * entries,size_t nentries)1636*4882a593Smuzhiyun void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1637*4882a593Smuzhiyun {
1638*4882a593Smuzhiyun int i;
1639*4882a593Smuzhiyun
1640*4882a593Smuzhiyun mutex_lock(&nvmem_lookup_mutex);
1641*4882a593Smuzhiyun for (i = 0; i < nentries; i++)
1642*4882a593Smuzhiyun list_del(&entries[i].node);
1643*4882a593Smuzhiyun mutex_unlock(&nvmem_lookup_mutex);
1644*4882a593Smuzhiyun }
1645*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
1646*4882a593Smuzhiyun
/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	/* Accessor keeps struct nvmem_device opaque to consumers */
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);
1659*4882a593Smuzhiyun
/* Register the nvmem bus so providers and consumers can attach to it. */
static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}
1664*4882a593Smuzhiyun
/* Tear down the nvmem bus on module unload. */
static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}
1669*4882a593Smuzhiyun
1670*4882a593Smuzhiyun #ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
1671*4882a593Smuzhiyun arch_initcall_sync(nvmem_init);
1672*4882a593Smuzhiyun #else
1673*4882a593Smuzhiyun subsys_initcall(nvmem_init);
1674*4882a593Smuzhiyun #endif
1675*4882a593Smuzhiyun module_exit(nvmem_exit);
1676*4882a593Smuzhiyun
1677*4882a593Smuzhiyun MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
1678*4882a593Smuzhiyun MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
1679*4882a593Smuzhiyun MODULE_DESCRIPTION("nvmem Driver Core");
1680*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1681