// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This driver provides the core support for a single RMI4-based device.
 *
 * The RMI4 specification can be found here (URL split for line length):
 *
 * http://www.synaptics.com/sites/default/files/
 * 511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf
 */
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun #include <linux/bitmap.h>
15*4882a593Smuzhiyun #include <linux/delay.h>
16*4882a593Smuzhiyun #include <linux/fs.h>
17*4882a593Smuzhiyun #include <linux/irq.h>
18*4882a593Smuzhiyun #include <linux/pm.h>
19*4882a593Smuzhiyun #include <linux/slab.h>
20*4882a593Smuzhiyun #include <linux/of.h>
21*4882a593Smuzhiyun #include <linux/irqdomain.h>
22*4882a593Smuzhiyun #include <uapi/linux/input.h>
23*4882a593Smuzhiyun #include <linux/rmi.h>
24*4882a593Smuzhiyun #include "rmi_bus.h"
25*4882a593Smuzhiyun #include "rmi_driver.h"
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun #define HAS_NONSTANDARD_PDT_MASK 0x40
28*4882a593Smuzhiyun #define RMI4_MAX_PAGE 0xff
29*4882a593Smuzhiyun #define RMI4_PAGE_SIZE 0x100
30*4882a593Smuzhiyun #define RMI4_PAGE_MASK 0xFF00
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun #define RMI_DEVICE_RESET_CMD 0x01
33*4882a593Smuzhiyun #define DEFAULT_RESET_DELAY_MS 100
34*4882a593Smuzhiyun
rmi_free_function_list(struct rmi_device * rmi_dev)35*4882a593Smuzhiyun void rmi_free_function_list(struct rmi_device *rmi_dev)
36*4882a593Smuzhiyun {
37*4882a593Smuzhiyun struct rmi_function *fn, *tmp;
38*4882a593Smuzhiyun struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun /* Doing it in the reverse order so F01 will be removed last */
43*4882a593Smuzhiyun list_for_each_entry_safe_reverse(fn, tmp,
44*4882a593Smuzhiyun &data->function_list, node) {
45*4882a593Smuzhiyun list_del(&fn->node);
46*4882a593Smuzhiyun rmi_unregister_function(fn);
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun devm_kfree(&rmi_dev->dev, data->irq_memory);
50*4882a593Smuzhiyun data->irq_memory = NULL;
51*4882a593Smuzhiyun data->irq_status = NULL;
52*4882a593Smuzhiyun data->fn_irq_bits = NULL;
53*4882a593Smuzhiyun data->current_irq_mask = NULL;
54*4882a593Smuzhiyun data->new_irq_mask = NULL;
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun data->f01_container = NULL;
57*4882a593Smuzhiyun data->f34_container = NULL;
58*4882a593Smuzhiyun }
59*4882a593Smuzhiyun
reset_one_function(struct rmi_function * fn)60*4882a593Smuzhiyun static int reset_one_function(struct rmi_function *fn)
61*4882a593Smuzhiyun {
62*4882a593Smuzhiyun struct rmi_function_handler *fh;
63*4882a593Smuzhiyun int retval = 0;
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun if (!fn || !fn->dev.driver)
66*4882a593Smuzhiyun return 0;
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun fh = to_rmi_function_handler(fn->dev.driver);
69*4882a593Smuzhiyun if (fh->reset) {
70*4882a593Smuzhiyun retval = fh->reset(fn);
71*4882a593Smuzhiyun if (retval < 0)
72*4882a593Smuzhiyun dev_err(&fn->dev, "Reset failed with code %d.\n",
73*4882a593Smuzhiyun retval);
74*4882a593Smuzhiyun }
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun return retval;
77*4882a593Smuzhiyun }
78*4882a593Smuzhiyun
configure_one_function(struct rmi_function * fn)79*4882a593Smuzhiyun static int configure_one_function(struct rmi_function *fn)
80*4882a593Smuzhiyun {
81*4882a593Smuzhiyun struct rmi_function_handler *fh;
82*4882a593Smuzhiyun int retval = 0;
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun if (!fn || !fn->dev.driver)
85*4882a593Smuzhiyun return 0;
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun fh = to_rmi_function_handler(fn->dev.driver);
88*4882a593Smuzhiyun if (fh->config) {
89*4882a593Smuzhiyun retval = fh->config(fn);
90*4882a593Smuzhiyun if (retval < 0)
91*4882a593Smuzhiyun dev_err(&fn->dev, "Config failed with code %d.\n",
92*4882a593Smuzhiyun retval);
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun return retval;
96*4882a593Smuzhiyun }
97*4882a593Smuzhiyun
rmi_driver_process_reset_requests(struct rmi_device * rmi_dev)98*4882a593Smuzhiyun static int rmi_driver_process_reset_requests(struct rmi_device *rmi_dev)
99*4882a593Smuzhiyun {
100*4882a593Smuzhiyun struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
101*4882a593Smuzhiyun struct rmi_function *entry;
102*4882a593Smuzhiyun int retval;
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun list_for_each_entry(entry, &data->function_list, node) {
105*4882a593Smuzhiyun retval = reset_one_function(entry);
106*4882a593Smuzhiyun if (retval < 0)
107*4882a593Smuzhiyun return retval;
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun return 0;
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun
rmi_driver_process_config_requests(struct rmi_device * rmi_dev)113*4882a593Smuzhiyun static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
114*4882a593Smuzhiyun {
115*4882a593Smuzhiyun struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
116*4882a593Smuzhiyun struct rmi_function *entry;
117*4882a593Smuzhiyun int retval;
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun list_for_each_entry(entry, &data->function_list, node) {
120*4882a593Smuzhiyun retval = configure_one_function(entry);
121*4882a593Smuzhiyun if (retval < 0)
122*4882a593Smuzhiyun return retval;
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun return 0;
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun
/*
 * Read (or take over) the interrupt status bits and dispatch each
 * pending, enabled source as a nested IRQ in the driver's IRQ domain.
 */
static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;
	int i;
	int error;

	if (!data)
		return 0;

	/*
	 * If no attention payload was supplied via rmi_set_attn_data(),
	 * fetch the interrupt status registers from the device; they
	 * start one byte past the F01 data base address.
	 */
	if (!data->attn_data.data) {
		error = rmi_read_block(rmi_dev,
				data->f01_container->fd.data_base_addr + 1,
				data->irq_status, data->num_of_irq_regs);
		if (error < 0) {
			dev_err(dev, "Failed to read irqs, code=%d\n", error);
			return error;
		}
	}

	/* irq_mutex guards the IRQ bitmaps; mask off unclaimed sources. */
	mutex_lock(&data->irq_mutex);
	bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
	       data->irq_count);
	/*
	 * At this point, irq_status has all bits that are set in the
	 * interrupt status register and are enabled.
	 */
	mutex_unlock(&data->irq_mutex);

	/* Hand each pending source to its nested IRQ handler. */
	for_each_set_bit(i, data->irq_status, data->irq_count)
		handle_nested_irq(irq_find_mapping(data->irqdomain, i));

	/* Flush any input events the handlers generated. */
	if (data->input)
		input_sync(data->input);

	return 0;
}
165*4882a593Smuzhiyun
/*
 * Queue an attention report (interrupt status plus payload) for the
 * IRQ thread to consume.  May be called from atomic context.
 */
void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
		       void *data, size_t size)
{
	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data;
	void *buf;

	/* Silently drop reports while the driver is disabled. */
	if (!drvdata->enabled)
		return;

	buf = kmemdup(data, size, GFP_ATOMIC);
	if (!buf)
		return;

	attn_data.data = buf;
	attn_data.size = size;
	attn_data.irq_status = irq_status;

	kfifo_put(&drvdata->attn_fifo, attn_data);
}
EXPORT_SYMBOL_GPL(rmi_set_attn_data);
187*4882a593Smuzhiyun
rmi_irq_fn(int irq,void * dev_id)188*4882a593Smuzhiyun static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
189*4882a593Smuzhiyun {
190*4882a593Smuzhiyun struct rmi_device *rmi_dev = dev_id;
191*4882a593Smuzhiyun struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
192*4882a593Smuzhiyun struct rmi4_attn_data attn_data = {0};
193*4882a593Smuzhiyun int ret, count;
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun count = kfifo_get(&drvdata->attn_fifo, &attn_data);
196*4882a593Smuzhiyun if (count) {
197*4882a593Smuzhiyun *(drvdata->irq_status) = attn_data.irq_status;
198*4882a593Smuzhiyun drvdata->attn_data = attn_data;
199*4882a593Smuzhiyun }
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun ret = rmi_process_interrupt_requests(rmi_dev);
202*4882a593Smuzhiyun if (ret)
203*4882a593Smuzhiyun rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
204*4882a593Smuzhiyun "Failed to process interrupt request: %d\n", ret);
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun if (count) {
207*4882a593Smuzhiyun kfree(attn_data.data);
208*4882a593Smuzhiyun drvdata->attn_data.data = NULL;
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun if (!kfifo_is_empty(&drvdata->attn_fifo))
212*4882a593Smuzhiyun return rmi_irq_fn(irq, dev_id);
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun return IRQ_HANDLED;
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun
rmi_irq_init(struct rmi_device * rmi_dev)217*4882a593Smuzhiyun static int rmi_irq_init(struct rmi_device *rmi_dev)
218*4882a593Smuzhiyun {
219*4882a593Smuzhiyun struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
220*4882a593Smuzhiyun struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
221*4882a593Smuzhiyun int irq_flags = irq_get_trigger_type(pdata->irq);
222*4882a593Smuzhiyun int ret;
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun if (!irq_flags)
225*4882a593Smuzhiyun irq_flags = IRQF_TRIGGER_LOW;
226*4882a593Smuzhiyun
227*4882a593Smuzhiyun ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
228*4882a593Smuzhiyun rmi_irq_fn, irq_flags | IRQF_ONESHOT,
229*4882a593Smuzhiyun dev_driver_string(rmi_dev->xport->dev),
230*4882a593Smuzhiyun rmi_dev);
231*4882a593Smuzhiyun if (ret < 0) {
232*4882a593Smuzhiyun dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
233*4882a593Smuzhiyun pdata->irq);
234*4882a593Smuzhiyun
235*4882a593Smuzhiyun return ret;
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun data->enabled = true;
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun return 0;
241*4882a593Smuzhiyun }
242*4882a593Smuzhiyun
/*
 * Look up a registered RMI function by its function number.
 * Returns NULL if no such function is registered.
 */
struct rmi_function *rmi_find_function(struct rmi_device *rmi_dev, u8 number)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *fn;

	/* Linear search; the list holds one entry per RMI function. */
	list_for_each_entry(fn, &data->function_list, node)
		if (fn->fd.function_number == number)
			return fn;

	return NULL;
}
255*4882a593Smuzhiyun
suspend_one_function(struct rmi_function * fn)256*4882a593Smuzhiyun static int suspend_one_function(struct rmi_function *fn)
257*4882a593Smuzhiyun {
258*4882a593Smuzhiyun struct rmi_function_handler *fh;
259*4882a593Smuzhiyun int retval = 0;
260*4882a593Smuzhiyun
261*4882a593Smuzhiyun if (!fn || !fn->dev.driver)
262*4882a593Smuzhiyun return 0;
263*4882a593Smuzhiyun
264*4882a593Smuzhiyun fh = to_rmi_function_handler(fn->dev.driver);
265*4882a593Smuzhiyun if (fh->suspend) {
266*4882a593Smuzhiyun retval = fh->suspend(fn);
267*4882a593Smuzhiyun if (retval < 0)
268*4882a593Smuzhiyun dev_err(&fn->dev, "Suspend failed with code %d.\n",
269*4882a593Smuzhiyun retval);
270*4882a593Smuzhiyun }
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun return retval;
273*4882a593Smuzhiyun }
274*4882a593Smuzhiyun
rmi_suspend_functions(struct rmi_device * rmi_dev)275*4882a593Smuzhiyun static int rmi_suspend_functions(struct rmi_device *rmi_dev)
276*4882a593Smuzhiyun {
277*4882a593Smuzhiyun struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
278*4882a593Smuzhiyun struct rmi_function *entry;
279*4882a593Smuzhiyun int retval;
280*4882a593Smuzhiyun
281*4882a593Smuzhiyun list_for_each_entry(entry, &data->function_list, node) {
282*4882a593Smuzhiyun retval = suspend_one_function(entry);
283*4882a593Smuzhiyun if (retval < 0)
284*4882a593Smuzhiyun return retval;
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun
287*4882a593Smuzhiyun return 0;
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun
resume_one_function(struct rmi_function * fn)290*4882a593Smuzhiyun static int resume_one_function(struct rmi_function *fn)
291*4882a593Smuzhiyun {
292*4882a593Smuzhiyun struct rmi_function_handler *fh;
293*4882a593Smuzhiyun int retval = 0;
294*4882a593Smuzhiyun
295*4882a593Smuzhiyun if (!fn || !fn->dev.driver)
296*4882a593Smuzhiyun return 0;
297*4882a593Smuzhiyun
298*4882a593Smuzhiyun fh = to_rmi_function_handler(fn->dev.driver);
299*4882a593Smuzhiyun if (fh->resume) {
300*4882a593Smuzhiyun retval = fh->resume(fn);
301*4882a593Smuzhiyun if (retval < 0)
302*4882a593Smuzhiyun dev_err(&fn->dev, "Resume failed with code %d.\n",
303*4882a593Smuzhiyun retval);
304*4882a593Smuzhiyun }
305*4882a593Smuzhiyun
306*4882a593Smuzhiyun return retval;
307*4882a593Smuzhiyun }
308*4882a593Smuzhiyun
rmi_resume_functions(struct rmi_device * rmi_dev)309*4882a593Smuzhiyun static int rmi_resume_functions(struct rmi_device *rmi_dev)
310*4882a593Smuzhiyun {
311*4882a593Smuzhiyun struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
312*4882a593Smuzhiyun struct rmi_function *entry;
313*4882a593Smuzhiyun int retval;
314*4882a593Smuzhiyun
315*4882a593Smuzhiyun list_for_each_entry(entry, &data->function_list, node) {
316*4882a593Smuzhiyun retval = resume_one_function(entry);
317*4882a593Smuzhiyun if (retval < 0)
318*4882a593Smuzhiyun return retval;
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun return 0;
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun
/* Re-apply all function configuration, then service pending interrupts. */
int rmi_enable_sensor(struct rmi_device *rmi_dev)
{
	int error;

	error = rmi_driver_process_config_requests(rmi_dev);
	if (error < 0)
		return error;

	return rmi_process_interrupt_requests(rmi_dev);
}
334*4882a593Smuzhiyun
335*4882a593Smuzhiyun /**
336*4882a593Smuzhiyun * rmi_driver_set_input_params - set input device id and other data.
337*4882a593Smuzhiyun *
338*4882a593Smuzhiyun * @rmi_dev: Pointer to an RMI device
339*4882a593Smuzhiyun * @input: Pointer to input device
340*4882a593Smuzhiyun *
341*4882a593Smuzhiyun */
rmi_driver_set_input_params(struct rmi_device * rmi_dev,struct input_dev * input)342*4882a593Smuzhiyun static int rmi_driver_set_input_params(struct rmi_device *rmi_dev,
343*4882a593Smuzhiyun struct input_dev *input)
344*4882a593Smuzhiyun {
345*4882a593Smuzhiyun input->name = SYNAPTICS_INPUT_DEVICE_NAME;
346*4882a593Smuzhiyun input->id.vendor = SYNAPTICS_VENDOR_ID;
347*4882a593Smuzhiyun input->id.bustype = BUS_RMI;
348*4882a593Smuzhiyun return 0;
349*4882a593Smuzhiyun }
350*4882a593Smuzhiyun
rmi_driver_set_input_name(struct rmi_device * rmi_dev,struct input_dev * input)351*4882a593Smuzhiyun static void rmi_driver_set_input_name(struct rmi_device *rmi_dev,
352*4882a593Smuzhiyun struct input_dev *input)
353*4882a593Smuzhiyun {
354*4882a593Smuzhiyun struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
355*4882a593Smuzhiyun const char *device_name = rmi_f01_get_product_ID(data->f01_container);
356*4882a593Smuzhiyun char *name;
357*4882a593Smuzhiyun
358*4882a593Smuzhiyun name = devm_kasprintf(&rmi_dev->dev, GFP_KERNEL,
359*4882a593Smuzhiyun "Synaptics %s", device_name);
360*4882a593Smuzhiyun if (!name)
361*4882a593Smuzhiyun return;
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun input->name = name;
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun
/*
 * Enable the interrupt sources in @mask on top of the currently
 * enabled set and record them as owned by a registered function.
 */
static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
				   unsigned long *mask)
{
	int error = 0;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;

	/* irq_mutex serializes all access to the IRQ mask bitmaps. */
	mutex_lock(&data->irq_mutex);
	bitmap_or(data->new_irq_mask,
		  data->current_irq_mask, mask, data->irq_count);

	/*
	 * Write the new mask to the F01 interrupt enable registers,
	 * which start one byte past the F01 control base address.
	 */
	error = rmi_write_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->new_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(dev, "%s: Failed to change enabled interrupts!",
							__func__);
		goto error_unlock;
	}
	/* The device accepted the write; commit it as the current mask. */
	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
		    data->num_of_irq_regs);

	/* Mark these sources as claimed by a function driver. */
	bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);

error_unlock:
	mutex_unlock(&data->irq_mutex);
	return error;
}
394*4882a593Smuzhiyun
/*
 * Disable the interrupt sources in @mask and drop them from the set
 * owned by registered functions.
 */
static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
				     unsigned long *mask)
{
	int error = 0;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;

	/* irq_mutex serializes all access to the IRQ mask bitmaps. */
	mutex_lock(&data->irq_mutex);
	/* These sources no longer belong to any function driver. */
	bitmap_andnot(data->fn_irq_bits,
		      data->fn_irq_bits, mask, data->irq_count);
	/* Compute the enabled set with the requested sources removed. */
	bitmap_andnot(data->new_irq_mask,
		      data->current_irq_mask, mask, data->irq_count);

	/*
	 * Write the new mask to the F01 interrupt enable registers,
	 * which start one byte past the F01 control base address.
	 */
	error = rmi_write_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->new_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(dev, "%s: Failed to change enabled interrupts!",
							__func__);
		goto error_unlock;
	}
	/* The device accepted the write; commit it as the current mask. */
	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
		    data->num_of_irq_regs);

error_unlock:
	mutex_unlock(&data->irq_mutex);
	return error;
}
423*4882a593Smuzhiyun
/*
 * Bring the driver state back in sync after the device reports a
 * reset: refresh the cached IRQ mask, then run every function's
 * reset and config hooks.
 */
static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int error;

	/*
	 * Can get called before the driver is fully ready to deal with
	 * this situation.
	 */
	if (!data || !data->f01_container) {
		dev_warn(&rmi_dev->dev,
			 "Not ready to handle reset yet!\n");
		return 0;
	}

	/*
	 * Re-read the interrupt enable registers (one byte past the F01
	 * control base) so the cached mask matches the hardware state
	 * after the reset.
	 */
	error = rmi_read_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->current_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(&rmi_dev->dev, "%s: Failed to read current IRQ mask.\n",
			__func__);
		return error;
	}

	/* Let each function driver react to the reset... */
	error = rmi_driver_process_reset_requests(rmi_dev);
	if (error < 0)
		return error;

	/* ...then re-apply its configuration. */
	error = rmi_driver_process_config_requests(rmi_dev);
	if (error < 0)
		return error;

	return 0;
}
458*4882a593Smuzhiyun
/*
 * Read one Page Description Table entry at @pdt_address and unpack
 * its six bytes into @entry.
 */
static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
			      struct pdt_entry *entry, u16 pdt_address)
{
	u8 buf[RMI_PDT_ENTRY_SIZE];
	int error;

	error = rmi_read_block(rmi_dev, pdt_address, buf, RMI_PDT_ENTRY_SIZE);
	if (error) {
		dev_err(&rmi_dev->dev, "Read PDT entry at %#06x failed, code: %d.\n",
			pdt_address, error);
		return error;
	}

	/* Base addresses in a PDT entry are relative to the entry's page. */
	entry->page_start = pdt_address & RMI4_PAGE_MASK;
	entry->query_base_addr = buf[0];
	entry->command_base_addr = buf[1];
	entry->control_base_addr = buf[2];
	entry->data_base_addr = buf[3];
	/* buf[4] packs the interrupt source count and the function version. */
	entry->interrupt_source_count = buf[4] & RMI_PDT_INT_SOURCE_COUNT_MASK;
	entry->function_version = (buf[4] & RMI_PDT_FUNCTION_VERSION_MASK) >> 5;
	entry->function_number = buf[5];

	return 0;
}
483*4882a593Smuzhiyun
rmi_driver_copy_pdt_to_fd(const struct pdt_entry * pdt,struct rmi_function_descriptor * fd)484*4882a593Smuzhiyun static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
485*4882a593Smuzhiyun struct rmi_function_descriptor *fd)
486*4882a593Smuzhiyun {
487*4882a593Smuzhiyun fd->query_base_addr = pdt->query_base_addr + pdt->page_start;
488*4882a593Smuzhiyun fd->command_base_addr = pdt->command_base_addr + pdt->page_start;
489*4882a593Smuzhiyun fd->control_base_addr = pdt->control_base_addr + pdt->page_start;
490*4882a593Smuzhiyun fd->data_base_addr = pdt->data_base_addr + pdt->page_start;
491*4882a593Smuzhiyun fd->function_number = pdt->function_number;
492*4882a593Smuzhiyun fd->interrupt_source_count = pdt->interrupt_source_count;
493*4882a593Smuzhiyun fd->function_version = pdt->function_version;
494*4882a593Smuzhiyun }
495*4882a593Smuzhiyun
496*4882a593Smuzhiyun #define RMI_SCAN_CONTINUE 0
497*4882a593Smuzhiyun #define RMI_SCAN_DONE 1
498*4882a593Smuzhiyun
/*
 * Scan one page of the PDT, invoking @callback for each entry found.
 * Returns RMI_SCAN_CONTINUE, RMI_SCAN_DONE, a callback result, or a
 * negative error.
 */
static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
			     int page,
			     int *empty_pages,
			     void *ctx,
			     int (*callback)(struct rmi_device *rmi_dev,
					     void *ctx,
					     const struct pdt_entry *entry))
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct pdt_entry pdt_entry;
	u16 page_start = RMI4_PAGE_SIZE * page;
	u16 pdt_start = page_start + PDT_START_SCAN_LOCATION;
	u16 pdt_end = page_start + PDT_END_SCAN_LOCATION;
	u16 addr;
	int error;
	int retval;

	/* PDT entries are laid out top-down; scan from high to low address. */
	for (addr = pdt_start; addr >= pdt_end; addr -= RMI_PDT_ENTRY_SIZE) {
		error = rmi_read_pdt_entry(rmi_dev, &pdt_entry, addr);
		if (error)
			return error;

		/* A sentinel function number ends this page's table. */
		if (RMI4_END_OF_PDT(pdt_entry.function_number))
			break;

		/* Callback decides whether the overall scan continues. */
		retval = callback(rmi_dev, ctx, &pdt_entry);
		if (retval != RMI_SCAN_CONTINUE)
			return retval;
	}

	/*
	 * Count number of empty PDT pages. If a gap of two pages
	 * or more is found, stop scanning.
	 */
	if (addr == pdt_start)
		++*empty_pages;
	else
		*empty_pages = 0;

	/* Also stop after the current page while in bootloader mode. */
	return (data->bootloader_mode || *empty_pages >= 2) ?
					RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
}
541*4882a593Smuzhiyun
rmi_scan_pdt(struct rmi_device * rmi_dev,void * ctx,int (* callback)(struct rmi_device * rmi_dev,void * ctx,const struct pdt_entry * entry))542*4882a593Smuzhiyun int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
543*4882a593Smuzhiyun int (*callback)(struct rmi_device *rmi_dev,
544*4882a593Smuzhiyun void *ctx, const struct pdt_entry *entry))
545*4882a593Smuzhiyun {
546*4882a593Smuzhiyun int page;
547*4882a593Smuzhiyun int empty_pages = 0;
548*4882a593Smuzhiyun int retval = RMI_SCAN_DONE;
549*4882a593Smuzhiyun
550*4882a593Smuzhiyun for (page = 0; page <= RMI4_MAX_PAGE; page++) {
551*4882a593Smuzhiyun retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
552*4882a593Smuzhiyun ctx, callback);
553*4882a593Smuzhiyun if (retval != RMI_SCAN_CONTINUE)
554*4882a593Smuzhiyun break;
555*4882a593Smuzhiyun }
556*4882a593Smuzhiyun
557*4882a593Smuzhiyun return retval < 0 ? retval : 0;
558*4882a593Smuzhiyun }
559*4882a593Smuzhiyun
/*
 * rmi_read_register_desc - read and parse an RMI4 register descriptor.
 * @d: RMI device to read from
 * @addr: address of the descriptor's size-of-presence register
 * @rdesc: filled in with the parsed descriptor
 *
 * Reads the presence register to learn which packet registers exist,
 * then parses the register structure for each register's size and
 * subpacket map.  rdesc->registers is devm-allocated and lives as long
 * as the device.  Returns 0 on success or a negative error code.
 */
int rmi_read_register_desc(struct rmi_device *d, u16 addr,
			   struct rmi_register_descriptor *rdesc)
{
	int ret;
	u8 size_presence_reg;
	u8 buf[35];
	int presense_offset = 1;
	u8 *struct_buf;
	int reg;
	int offset = 0;
	int map_offset = 0;
	int i;
	int b;

	/*
	 * The first register of the register descriptor is the size of
	 * the register descriptor's presense register.
	 */
	ret = rmi_read(d, addr, &size_presence_reg);
	if (ret)
		return ret;
	++addr;

	/*
	 * size_presence_reg is an unsigned byte and can never be
	 * negative, so only the upper bound (the size of our scratch
	 * buffer) needs to be enforced.
	 */
	if (size_presence_reg > sizeof(buf))
		return -EIO;

	memset(buf, 0, sizeof(buf));

	/*
	 * The presence register contains the size of the register structure
	 * and a bitmap which identified which packet registers are present
	 * for this particular register type (ie query, control, or data).
	 */
	ret = rmi_read_block(d, addr, buf, size_presence_reg);
	if (ret)
		return ret;
	++addr;

	/* A zero first byte escapes to a 16-bit struct size in buf[1..2]. */
	if (buf[0] == 0) {
		presense_offset = 3;
		rdesc->struct_size = buf[1] | (buf[2] << 8);
	} else {
		rdesc->struct_size = buf[0];
	}

	/* The remaining bytes form a bitmap of present packet registers. */
	for (i = presense_offset; i < size_presence_reg; i++) {
		for (b = 0; b < 8; b++) {
			if (buf[i] & (0x1 << b))
				bitmap_set(rdesc->presense_map, map_offset, 1);
			++map_offset;
		}
	}

	rdesc->num_registers = bitmap_weight(rdesc->presense_map,
						RMI_REG_DESC_PRESENSE_BITS);

	rdesc->registers = devm_kcalloc(&d->dev,
					rdesc->num_registers,
					sizeof(struct rmi_register_desc_item),
					GFP_KERNEL);
	if (!rdesc->registers)
		return -ENOMEM;

	/*
	 * Allocate a temporary buffer to hold the register structure.
	 * I'm not using devm_kzalloc here since it will not be retained
	 * after exiting this function
	 */
	struct_buf = kzalloc(rdesc->struct_size, GFP_KERNEL);
	if (!struct_buf)
		return -ENOMEM;

	/*
	 * The register structure contains information about every packet
	 * register of this type. This includes the size of the packet
	 * register and a bitmap of all subpackets contained in the packet
	 * register.
	 */
	ret = rmi_read_block(d, addr, struct_buf, rdesc->struct_size);
	if (ret)
		goto free_struct_buff;

	reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
	for (i = 0; i < rdesc->num_registers; i++) {
		struct rmi_register_desc_item *item = &rdesc->registers[i];
		int reg_size = struct_buf[offset];

		++offset;
		/* A zero size byte escapes to a 16-bit size... */
		if (reg_size == 0) {
			reg_size = struct_buf[offset] |
					(struct_buf[offset + 1] << 8);
			offset += 2;
		}

		/* ...and a zero 16-bit size escapes to a 32-bit size. */
		if (reg_size == 0) {
			reg_size = struct_buf[offset] |
				(struct_buf[offset + 1] << 8) |
				(struct_buf[offset + 2] << 16) |
				(struct_buf[offset + 3] << 24);
			offset += 4;
		}

		item->reg = reg;
		item->reg_size = reg_size;

		map_offset = 0;

		/*
		 * Each subpacket byte carries seven map bits; bit 7 set
		 * means another subpacket byte follows.
		 */
		do {
			for (b = 0; b < 7; b++) {
				if (struct_buf[offset] & (0x1 << b))
					bitmap_set(item->subpacket_map,
						map_offset, 1);
				++map_offset;
			}
		} while (struct_buf[offset++] & 0x80);

		item->num_subpackets = bitmap_weight(item->subpacket_map,
						RMI_REG_DESC_SUBPACKET_BITS);

		rmi_dbg(RMI_DEBUG_CORE, &d->dev,
			"%s: reg: %d reg size: %ld subpackets: %d\n", __func__,
			item->reg, item->reg_size, item->num_subpackets);

		reg = find_next_bit(rdesc->presense_map,
				RMI_REG_DESC_PRESENSE_BITS, reg + 1);
	}

free_struct_buff:
	kfree(struct_buf);
	return ret;
}
691*4882a593Smuzhiyun
/*
 * rmi_get_register_desc_item - look up the descriptor entry for a register.
 * @rdesc: parsed register descriptor to search.
 * @reg: register number to find.
 *
 * Returns the matching item, or NULL if @reg is not described.
 */
const struct rmi_register_desc_item *rmi_get_register_desc_item(
				struct rmi_register_descriptor *rdesc, u16 reg)
{
	int idx;

	for (idx = 0; idx < rdesc->num_registers; idx++)
		if (rdesc->registers[idx].reg == reg)
			return &rdesc->registers[idx];

	return NULL;
}
706*4882a593Smuzhiyun
rmi_register_desc_calc_size(struct rmi_register_descriptor * rdesc)707*4882a593Smuzhiyun size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
708*4882a593Smuzhiyun {
709*4882a593Smuzhiyun const struct rmi_register_desc_item *item;
710*4882a593Smuzhiyun int i;
711*4882a593Smuzhiyun size_t size = 0;
712*4882a593Smuzhiyun
713*4882a593Smuzhiyun for (i = 0; i < rdesc->num_registers; i++) {
714*4882a593Smuzhiyun item = &rdesc->registers[i];
715*4882a593Smuzhiyun size += item->reg_size;
716*4882a593Smuzhiyun }
717*4882a593Smuzhiyun return size;
718*4882a593Smuzhiyun }
719*4882a593Smuzhiyun
720*4882a593Smuzhiyun /* Compute the register offset relative to the base address */
rmi_register_desc_calc_reg_offset(struct rmi_register_descriptor * rdesc,u16 reg)721*4882a593Smuzhiyun int rmi_register_desc_calc_reg_offset(
722*4882a593Smuzhiyun struct rmi_register_descriptor *rdesc, u16 reg)
723*4882a593Smuzhiyun {
724*4882a593Smuzhiyun const struct rmi_register_desc_item *item;
725*4882a593Smuzhiyun int offset = 0;
726*4882a593Smuzhiyun int i;
727*4882a593Smuzhiyun
728*4882a593Smuzhiyun for (i = 0; i < rdesc->num_registers; i++) {
729*4882a593Smuzhiyun item = &rdesc->registers[i];
730*4882a593Smuzhiyun if (item->reg == reg)
731*4882a593Smuzhiyun return offset;
732*4882a593Smuzhiyun ++offset;
733*4882a593Smuzhiyun }
734*4882a593Smuzhiyun return -1;
735*4882a593Smuzhiyun }
736*4882a593Smuzhiyun
rmi_register_desc_has_subpacket(const struct rmi_register_desc_item * item,u8 subpacket)737*4882a593Smuzhiyun bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
738*4882a593Smuzhiyun u8 subpacket)
739*4882a593Smuzhiyun {
740*4882a593Smuzhiyun return find_next_bit(item->subpacket_map, RMI_REG_DESC_PRESENSE_BITS,
741*4882a593Smuzhiyun subpacket) == subpacket;
742*4882a593Smuzhiyun }
743*4882a593Smuzhiyun
rmi_check_bootloader_mode(struct rmi_device * rmi_dev,const struct pdt_entry * pdt)744*4882a593Smuzhiyun static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
745*4882a593Smuzhiyun const struct pdt_entry *pdt)
746*4882a593Smuzhiyun {
747*4882a593Smuzhiyun struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
748*4882a593Smuzhiyun int ret;
749*4882a593Smuzhiyun u8 status;
750*4882a593Smuzhiyun
751*4882a593Smuzhiyun if (pdt->function_number == 0x34 && pdt->function_version > 1) {
752*4882a593Smuzhiyun ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
753*4882a593Smuzhiyun if (ret) {
754*4882a593Smuzhiyun dev_err(&rmi_dev->dev,
755*4882a593Smuzhiyun "Failed to read F34 status: %d.\n", ret);
756*4882a593Smuzhiyun return ret;
757*4882a593Smuzhiyun }
758*4882a593Smuzhiyun
759*4882a593Smuzhiyun if (status & BIT(7))
760*4882a593Smuzhiyun data->bootloader_mode = true;
761*4882a593Smuzhiyun } else if (pdt->function_number == 0x01) {
762*4882a593Smuzhiyun ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
763*4882a593Smuzhiyun if (ret) {
764*4882a593Smuzhiyun dev_err(&rmi_dev->dev,
765*4882a593Smuzhiyun "Failed to read F01 status: %d.\n", ret);
766*4882a593Smuzhiyun return ret;
767*4882a593Smuzhiyun }
768*4882a593Smuzhiyun
769*4882a593Smuzhiyun if (status & BIT(6))
770*4882a593Smuzhiyun data->bootloader_mode = true;
771*4882a593Smuzhiyun }
772*4882a593Smuzhiyun
773*4882a593Smuzhiyun return 0;
774*4882a593Smuzhiyun }
775*4882a593Smuzhiyun
rmi_count_irqs(struct rmi_device * rmi_dev,void * ctx,const struct pdt_entry * pdt)776*4882a593Smuzhiyun static int rmi_count_irqs(struct rmi_device *rmi_dev,
777*4882a593Smuzhiyun void *ctx, const struct pdt_entry *pdt)
778*4882a593Smuzhiyun {
779*4882a593Smuzhiyun int *irq_count = ctx;
780*4882a593Smuzhiyun int ret;
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun *irq_count += pdt->interrupt_source_count;
783*4882a593Smuzhiyun
784*4882a593Smuzhiyun ret = rmi_check_bootloader_mode(rmi_dev, pdt);
785*4882a593Smuzhiyun if (ret < 0)
786*4882a593Smuzhiyun return ret;
787*4882a593Smuzhiyun
788*4882a593Smuzhiyun return RMI_SCAN_CONTINUE;
789*4882a593Smuzhiyun }
790*4882a593Smuzhiyun
rmi_initial_reset(struct rmi_device * rmi_dev,void * ctx,const struct pdt_entry * pdt)791*4882a593Smuzhiyun int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
792*4882a593Smuzhiyun const struct pdt_entry *pdt)
793*4882a593Smuzhiyun {
794*4882a593Smuzhiyun int error;
795*4882a593Smuzhiyun
796*4882a593Smuzhiyun if (pdt->function_number == 0x01) {
797*4882a593Smuzhiyun u16 cmd_addr = pdt->page_start + pdt->command_base_addr;
798*4882a593Smuzhiyun u8 cmd_buf = RMI_DEVICE_RESET_CMD;
799*4882a593Smuzhiyun const struct rmi_device_platform_data *pdata =
800*4882a593Smuzhiyun rmi_get_platform_data(rmi_dev);
801*4882a593Smuzhiyun
802*4882a593Smuzhiyun if (rmi_dev->xport->ops->reset) {
803*4882a593Smuzhiyun error = rmi_dev->xport->ops->reset(rmi_dev->xport,
804*4882a593Smuzhiyun cmd_addr);
805*4882a593Smuzhiyun if (error)
806*4882a593Smuzhiyun return error;
807*4882a593Smuzhiyun
808*4882a593Smuzhiyun return RMI_SCAN_DONE;
809*4882a593Smuzhiyun }
810*4882a593Smuzhiyun
811*4882a593Smuzhiyun rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
812*4882a593Smuzhiyun error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
813*4882a593Smuzhiyun if (error) {
814*4882a593Smuzhiyun dev_err(&rmi_dev->dev,
815*4882a593Smuzhiyun "Initial reset failed. Code = %d.\n", error);
816*4882a593Smuzhiyun return error;
817*4882a593Smuzhiyun }
818*4882a593Smuzhiyun
819*4882a593Smuzhiyun mdelay(pdata->reset_delay_ms ?: DEFAULT_RESET_DELAY_MS);
820*4882a593Smuzhiyun
821*4882a593Smuzhiyun return RMI_SCAN_DONE;
822*4882a593Smuzhiyun }
823*4882a593Smuzhiyun
824*4882a593Smuzhiyun /* F01 should always be on page 0. If we don't find it there, fail. */
825*4882a593Smuzhiyun return pdt->page_start == 0 ? RMI_SCAN_CONTINUE : -ENODEV;
826*4882a593Smuzhiyun }
827*4882a593Smuzhiyun
/*
 * rmi_create_function - PDT-scan callback that instantiates one RMI function.
 * @rmi_dev: the RMI device being scanned.
 * @ctx: pointer to the running IRQ-position counter across the scan.
 * @pdt: PDT entry describing the function to create.
 *
 * Allocates an rmi_function (with a trailing per-function IRQ bitmap sized
 * for data->irq_count bits), fills it from the PDT entry, assigns it the
 * next fn->num_of_irqs IRQ positions, and registers it on the RMI bus.
 * F01 and F34 are additionally cached in the driver data for direct access.
 *
 * Returns RMI_SCAN_CONTINUE on success, -ENOMEM on allocation failure, or
 * the error from rmi_register_function().
 *
 * NOTE(review): on rmi_register_function() failure fn is not freed here —
 * presumably registration failure releases it via the device refcount;
 * confirm against rmi_register_function() in rmi_bus.c.
 */
static int rmi_create_function(struct rmi_device *rmi_dev,
			       void *ctx, const struct pdt_entry *pdt)
{
	struct device *dev = &rmi_dev->dev;
	struct rmi_driver_data *data = dev_get_drvdata(dev);
	int *current_irq_count = ctx;
	struct rmi_function *fn;
	int i;
	int error;

	rmi_dbg(RMI_DEBUG_CORE, dev, "Initializing F%02X.\n",
		pdt->function_number);

	/* irq_mask is a flexible bitmap appended to the struct allocation. */
	fn = kzalloc(sizeof(struct rmi_function) +
			BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long),
		GFP_KERNEL);
	if (!fn) {
		dev_err(dev, "Failed to allocate memory for F%02X\n",
			pdt->function_number);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&fn->node);
	rmi_driver_copy_pdt_to_fd(pdt, &fn->fd);

	fn->rmi_dev = rmi_dev;

	/* Claim the next block of IRQ positions for this function. */
	fn->num_of_irqs = pdt->interrupt_source_count;
	fn->irq_pos = *current_irq_count;
	*current_irq_count += fn->num_of_irqs;

	for (i = 0; i < fn->num_of_irqs; i++)
		set_bit(fn->irq_pos + i, fn->irq_mask);

	error = rmi_register_function(fn);
	if (error)
		return error;

	/* Keep direct handles to the core (F01) and flash (F34) functions. */
	if (pdt->function_number == 0x01)
		data->f01_container = fn;
	else if (pdt->function_number == 0x34)
		data->f34_container = fn;

	list_add_tail(&fn->node, &data->function_list);

	return RMI_SCAN_CONTINUE;
}
875*4882a593Smuzhiyun
/*
 * rmi_enable_irq - re-enable the device interrupt (idempotent).
 * @rmi_dev: the RMI device.
 * @clear_wake: if set and the transport device may wake the system,
 *	also disable IRQ wake previously armed by rmi_disable_irq().
 */
void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int error;

	mutex_lock(&data->enabled_mutex);

	if (!data->enabled) {
		enable_irq(pdata->irq);
		data->enabled = true;

		if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
			error = disable_irq_wake(pdata->irq);
			if (error)
				dev_warn(&rmi_dev->dev,
					 "Failed to disable irq for wake: %d\n",
					 error);
		}

		/*
		 * Call rmi_process_interrupt_requests() after enabling irq,
		 * otherwise we may lose interrupt on edge-triggered systems.
		 */
		if (irq_get_trigger_type(pdata->irq) & IRQ_TYPE_EDGE_BOTH)
			rmi_process_interrupt_requests(rmi_dev);
	}

	mutex_unlock(&data->enabled_mutex);
}
910*4882a593Smuzhiyun
/*
 * rmi_disable_irq - disable the device interrupt (idempotent).
 * @rmi_dev: the RMI device.
 * @enable_wake: if set and the transport device may wake the system,
 *	arm IRQ wake so the interrupt can wake us from suspend.
 *
 * Also drains and frees any queued attention data so stale events are not
 * replayed once the interrupt is re-enabled.
 */
void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data = {0};
	int error;

	mutex_lock(&data->enabled_mutex);

	if (data->enabled) {
		data->enabled = false;
		disable_irq(pdata->irq);

		if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
			error = enable_irq_wake(pdata->irq);
			if (error)
				dev_warn(&rmi_dev->dev,
					 "Failed to enable irq for wake: %d\n",
					 error);
		}

		/* make sure the fifo is clean */
		while (kfifo_get(&data->attn_fifo, &attn_data))
			kfree(attn_data.data);
	}

	mutex_unlock(&data->enabled_mutex);
}
944*4882a593Smuzhiyun
/*
 * rmi_driver_suspend - suspend all functions, then disable the IRQ.
 * @rmi_dev: the RMI device.
 * @enable_wake: passed through to rmi_disable_irq() to arm IRQ wake.
 *
 * A function-suspend failure is only warned about; the IRQ is disabled
 * regardless and the original error is returned.
 */
int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake)
{
	int error = rmi_suspend_functions(rmi_dev);

	if (error)
		dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
			 error);

	rmi_disable_irq(rmi_dev, enable_wake);

	return error;
}
EXPORT_SYMBOL_GPL(rmi_driver_suspend);
958*4882a593Smuzhiyun
/*
 * rmi_driver_resume - re-enable the IRQ, then resume all functions.
 * @rmi_dev: the RMI device.
 * @clear_wake: passed through to rmi_enable_irq() to disarm IRQ wake.
 *
 * Returns the result of rmi_resume_functions(); a failure is warned about
 * but the IRQ remains enabled.
 */
int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake)
{
	int retval;

	rmi_enable_irq(rmi_dev, clear_wake);

	retval = rmi_resume_functions(rmi_dev);
	if (retval)
		/* Fixed copy-paste: this is the resume path, not suspend. */
		dev_warn(&rmi_dev->dev, "Failed to resume functions: %d\n",
			 retval);

	return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_resume);
973*4882a593Smuzhiyun
/*
 * rmi_driver_remove - tear down the RMI physical device.
 * @dev: the device being unbound.
 *
 * Teardown order matters: the IRQ is quiesced first so no handler runs into
 * freed state, the IRQ domain is removed and cleared, then the F34 sysfs
 * attributes and finally the function list are released.
 */
static int rmi_driver_remove(struct device *dev)
{
	struct rmi_device *rmi_dev = to_rmi_device(dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

	rmi_disable_irq(rmi_dev, false);

	irq_domain_remove(data->irqdomain);
	data->irqdomain = NULL;

	rmi_f34_remove_sysfs(rmi_dev);
	rmi_free_function_list(rmi_dev);

	return 0;
}
989*4882a593Smuzhiyun
990*4882a593Smuzhiyun #ifdef CONFIG_OF
rmi_driver_of_probe(struct device * dev,struct rmi_device_platform_data * pdata)991*4882a593Smuzhiyun static int rmi_driver_of_probe(struct device *dev,
992*4882a593Smuzhiyun struct rmi_device_platform_data *pdata)
993*4882a593Smuzhiyun {
994*4882a593Smuzhiyun int retval;
995*4882a593Smuzhiyun
996*4882a593Smuzhiyun retval = rmi_of_property_read_u32(dev, &pdata->reset_delay_ms,
997*4882a593Smuzhiyun "syna,reset-delay-ms", 1);
998*4882a593Smuzhiyun if (retval)
999*4882a593Smuzhiyun return retval;
1000*4882a593Smuzhiyun
1001*4882a593Smuzhiyun return 0;
1002*4882a593Smuzhiyun }
1003*4882a593Smuzhiyun #else
rmi_driver_of_probe(struct device * dev,struct rmi_device_platform_data * pdata)1004*4882a593Smuzhiyun static inline int rmi_driver_of_probe(struct device *dev,
1005*4882a593Smuzhiyun struct rmi_device_platform_data *pdata)
1006*4882a593Smuzhiyun {
1007*4882a593Smuzhiyun return -ENODEV;
1008*4882a593Smuzhiyun }
1009*4882a593Smuzhiyun #endif
1010*4882a593Smuzhiyun
rmi_probe_interrupts(struct rmi_driver_data * data)1011*4882a593Smuzhiyun int rmi_probe_interrupts(struct rmi_driver_data *data)
1012*4882a593Smuzhiyun {
1013*4882a593Smuzhiyun struct rmi_device *rmi_dev = data->rmi_dev;
1014*4882a593Smuzhiyun struct device *dev = &rmi_dev->dev;
1015*4882a593Smuzhiyun struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
1016*4882a593Smuzhiyun int irq_count = 0;
1017*4882a593Smuzhiyun size_t size;
1018*4882a593Smuzhiyun int retval;
1019*4882a593Smuzhiyun
1020*4882a593Smuzhiyun /*
1021*4882a593Smuzhiyun * We need to count the IRQs and allocate their storage before scanning
1022*4882a593Smuzhiyun * the PDT and creating the function entries, because adding a new
1023*4882a593Smuzhiyun * function can trigger events that result in the IRQ related storage
1024*4882a593Smuzhiyun * being accessed.
1025*4882a593Smuzhiyun */
1026*4882a593Smuzhiyun rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
1027*4882a593Smuzhiyun data->bootloader_mode = false;
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
1030*4882a593Smuzhiyun if (retval < 0) {
1031*4882a593Smuzhiyun dev_err(dev, "IRQ counting failed with code %d.\n", retval);
1032*4882a593Smuzhiyun return retval;
1033*4882a593Smuzhiyun }
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyun if (data->bootloader_mode)
1036*4882a593Smuzhiyun dev_warn(dev, "Device in bootloader mode.\n");
1037*4882a593Smuzhiyun
1038*4882a593Smuzhiyun /* Allocate and register a linear revmap irq_domain */
1039*4882a593Smuzhiyun data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
1040*4882a593Smuzhiyun &irq_domain_simple_ops,
1041*4882a593Smuzhiyun data);
1042*4882a593Smuzhiyun if (!data->irqdomain) {
1043*4882a593Smuzhiyun dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
1044*4882a593Smuzhiyun return -ENOMEM;
1045*4882a593Smuzhiyun }
1046*4882a593Smuzhiyun
1047*4882a593Smuzhiyun data->irq_count = irq_count;
1048*4882a593Smuzhiyun data->num_of_irq_regs = (data->irq_count + 7) / 8;
1049*4882a593Smuzhiyun
1050*4882a593Smuzhiyun size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
1051*4882a593Smuzhiyun data->irq_memory = devm_kcalloc(dev, size, 4, GFP_KERNEL);
1052*4882a593Smuzhiyun if (!data->irq_memory) {
1053*4882a593Smuzhiyun dev_err(dev, "Failed to allocate memory for irq masks.\n");
1054*4882a593Smuzhiyun return -ENOMEM;
1055*4882a593Smuzhiyun }
1056*4882a593Smuzhiyun
1057*4882a593Smuzhiyun data->irq_status = data->irq_memory + size * 0;
1058*4882a593Smuzhiyun data->fn_irq_bits = data->irq_memory + size * 1;
1059*4882a593Smuzhiyun data->current_irq_mask = data->irq_memory + size * 2;
1060*4882a593Smuzhiyun data->new_irq_mask = data->irq_memory + size * 3;
1061*4882a593Smuzhiyun
1062*4882a593Smuzhiyun return retval;
1063*4882a593Smuzhiyun }
1064*4882a593Smuzhiyun
rmi_init_functions(struct rmi_driver_data * data)1065*4882a593Smuzhiyun int rmi_init_functions(struct rmi_driver_data *data)
1066*4882a593Smuzhiyun {
1067*4882a593Smuzhiyun struct rmi_device *rmi_dev = data->rmi_dev;
1068*4882a593Smuzhiyun struct device *dev = &rmi_dev->dev;
1069*4882a593Smuzhiyun int irq_count = 0;
1070*4882a593Smuzhiyun int retval;
1071*4882a593Smuzhiyun
1072*4882a593Smuzhiyun rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
1073*4882a593Smuzhiyun retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
1074*4882a593Smuzhiyun if (retval < 0) {
1075*4882a593Smuzhiyun dev_err(dev, "Function creation failed with code %d.\n",
1076*4882a593Smuzhiyun retval);
1077*4882a593Smuzhiyun goto err_destroy_functions;
1078*4882a593Smuzhiyun }
1079*4882a593Smuzhiyun
1080*4882a593Smuzhiyun if (!data->f01_container) {
1081*4882a593Smuzhiyun dev_err(dev, "Missing F01 container!\n");
1082*4882a593Smuzhiyun retval = -EINVAL;
1083*4882a593Smuzhiyun goto err_destroy_functions;
1084*4882a593Smuzhiyun }
1085*4882a593Smuzhiyun
1086*4882a593Smuzhiyun retval = rmi_read_block(rmi_dev,
1087*4882a593Smuzhiyun data->f01_container->fd.control_base_addr + 1,
1088*4882a593Smuzhiyun data->current_irq_mask, data->num_of_irq_regs);
1089*4882a593Smuzhiyun if (retval < 0) {
1090*4882a593Smuzhiyun dev_err(dev, "%s: Failed to read current IRQ mask.\n",
1091*4882a593Smuzhiyun __func__);
1092*4882a593Smuzhiyun goto err_destroy_functions;
1093*4882a593Smuzhiyun }
1094*4882a593Smuzhiyun
1095*4882a593Smuzhiyun return 0;
1096*4882a593Smuzhiyun
1097*4882a593Smuzhiyun err_destroy_functions:
1098*4882a593Smuzhiyun rmi_free_function_list(rmi_dev);
1099*4882a593Smuzhiyun return retval;
1100*4882a593Smuzhiyun }
1101*4882a593Smuzhiyun
/*
 * rmi_driver_probe - bind the RMI core driver to a physical RMI device.
 * @dev: the device being probed.
 *
 * Probe sequence (order is significant):
 *  1. verify this is a physical RMI device and read OF platform data;
 *  2. issue an initial reset (before building the PDT, since reflash may
 *     move registers around — see the long comment below);
 *  3. read PDT properties, set up IRQ accounting (rmi_probe_interrupts);
 *  4. pick or allocate the input device;
 *  5. create functions from the PDT, F34 sysfs, register input, init IRQ;
 *  6. enable the sensor if the F01 sub-driver is already bound.
 *
 * Returns 0 on success or a negative error; partially created state is
 * unwound via the error labels at the bottom.
 */
static int rmi_driver_probe(struct device *dev)
{
	struct rmi_driver *rmi_driver;
	struct rmi_driver_data *data;
	struct rmi_device_platform_data *pdata;
	struct rmi_device *rmi_dev;
	int retval;

	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
		__func__);

	if (!rmi_is_physical_device(dev)) {
		rmi_dbg(RMI_DEBUG_CORE, dev, "Not a physical device.\n");
		return -ENODEV;
	}

	rmi_dev = to_rmi_device(dev);
	rmi_driver = to_rmi_driver(dev->driver);
	rmi_dev->driver = rmi_driver;

	pdata = rmi_get_platform_data(rmi_dev);

	/* Device-tree properties (e.g. reset delay) override platform data. */
	if (rmi_dev->xport->dev->of_node) {
		retval = rmi_driver_of_probe(rmi_dev->xport->dev, pdata);
		if (retval)
			return retval;
	}

	data = devm_kzalloc(dev, sizeof(struct rmi_driver_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->function_list);
	data->rmi_dev = rmi_dev;
	dev_set_drvdata(&rmi_dev->dev, data);

	/*
	 * Right before a warm boot, the sensor might be in some unusual state,
	 * such as F54 diagnostics, or F34 bootloader mode after a firmware
	 * or configuration update. In order to clear the sensor to a known
	 * state and/or apply any updates, we issue a initial reset to clear any
	 * previous settings and force it into normal operation.
	 *
	 * We have to do this before actually building the PDT because
	 * the reflash updates (if any) might cause various registers to move
	 * around.
	 *
	 * For a number of reasons, this initial reset may fail to return
	 * within the specified time, but we'll still be able to bring up the
	 * driver normally after that failure. This occurs most commonly in
	 * a cold boot situation (where then firmware takes longer to come up
	 * than from a warm boot) and the reset_delay_ms in the platform data
	 * has been set too short to accommodate that. Since the sensor will
	 * eventually come up and be usable, we don't want to just fail here
	 * and leave the customer's device unusable. So we warn them, and
	 * continue processing.
	 */
	retval = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
	if (retval < 0)
		dev_warn(dev, "RMI initial reset failed! Continuing in spite of this.\n");

	retval = rmi_read(rmi_dev, PDT_PROPERTIES_LOCATION, &data->pdt_props);
	if (retval < 0) {
		/*
		 * we'll print out a warning and continue since
		 * failure to get the PDT properties is not a cause to fail
		 */
		dev_warn(dev, "Could not read PDT properties from %#06x (code %d). Assuming 0x00.\n",
			 PDT_PROPERTIES_LOCATION, retval);
	}

	mutex_init(&data->irq_mutex);
	mutex_init(&data->enabled_mutex);

	retval = rmi_probe_interrupts(data);
	if (retval)
		goto err;

	if (rmi_dev->xport->input) {
		/*
		 * The transport driver already has an input device.
		 * In some cases it is preferable to reuse the transport
		 * devices input device instead of creating a new one here.
		 * One example is some HID touchpads report "pass-through"
		 * button events are not reported by rmi registers.
		 */
		data->input = rmi_dev->xport->input;
	} else {
		data->input = devm_input_allocate_device(dev);
		if (!data->input) {
			dev_err(dev, "%s: Failed to allocate input device.\n",
				__func__);
			retval = -ENOMEM;
			goto err;
		}
		rmi_driver_set_input_params(rmi_dev, data->input);
		data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
						   "%s/input0", dev_name(dev));
	}

	retval = rmi_init_functions(data);
	if (retval)
		goto err;

	retval = rmi_f34_create_sysfs(rmi_dev);
	if (retval)
		goto err;

	if (data->input) {
		rmi_driver_set_input_name(rmi_dev, data->input);
		/* Only register the input device if we allocated it here. */
		if (!rmi_dev->xport->input) {
			retval = input_register_device(data->input);
			if (retval) {
				dev_err(dev, "%s: Failed to register input device.\n",
					__func__);
				goto err_destroy_functions;
			}
		}
	}

	retval = rmi_irq_init(rmi_dev);
	if (retval < 0)
		goto err_destroy_functions;

	if (data->f01_container->dev.driver) {
		/* Driver already bound, so enable ATTN now. */
		retval = rmi_enable_sensor(rmi_dev);
		if (retval)
			goto err_disable_irq;
	}

	return 0;

err_disable_irq:
	rmi_disable_irq(rmi_dev, false);
err_destroy_functions:
	rmi_free_function_list(rmi_dev);
err:
	return retval;
}
1242*4882a593Smuzhiyun
1243*4882a593Smuzhiyun static struct rmi_driver rmi_physical_driver = {
1244*4882a593Smuzhiyun .driver = {
1245*4882a593Smuzhiyun .owner = THIS_MODULE,
1246*4882a593Smuzhiyun .name = "rmi4_physical",
1247*4882a593Smuzhiyun .bus = &rmi_bus_type,
1248*4882a593Smuzhiyun .probe = rmi_driver_probe,
1249*4882a593Smuzhiyun .remove = rmi_driver_remove,
1250*4882a593Smuzhiyun },
1251*4882a593Smuzhiyun .reset_handler = rmi_driver_reset_handler,
1252*4882a593Smuzhiyun .clear_irq_bits = rmi_driver_clear_irq_bits,
1253*4882a593Smuzhiyun .set_irq_bits = rmi_driver_set_irq_bits,
1254*4882a593Smuzhiyun .set_input_params = rmi_driver_set_input_params,
1255*4882a593Smuzhiyun };
1256*4882a593Smuzhiyun
rmi_is_physical_driver(struct device_driver * drv)1257*4882a593Smuzhiyun bool rmi_is_physical_driver(struct device_driver *drv)
1258*4882a593Smuzhiyun {
1259*4882a593Smuzhiyun return drv == &rmi_physical_driver.driver;
1260*4882a593Smuzhiyun }
1261*4882a593Smuzhiyun
rmi_register_physical_driver(void)1262*4882a593Smuzhiyun int __init rmi_register_physical_driver(void)
1263*4882a593Smuzhiyun {
1264*4882a593Smuzhiyun int error;
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun error = driver_register(&rmi_physical_driver.driver);
1267*4882a593Smuzhiyun if (error) {
1268*4882a593Smuzhiyun pr_err("%s: driver register failed, code=%d.\n", __func__,
1269*4882a593Smuzhiyun error);
1270*4882a593Smuzhiyun return error;
1271*4882a593Smuzhiyun }
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun return 0;
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun
/* Unregister the RMI physical driver at module exit. */
void __exit rmi_unregister_physical_driver(void)
{
	driver_unregister(&rmi_physical_driver.driver);
}
1280