// SPDX-License-Identifier: GPL-2.0-only
/*
 * HSI core.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 */
#include <linux/hsi/hsi.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include "hsi_core.h"

static ssize_t modalias_show(struct device *dev,
			struct device_attribute *a __maybe_unused, char *buf)
{
	return sprintf(buf, "hsi:%s\n", dev_name(dev));
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *hsi_bus_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(hsi_bus_dev);

static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));

	return 0;
}

static int hsi_bus_match(struct device *dev, struct device_driver *driver)
{
	if (of_driver_match_device(dev, driver))
		return true;

	if (strcmp(dev_name(dev), driver->name) == 0)
		return true;

	return false;
}

static struct bus_type hsi_bus_type = {
	.name = "hsi",
	.dev_groups = hsi_bus_dev_groups,
	.match = hsi_bus_match,
	.uevent = hsi_bus_uevent,
};

static void hsi_client_release(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);

	kfree(cl->tx_cfg.channels);
	kfree(cl->rx_cfg.channels);
	kfree(cl);
}

struct hsi_client *hsi_new_client(struct hsi_port *port,
				  struct hsi_board_info *info)
{
	struct hsi_client *cl;
	size_t size;

	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		goto err;

	cl->tx_cfg = info->tx_cfg;
	if (cl->tx_cfg.channels) {
		size = cl->tx_cfg.num_channels * sizeof(*cl->tx_cfg.channels);
		cl->tx_cfg.channels = kmemdup(info->tx_cfg.channels, size,
					      GFP_KERNEL);
		if (!cl->tx_cfg.channels)
			goto err_tx;
	}

	cl->rx_cfg = info->rx_cfg;
	if (cl->rx_cfg.channels) {
		size = cl->rx_cfg.num_channels * sizeof(*cl->rx_cfg.channels);
		cl->rx_cfg.channels = kmemdup(info->rx_cfg.channels, size,
					      GFP_KERNEL);
		if (!cl->rx_cfg.channels)
			goto err_rx;
	}

	cl->device.bus = &hsi_bus_type;
	cl->device.parent = &port->device;
	cl->device.release = hsi_client_release;
	dev_set_name(&cl->device, "%s", info->name);
	cl->device.platform_data = info->platform_data;
	if (info->archdata)
		cl->device.archdata = *info->archdata;
	if (device_register(&cl->device) < 0) {
		pr_err("hsi: failed to register client: %s\n", info->name);
		put_device(&cl->device);
		goto err;
	}

	return cl;
err_rx:
	kfree(cl->tx_cfg.channels);
err_tx:
	kfree(cl);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(hsi_new_client);
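
/*
 * Illustrative sketch only: platform/board code usually describes clients
 * with hsi_board_info entries such as the one below and hands them to
 * hsi_register_board_info() early at boot; hsi_scan_board_info() below then
 * turns every matching entry into a device through hsi_new_client(). The
 * controller id, port number, channel table and all names here are
 * hypothetical.
 */
static struct hsi_channel hsi_example_channels[] __maybe_unused = {
	{ .id = 0, .name = "control" },
	{ .id = 1, .name = "data" },
};

static struct hsi_board_info hsi_example_board_info __maybe_unused = {
	.name = "hsi-example-client",
	.hsi_id = 0,
	.port = 0,
	.tx_cfg = {
		.mode = HSI_MODE_FRAME,
		.channels = hsi_example_channels,
		.num_channels = ARRAY_SIZE(hsi_example_channels),
	},
	.rx_cfg = {
		.mode = HSI_MODE_FRAME,
		.channels = hsi_example_channels,
		.num_channels = ARRAY_SIZE(hsi_example_channels),
	},
};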

static void hsi_scan_board_info(struct hsi_controller *hsi)
{
	struct hsi_cl_info *cl_info;
	struct hsi_port *p;

	list_for_each_entry(cl_info, &hsi_board_list, list)
		if (cl_info->info.hsi_id == hsi->id) {
			p = hsi_find_port_num(hsi, cl_info->info.port);
			if (!p)
				continue;
			hsi_new_client(p, &cl_info->info);
		}
}

#ifdef CONFIG_OF
static struct hsi_board_info hsi_char_dev_info = {
	.name = "hsi_char",
};

static int hsi_of_property_parse_mode(struct device_node *client, char *name,
				      unsigned int *result)
{
	const char *mode;
	int err;

	err = of_property_read_string(client, name, &mode);
	if (err < 0)
		return err;

	if (strcmp(mode, "stream") == 0)
		*result = HSI_MODE_STREAM;
	else if (strcmp(mode, "frame") == 0)
		*result = HSI_MODE_FRAME;
	else
		return -EINVAL;

	return 0;
}

static int hsi_of_property_parse_flow(struct device_node *client, char *name,
				      unsigned int *result)
{
	const char *flow;
	int err;

	err = of_property_read_string(client, name, &flow);
	if (err < 0)
		return err;

	if (strcmp(flow, "synchronized") == 0)
		*result = HSI_FLOW_SYNC;
	else if (strcmp(flow, "pipeline") == 0)
		*result = HSI_FLOW_PIPE;
	else
		return -EINVAL;

	return 0;
}

static int hsi_of_property_parse_arb_mode(struct device_node *client,
					  char *name, unsigned int *result)
{
	const char *arb_mode;
	int err;

	err = of_property_read_string(client, name, &arb_mode);
	if (err < 0)
		return err;

	if (strcmp(arb_mode, "round-robin") == 0)
		*result = HSI_ARB_RR;
	else if (strcmp(arb_mode, "priority") == 0)
		*result = HSI_ARB_PRIO;
	else
		return -EINVAL;

	return 0;
}

static void hsi_add_client_from_dt(struct hsi_port *port,
				   struct device_node *client)
{
	struct hsi_client *cl;
	struct hsi_channel channel;
	struct property *prop;
	char name[32];
	int length, cells, err, i, max_chan, mode;

	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return;

	err = of_modalias_node(client, name, sizeof(name));
	if (err)
		goto err;

	err = hsi_of_property_parse_mode(client, "hsi-mode", &mode);
	if (err) {
		err = hsi_of_property_parse_mode(client, "hsi-rx-mode",
						 &cl->rx_cfg.mode);
		if (err)
			goto err;

		err = hsi_of_property_parse_mode(client, "hsi-tx-mode",
						 &cl->tx_cfg.mode);
		if (err)
			goto err;
	} else {
		cl->rx_cfg.mode = mode;
		cl->tx_cfg.mode = mode;
	}

	err = of_property_read_u32(client, "hsi-speed-kbps",
				   &cl->tx_cfg.speed);
	if (err)
		goto err;
	cl->rx_cfg.speed = cl->tx_cfg.speed;

	err = hsi_of_property_parse_flow(client, "hsi-flow",
					 &cl->rx_cfg.flow);
	if (err)
		goto err;

	err = hsi_of_property_parse_arb_mode(client, "hsi-arb-mode",
					     &cl->rx_cfg.arb_mode);
	if (err)
		goto err;

	prop = of_find_property(client, "hsi-channel-ids", &length);
	if (!prop) {
		err = -EINVAL;
		goto err;
	}

	cells = length / sizeof(u32);

	cl->rx_cfg.num_channels = cells;
	cl->tx_cfg.num_channels = cells;
	cl->rx_cfg.channels = kcalloc(cells, sizeof(channel), GFP_KERNEL);
	if (!cl->rx_cfg.channels) {
		err = -ENOMEM;
		goto err;
	}

	cl->tx_cfg.channels = kcalloc(cells, sizeof(channel), GFP_KERNEL);
	if (!cl->tx_cfg.channels) {
		err = -ENOMEM;
		goto err2;
	}

	max_chan = 0;
	for (i = 0; i < cells; i++) {
		err = of_property_read_u32_index(client, "hsi-channel-ids", i,
						 &channel.id);
		if (err)
			goto err3;

		err = of_property_read_string_index(client, "hsi-channel-names",
						    i, &channel.name);
		if (err)
			channel.name = NULL;

		if (channel.id > max_chan)
			max_chan = channel.id;

		cl->rx_cfg.channels[i] = channel;
		cl->tx_cfg.channels[i] = channel;
	}

	cl->rx_cfg.num_hw_channels = max_chan + 1;
	cl->tx_cfg.num_hw_channels = max_chan + 1;

	cl->device.bus = &hsi_bus_type;
	cl->device.parent = &port->device;
	cl->device.release = hsi_client_release;
	cl->device.of_node = client;

	dev_set_name(&cl->device, "%s", name);
	if (device_register(&cl->device) < 0) {
		pr_err("hsi: failed to register client: %s\n", name);
		put_device(&cl->device);
	}

	return;

err3:
	kfree(cl->tx_cfg.channels);
err2:
	kfree(cl->rx_cfg.channels);
err:
	kfree(cl);
	pr_err("hsi client: missing or incorrect of property: err=%d\n", err);
}

void hsi_add_clients_from_dt(struct hsi_port *port, struct device_node *clients)
{
	struct device_node *child;

	/* register hsi-char device */
	hsi_new_client(port, &hsi_char_dev_info);

	for_each_available_child_of_node(clients, child)
		hsi_add_client_from_dt(port, child);
}
EXPORT_SYMBOL_GPL(hsi_add_clients_from_dt);
#endif

int hsi_remove_client(struct device *dev, void *data __maybe_unused)
{
	device_unregister(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(hsi_remove_client);

static int hsi_remove_port(struct device *dev, void *data __maybe_unused)
{
	device_for_each_child(dev, NULL, hsi_remove_client);
	device_unregister(dev);

	return 0;
}

static void hsi_controller_release(struct device *dev)
{
	struct hsi_controller *hsi = to_hsi_controller(dev);

	kfree(hsi->port);
	kfree(hsi);
}

static void hsi_port_release(struct device *dev)
{
	kfree(to_hsi_port(dev));
}

/**
 * hsi_port_unregister_clients - Unregister all the clients of an HSI port
 * @port: The HSI port whose clients will be unregistered
 */
void hsi_port_unregister_clients(struct hsi_port *port)
{
	device_for_each_child(&port->device, NULL, hsi_remove_client);
}
EXPORT_SYMBOL_GPL(hsi_port_unregister_clients);

/**
 * hsi_unregister_controller - Unregister an HSI controller
 * @hsi: The HSI controller to unregister
 */
void hsi_unregister_controller(struct hsi_controller *hsi)
{
	device_for_each_child(&hsi->device, NULL, hsi_remove_port);
	device_unregister(&hsi->device);
}
EXPORT_SYMBOL_GPL(hsi_unregister_controller);

/**
 * hsi_register_controller - Register an HSI controller and its ports
 * @hsi: The HSI controller to register
 *
 * Returns -errno on failure, 0 on success.
 */
int hsi_register_controller(struct hsi_controller *hsi)
{
	unsigned int i;
	int err;

	err = device_add(&hsi->device);
	if (err < 0)
		return err;
	for (i = 0; i < hsi->num_ports; i++) {
		hsi->port[i]->device.parent = &hsi->device;
		err = device_add(&hsi->port[i]->device);
		if (err < 0)
			goto out;
	}
	/* Populate HSI bus with HSI clients */
	hsi_scan_board_info(hsi);

	return 0;
out:
	while (i-- > 0)
		device_del(&hsi->port[i]->device);
	device_del(&hsi->device);

	return err;
}
EXPORT_SYMBOL_GPL(hsi_register_controller);

/**
 * hsi_register_client_driver - Register an HSI client driver on the HSI bus
 * @drv: HSI client driver to register
 *
 * Returns -errno on failure, 0 on success.
 */
int hsi_register_client_driver(struct hsi_client_driver *drv)
{
	drv->driver.bus = &hsi_bus_type;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(hsi_register_client_driver);
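
/*
 * Illustrative sketch only: the usual registration pattern for a client
 * driver. The driver name and the trivial probe/remove callbacks are
 * hypothetical; a real client would undo this with
 * hsi_unregister_client_driver() in its module exit path.
 */
static int hsi_example_client_probe(struct device *dev)
{
	/* The client device is available via to_hsi_client(dev). */
	return 0;
}

static int hsi_example_client_remove(struct device *dev)
{
	return 0;
}

static struct hsi_client_driver hsi_example_client_driver = {
	.driver = {
		.name	= "hsi-example-client",
		.owner	= THIS_MODULE,
		.probe	= hsi_example_client_probe,
		.remove	= hsi_example_client_remove,
	},
};

static int __maybe_unused hsi_example_client_register(void)
{
	return hsi_register_client_driver(&hsi_example_client_driver);
}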

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

/**
 * hsi_put_controller - Free an HSI controller
 *
 * @hsi: Pointer to the HSI controller to be freed
 *
 * HSI controller drivers should only use this function if they need
 * to free their allocated hsi_controller structures before a successful
 * call to hsi_register_controller. Other use is not allowed.
 */
void hsi_put_controller(struct hsi_controller *hsi)
{
	unsigned int i;

	if (!hsi)
		return;

	for (i = 0; i < hsi->num_ports; i++)
		if (hsi->port && hsi->port[i])
			put_device(&hsi->port[i]->device);
	put_device(&hsi->device);
}
EXPORT_SYMBOL_GPL(hsi_put_controller);

/**
 * hsi_alloc_controller - Allocate an HSI controller and its ports
 * @n_ports: Number of ports on the HSI controller
 * @flags: Kernel allocation flags
 *
 * Return NULL on failure or a pointer to an hsi_controller on success.
 */
struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags)
{
	struct hsi_controller *hsi;
	struct hsi_port **port;
	unsigned int i;

	if (!n_ports)
		return NULL;

	hsi = kzalloc(sizeof(*hsi), flags);
	if (!hsi)
		return NULL;
	port = kcalloc(n_ports, sizeof(*port), flags);
	if (!port) {
		kfree(hsi);
		return NULL;
	}
	hsi->num_ports = n_ports;
	hsi->port = port;
	hsi->device.release = hsi_controller_release;
	device_initialize(&hsi->device);

	for (i = 0; i < n_ports; i++) {
		port[i] = kzalloc(sizeof(**port), flags);
		if (port[i] == NULL)
			goto out;
		port[i]->num = i;
		port[i]->async = hsi_dummy_msg;
		port[i]->setup = hsi_dummy_cl;
		port[i]->flush = hsi_dummy_cl;
		port[i]->start_tx = hsi_dummy_cl;
		port[i]->stop_tx = hsi_dummy_cl;
		port[i]->release = hsi_dummy_cl;
		mutex_init(&port[i]->lock);
		BLOCKING_INIT_NOTIFIER_HEAD(&port[i]->n_head);
		dev_set_name(&port[i]->device, "port%d", i);
		hsi->port[i]->device.release = hsi_port_release;
		device_initialize(&hsi->port[i]->device);
	}

	return hsi;
out:
	hsi_put_controller(hsi);

	return NULL;
}
EXPORT_SYMBOL_GPL(hsi_alloc_controller);
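
/*
 * Illustrative sketch only: the typical flow in a controller driver is to
 * allocate the controller with its ports, fill in the device fields and the
 * port callbacks, and then register everything. The single port, the device
 * name and the parent pointer below are hypothetical; if anything fails
 * before hsi_register_controller() succeeds, the memory must be dropped
 * with hsi_put_controller().
 */
static int __maybe_unused hsi_example_setup_controller(struct device *parent)
{
	struct hsi_controller *hsi;
	int err;

	hsi = hsi_alloc_controller(1, GFP_KERNEL);
	if (!hsi)
		return -ENOMEM;

	hsi->device.parent = parent;
	hsi->owner = THIS_MODULE;
	dev_set_name(&hsi->device, "hsi-example");
	/* A real driver would also set hsi->port[0]->async, setup, ... here. */

	err = hsi_register_controller(hsi);
	if (err < 0)
		hsi_put_controller(hsi);

	return err;
}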

/**
 * hsi_free_msg - Free an HSI message
 * @msg: Pointer to the HSI message
 *
 * The client is responsible for freeing the buffers pointed to by the
 * scatterlists.
 */
void hsi_free_msg(struct hsi_msg *msg)
{
	if (!msg)
		return;
	sg_free_table(&msg->sgt);
	kfree(msg);
}
EXPORT_SYMBOL_GPL(hsi_free_msg);

/**
 * hsi_alloc_msg - Allocate an HSI message
 * @nents: Number of memory entries
 * @flags: Kernel allocation flags
 *
 * nents can be 0. This mainly makes sense for read transfers.
 * In that case, HSI drivers will call the complete callback when
 * there is data to be read without consuming it.
 *
 * Return NULL on failure or a pointer to an hsi_msg on success.
 */
struct hsi_msg *hsi_alloc_msg(unsigned int nents, gfp_t flags)
{
	struct hsi_msg *msg;
	int err;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return NULL;

	if (!nents)
		return msg;

	err = sg_alloc_table(&msg->sgt, nents, flags);
	if (unlikely(err)) {
		kfree(msg);
		msg = NULL;
	}

	return msg;
}
EXPORT_SYMBOL_GPL(hsi_alloc_msg);

/**
 * hsi_async - Submit an HSI transfer to the controller
 * @cl: HSI client sending the transfer
 * @msg: The HSI transfer passed to controller
 *
 * The HSI message must have the channel, ttype, complete and destructor
 * fields set beforehand. If nents > 0, then the client must also initialize
 * the scatterlists to point to the buffers to write to or read from.
 *
 * HSI controllers rely on pre-allocated buffers from their clients and they
 * do not allocate buffers on their own.
 *
 * Once the HSI message transfer finishes, the HSI controller calls the
 * complete callback with the status and actual_len fields of the HSI message
 * updated. The complete callback can be called before returning from
 * hsi_async.
 *
 * Returns -errno on failure or 0 on success
 */
int hsi_async(struct hsi_client *cl, struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(cl);

	if (!hsi_port_claimed(cl))
		return -EACCES;

	WARN_ON_ONCE(!msg->destructor || !msg->complete);
	msg->cl = cl;

	return port->async(msg);
}
EXPORT_SYMBOL_GPL(hsi_async);
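
/*
 * Illustrative sketch only: how a client might queue a single-entry write
 * once it has claimed its port. The buffer, channel number and the shared
 * completion/destructor handler are hypothetical; on completion the message
 * is simply freed again with hsi_free_msg().
 */
static void hsi_example_msg_done(struct hsi_msg *msg)
{
	hsi_free_msg(msg);
}

static int __maybe_unused hsi_example_write(struct hsi_client *cl,
					    void *buf, size_t len)
{
	struct hsi_msg *msg;
	int err;

	msg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	sg_init_one(msg->sgt.sgl, buf, len);
	msg->channel = 0;
	msg->ttype = HSI_MSG_WRITE;
	msg->complete = hsi_example_msg_done;
	msg->destructor = hsi_example_msg_done;

	err = hsi_async(cl, msg);
	if (err < 0)
		hsi_free_msg(msg);

	return err;
}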

/**
 * hsi_claim_port - Claim the HSI client's port
 * @cl: HSI client that wants to claim its port
 * @share: Flag to indicate if the client wants to share the port or not.
 *
 * Returns -errno on failure, 0 on success.
 */
int hsi_claim_port(struct hsi_client *cl, unsigned int share)
{
	struct hsi_port *port = hsi_get_port(cl);
	int err = 0;

	mutex_lock(&port->lock);
	if ((port->claimed) && (!port->shared || !share)) {
		err = -EBUSY;
		goto out;
	}
	if (!try_module_get(to_hsi_controller(port->device.parent)->owner)) {
		err = -ENODEV;
		goto out;
	}
	port->claimed++;
	port->shared = !!share;
	cl->pclaimed = 1;
out:
	mutex_unlock(&port->lock);

	return err;
}
EXPORT_SYMBOL_GPL(hsi_claim_port);

/**
 * hsi_release_port - Release the HSI client's port
 * @cl: HSI client which previously claimed its port
 */
void hsi_release_port(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);

	mutex_lock(&port->lock);
	/* Allow HW driver to do some cleanup */
	port->release(cl);
	if (cl->pclaimed)
		port->claimed--;
	BUG_ON(port->claimed < 0);
	cl->pclaimed = 0;
	if (!port->claimed)
		port->shared = 0;
	module_put(to_hsi_controller(port->device.parent)->owner);
	mutex_unlock(&port->lock);
}
EXPORT_SYMBOL_GPL(hsi_release_port);
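
/*
 * Illustrative sketch only: a client taking exclusive ownership of its port,
 * applying its configuration and backing out on failure. This assumes the
 * hsi_setup() helper from <linux/hsi/hsi.h>, which invokes the port's setup
 * callback for the client.
 */
static int __maybe_unused hsi_example_claim_and_setup(struct hsi_client *cl)
{
	int err;

	err = hsi_claim_port(cl, 0);	/* 0: do not share the port */
	if (err < 0)
		return err;

	err = hsi_setup(cl);		/* assumed helper, see lead-in */
	if (err < 0)
		hsi_release_port(cl);

	return err;
}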

static int hsi_event_notifier_call(struct notifier_block *nb,
				   unsigned long event, void *data __maybe_unused)
{
	struct hsi_client *cl = container_of(nb, struct hsi_client, nb);

	(*cl->ehandler)(cl, event);

	return 0;
}

/**
 * hsi_register_port_event - Register a client to receive port events
 * @cl: HSI client that wants to receive port events
 * @handler: Event handler callback
 *
 * Clients should register a callback to be able to receive
 * events from the ports. Registration should happen after
 * claiming the port.
 * The handler can be called in interrupt context.
 *
 * Returns -errno on error, or 0 on success.
 */
int hsi_register_port_event(struct hsi_client *cl,
			void (*handler)(struct hsi_client *, unsigned long))
{
	struct hsi_port *port = hsi_get_port(cl);

	if (!handler || cl->ehandler)
		return -EINVAL;
	if (!hsi_port_claimed(cl))
		return -EACCES;
	cl->ehandler = handler;
	cl->nb.notifier_call = hsi_event_notifier_call;

	return blocking_notifier_chain_register(&port->n_head, &cl->nb);
}
EXPORT_SYMBOL_GPL(hsi_register_port_event);
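
/*
 * Illustrative sketch only: a client hooking into port events after claiming
 * the port. The handler name is hypothetical; as noted above, the handler
 * can be called in interrupt context, so it must not sleep.
 */
static void hsi_example_port_event(struct hsi_client *cl, unsigned long event)
{
	switch (event) {
	case HSI_EVENT_START_RX:
		/* Remote wake line went high: queue read messages here. */
		break;
	case HSI_EVENT_STOP_RX:
		/* Remote wake line went down: stop expecting data. */
		break;
	default:
		break;
	}
}

static int __maybe_unused hsi_example_enable_events(struct hsi_client *cl)
{
	return hsi_register_port_event(cl, hsi_example_port_event);
}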

/**
 * hsi_unregister_port_event - Stop receiving port events for a client
 * @cl: HSI client that wants to stop receiving port events
 *
 * Clients should call this function before releasing their associated
 * port.
 *
 * Returns -errno on error, or 0 on success.
 */
int hsi_unregister_port_event(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	int err;

	WARN_ON(!hsi_port_claimed(cl));

	err = blocking_notifier_chain_unregister(&port->n_head, &cl->nb);
	if (!err)
		cl->ehandler = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(hsi_unregister_port_event);

/**
 * hsi_event - Notifies clients about port events
 * @port: Port where the event occurred
 * @event: The event type
 *
 * Clients should not be concerned about wake line behavior. However, due
 * to a race condition in the HSI HW protocol, clients need to be notified
 * about wake line changes, so they can implement a workaround for it.
 *
 * Events:
 * HSI_EVENT_START_RX - Incoming wake line high
 * HSI_EVENT_STOP_RX - Incoming wake line down
 *
 * Returns -errno on error, or 0 on success.
 */
int hsi_event(struct hsi_port *port, unsigned long event)
{
	return blocking_notifier_call_chain(&port->n_head, event, NULL);
}
EXPORT_SYMBOL_GPL(hsi_event);

/**
 * hsi_get_channel_id_by_name - Acquire a channel id by channel name
 * @cl: HSI client that uses the channel
 * @name: Name the channel is known under
 *
 * Clients can call this function to get HSI channel ids by name, similar to
 * requesting IRQs or GPIOs by name. This function assumes the same
 * channel configuration is used for RX and TX.
 *
 * Returns -errno on error or the channel id on success.
 */
int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name)
{
	int i;

	if (!cl->rx_cfg.channels)
		return -ENOENT;

	for (i = 0; i < cl->rx_cfg.num_channels; i++)
		if (!strcmp(cl->rx_cfg.channels[i].name, name))
			return cl->rx_cfg.channels[i].id;

	return -ENXIO;
}
EXPORT_SYMBOL_GPL(hsi_get_channel_id_by_name);
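
/*
 * Illustrative sketch only: resolving a channel id from the name given in
 * the client's channel table (board info or the "hsi-channel-names" DT
 * property). The "speech-data" channel name is hypothetical.
 */
static int __maybe_unused hsi_example_find_channel(struct hsi_client *cl)
{
	int channel = hsi_get_channel_id_by_name(cl, "speech-data");

	if (channel < 0)
		pr_debug("hsi: no channel named \"speech-data\": %d\n",
			 channel);

	return channel;
}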

static int __init hsi_init(void)
{
	return bus_register(&hsi_bus_type);
}
postcore_initcall(hsi_init);

static void __exit hsi_exit(void)
{
	bus_unregister(&hsi_bus_type);
}
module_exit(hsi_exit);

MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
MODULE_DESCRIPTION("High-speed Synchronous Serial Interface (HSI) framework");
MODULE_LICENSE("GPL v2");