// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus Host Device
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in);
EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit);

static struct ida gb_hd_bus_id_map;

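/**
 * gb_hd_output() - hand an opaque request buffer to the host-device driver
 * @hd: host device
 * @req: request buffer
 * @size: size of @req in bytes
 * @cmd: driver-defined command identifier
 * @async: whether the request may complete asynchronously
 *
 * Forwards the request to the host-device driver's optional ->output()
 * callback.
 *
 * Return: the callback's result, or -EINVAL if @hd or the callback is
 * missing.
 */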
int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
                 bool async)
{
        if (!hd || !hd->driver || !hd->driver->output)
                return -EINVAL;
        return hd->driver->output(hd, req, size, cmd, async);
}
EXPORT_SYMBOL_GPL(gb_hd_output);

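/*
 * sysfs attributes common to all host devices: currently just bus_id,
 * the per-host-device identifier that also forms the "greybus<N>" name.
 */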
static ssize_t bus_id_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct gb_host_device *hd = to_gb_host_device(dev);

        return sprintf(buf, "%d\n", hd->bus_id);
}
static DEVICE_ATTR_RO(bus_id);

static struct attribute *bus_attrs[] = {
        &dev_attr_bus_id.attr,
        NULL
};
ATTRIBUTE_GROUPS(bus);

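/**
 * gb_hd_cport_reserve() - reserve a specific CPort id
 * @hd: host device
 * @cport_id: CPort id to reserve
 *
 * Marks @cport_id as used in the host device's CPort id map so that it
 * cannot be handed out by gb_hd_cport_allocate().
 *
 * Return: 0 on success, a negative errno if the id is already in use or
 * cannot be reserved.
 */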
int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
{
        struct ida *id_map = &hd->cport_id_map;
        int ret;

        ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL);
        if (ret < 0) {
                dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_cport_reserve);

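/**
 * gb_hd_cport_release_reserved() - release a previously reserved CPort id
 * @hd: host device
 * @cport_id: CPort id to release
 *
 * Returns an id reserved with gb_hd_cport_reserve() to the CPort id map.
 */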
void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
{
        struct ida *id_map = &hd->cport_id_map;

        ida_simple_remove(id_map, cport_id);
}
EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);

/* Locking: Caller guarantees serialisation */
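/*
 * Allocate a CPort id: delegate to the driver's ->cport_allocate() callback
 * when one is provided, otherwise hand out an id from the host device's
 * CPort id map (a specific id if cport_id is non-negative, any free id
 * otherwise). Returns the allocated id or a negative errno.
 */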
int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
                         unsigned long flags)
{
        struct ida *id_map = &hd->cport_id_map;
        int ida_start, ida_end;

        if (hd->driver->cport_allocate)
                return hd->driver->cport_allocate(hd, cport_id, flags);

        if (cport_id < 0) {
                ida_start = 0;
                ida_end = hd->num_cports;
        } else if (cport_id < hd->num_cports) {
                ida_start = cport_id;
                ida_end = cport_id + 1;
        } else {
                dev_err(&hd->dev, "cport %d not available\n", cport_id);
                return -EINVAL;
        }

        return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}

/* Locking: Caller guarantees serialisation */
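/*
 * Release a CPort id: delegate to the driver's ->cport_release() callback
 * when one is provided, otherwise return the id to the host device's
 * CPort id map.
 */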
void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
{
        if (hd->driver->cport_release) {
                hd->driver->cport_release(hd, cport_id);
                return;
        }

        ida_simple_remove(&hd->cport_id_map, cport_id);
}

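/*
 * Device release callback for struct gb_host_device: drops the SVC
 * reference, frees the bus id and the CPort id map, and finally frees
 * the host device itself.
 */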
static void gb_hd_release(struct device *dev)
{
        struct gb_host_device *hd = to_gb_host_device(dev);

        trace_gb_hd_release(hd);

        if (hd->svc)
                gb_svc_put(hd->svc);
        ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
        ida_destroy(&hd->cport_id_map);
        kfree(hd);
}

struct device_type greybus_hd_type = {
        .name = "greybus_host_device",
        .release = gb_hd_release,
};

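/**
 * gb_hd_create() - allocate and initialise a greybus host device
 * @driver: host-device driver, which must provide the message_send and
 *          message_cancel callbacks
 * @parent: parent device
 * @buffer_size_max: largest message buffer the driver can handle
 * @num_cports: number of CPorts supported by the host device
 *
 * Allocates a struct gb_host_device (plus @driver->hd_priv_size bytes of
 * driver-private data), assigns it a bus id, initialises the embedded
 * struct device and creates its SVC. The device is not registered until
 * gb_hd_add() is called; drop the reference with gb_hd_put() when done.
 *
 * Return: the new host device, or an ERR_PTR() on failure.
 */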
struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
                                    struct device *parent,
                                    size_t buffer_size_max,
                                    size_t num_cports)
{
        struct gb_host_device *hd;
        int ret;

        /*
         * Validate that the driver implements all of the mandatory callbacks
         * so that we don't have to check for them every time we call them.
         */
        if ((!driver->message_send) || (!driver->message_cancel)) {
                dev_err(parent, "mandatory hd-callbacks missing\n");
                return ERR_PTR(-EINVAL);
        }

        if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) {
                dev_err(parent, "greybus host-device buffers too small\n");
                return ERR_PTR(-EINVAL);
        }

        if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) {
                dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports);
                return ERR_PTR(-EINVAL);
        }

        /*
         * Make sure to never allocate messages larger than what the Greybus
         * protocol supports.
         */
        if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) {
                dev_warn(parent, "limiting buffer size to %u\n",
                         GB_OPERATION_MESSAGE_SIZE_MAX);
                buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX;
        }

        hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL);
        if (!hd)
                return ERR_PTR(-ENOMEM);

        ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL);
        if (ret < 0) {
                kfree(hd);
                return ERR_PTR(ret);
        }
        hd->bus_id = ret;

        hd->driver = driver;
        INIT_LIST_HEAD(&hd->modules);
        INIT_LIST_HEAD(&hd->connections);
        ida_init(&hd->cport_id_map);
        hd->buffer_size_max = buffer_size_max;
        hd->num_cports = num_cports;

        hd->dev.parent = parent;
        hd->dev.bus = &greybus_bus_type;
        hd->dev.type = &greybus_hd_type;
        hd->dev.groups = bus_groups;
        hd->dev.dma_mask = hd->dev.parent->dma_mask;
        device_initialize(&hd->dev);
        dev_set_name(&hd->dev, "greybus%d", hd->bus_id);

        trace_gb_hd_create(hd);

        hd->svc = gb_svc_create(hd);
        if (!hd->svc) {
                dev_err(&hd->dev, "failed to create svc\n");
                put_device(&hd->dev);
                return ERR_PTR(-ENOMEM);
        }

        return hd;
}
EXPORT_SYMBOL_GPL(gb_hd_create);

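/**
 * gb_hd_add() - register a host device created with gb_hd_create()
 * @hd: host device
 *
 * Adds the host device to the driver model and registers its SVC; if the
 * SVC registration fails, the device is removed again.
 *
 * Return: 0 on success, a negative errno on failure.
 */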
int gb_hd_add(struct gb_host_device *hd)
{
        int ret;

        ret = device_add(&hd->dev);
        if (ret)
                return ret;

        ret = gb_svc_add(hd->svc);
        if (ret) {
                device_del(&hd->dev);
                return ret;
        }

        trace_gb_hd_add(hd);

        return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_add);

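/**
 * gb_hd_del() - unregister a host device
 * @hd: host device
 *
 * Tears down the SVC (flushing any on-going hotplug processing) and
 * removes the host device from the driver model. The final reference is
 * still dropped separately via gb_hd_put().
 */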
void gb_hd_del(struct gb_host_device *hd)
{
        trace_gb_hd_del(hd);

        /*
         * Tear down the svc and flush any on-going hotplug processing before
         * removing the remaining interfaces.
         */
        gb_svc_del(hd->svc);

        device_del(&hd->dev);
}
EXPORT_SYMBOL_GPL(gb_hd_del);

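/**
 * gb_hd_shutdown() - quiesce a host device
 * @hd: host device
 *
 * Tears down the SVC only; unlike gb_hd_del(), the host device itself is
 * left registered.
 */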
void gb_hd_shutdown(struct gb_host_device *hd)
{
        gb_svc_del(hd->svc);
}
EXPORT_SYMBOL_GPL(gb_hd_shutdown);

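/**
 * gb_hd_put() - drop a reference to a host device
 * @hd: host device
 *
 * When the last reference is dropped, gb_hd_release() frees the host
 * device.
 */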
void gb_hd_put(struct gb_host_device *hd)
{
        put_device(&hd->dev);
}
EXPORT_SYMBOL_GPL(gb_hd_put);

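/* Init/exit helpers for the host-device bus id allocator. */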
int __init gb_hd_init(void)
{
        ida_init(&gb_hd_bus_id_map);

        return 0;
}

void gb_hd_exit(void)
{
        ida_destroy(&gb_hd_bus_id_map);
}