// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus bundles
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */
8*4882a593Smuzhiyun
#include <linux/greybus.h>

#include "greybus_trace.h"
11*4882a593Smuzhiyun
bundle_class_show(struct device * dev,struct device_attribute * attr,char * buf)12*4882a593Smuzhiyun static ssize_t bundle_class_show(struct device *dev,
13*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
14*4882a593Smuzhiyun {
15*4882a593Smuzhiyun struct gb_bundle *bundle = to_gb_bundle(dev);
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun return sprintf(buf, "0x%02x\n", bundle->class);
18*4882a593Smuzhiyun }
19*4882a593Smuzhiyun static DEVICE_ATTR_RO(bundle_class);
20*4882a593Smuzhiyun
bundle_id_show(struct device * dev,struct device_attribute * attr,char * buf)21*4882a593Smuzhiyun static ssize_t bundle_id_show(struct device *dev,
22*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
23*4882a593Smuzhiyun {
24*4882a593Smuzhiyun struct gb_bundle *bundle = to_gb_bundle(dev);
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun return sprintf(buf, "%u\n", bundle->id);
27*4882a593Smuzhiyun }
28*4882a593Smuzhiyun static DEVICE_ATTR_RO(bundle_id);
29*4882a593Smuzhiyun
state_show(struct device * dev,struct device_attribute * attr,char * buf)30*4882a593Smuzhiyun static ssize_t state_show(struct device *dev, struct device_attribute *attr,
31*4882a593Smuzhiyun char *buf)
32*4882a593Smuzhiyun {
33*4882a593Smuzhiyun struct gb_bundle *bundle = to_gb_bundle(dev);
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun if (!bundle->state)
36*4882a593Smuzhiyun return sprintf(buf, "\n");
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun return sprintf(buf, "%s\n", bundle->state);
39*4882a593Smuzhiyun }
40*4882a593Smuzhiyun
state_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)41*4882a593Smuzhiyun static ssize_t state_store(struct device *dev, struct device_attribute *attr,
42*4882a593Smuzhiyun const char *buf, size_t size)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun struct gb_bundle *bundle = to_gb_bundle(dev);
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun kfree(bundle->state);
47*4882a593Smuzhiyun bundle->state = kstrdup(buf, GFP_KERNEL);
48*4882a593Smuzhiyun if (!bundle->state)
49*4882a593Smuzhiyun return -ENOMEM;
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun /* Tell userspace that the file contents changed */
52*4882a593Smuzhiyun sysfs_notify(&bundle->dev.kobj, NULL, "state");
53*4882a593Smuzhiyun
54*4882a593Smuzhiyun return size;
55*4882a593Smuzhiyun }
56*4882a593Smuzhiyun static DEVICE_ATTR_RW(state);
57*4882a593Smuzhiyun
/* sysfs attributes exposed by every bundle device. */
static struct attribute *bundle_attrs[] = {
	&dev_attr_bundle_class.attr,
	&dev_attr_bundle_id.attr,
	&dev_attr_state.attr,
	NULL,
};

/* Generates bundle_groups, installed as the device's .groups below. */
ATTRIBUTE_GROUPS(bundle);
66*4882a593Smuzhiyun
/*
 * Look up a bundle on @intf by its id.  Returns the bundle, or NULL if
 * the interface has no bundle with that id.
 */
static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
					u8 bundle_id)
{
	struct gb_bundle *iter;

	list_for_each_entry(iter, &intf->bundles, links)
		if (iter->id == bundle_id)
			return iter;

	return NULL;
}
79*4882a593Smuzhiyun
/*
 * Device-core release callback: invoked when the last reference to the
 * bundle's struct device is dropped.  Frees the bundle and the memory
 * it still owns (state string and CPort descriptor copy).
 */
static void gb_bundle_release(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	trace_gb_bundle_release(bundle);

	kfree(bundle->state);
	kfree(bundle->cport_desc);
	kfree(bundle);
}
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun #ifdef CONFIG_PM
gb_bundle_disable_all_connections(struct gb_bundle * bundle)92*4882a593Smuzhiyun static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
93*4882a593Smuzhiyun {
94*4882a593Smuzhiyun struct gb_connection *connection;
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun list_for_each_entry(connection, &bundle->connections, bundle_links)
97*4882a593Smuzhiyun gb_connection_disable(connection);
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun
gb_bundle_enable_all_connections(struct gb_bundle * bundle)100*4882a593Smuzhiyun static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
101*4882a593Smuzhiyun {
102*4882a593Smuzhiyun struct gb_connection *connection;
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun list_for_each_entry(connection, &bundle->connections, bundle_links)
105*4882a593Smuzhiyun gb_connection_enable(connection);
106*4882a593Smuzhiyun }
107*4882a593Smuzhiyun
gb_bundle_suspend(struct device * dev)108*4882a593Smuzhiyun static int gb_bundle_suspend(struct device *dev)
109*4882a593Smuzhiyun {
110*4882a593Smuzhiyun struct gb_bundle *bundle = to_gb_bundle(dev);
111*4882a593Smuzhiyun const struct dev_pm_ops *pm = dev->driver->pm;
112*4882a593Smuzhiyun int ret;
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun if (pm && pm->runtime_suspend) {
115*4882a593Smuzhiyun ret = pm->runtime_suspend(&bundle->dev);
116*4882a593Smuzhiyun if (ret)
117*4882a593Smuzhiyun return ret;
118*4882a593Smuzhiyun } else {
119*4882a593Smuzhiyun gb_bundle_disable_all_connections(bundle);
120*4882a593Smuzhiyun }
121*4882a593Smuzhiyun
122*4882a593Smuzhiyun ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
123*4882a593Smuzhiyun if (ret) {
124*4882a593Smuzhiyun if (pm && pm->runtime_resume)
125*4882a593Smuzhiyun ret = pm->runtime_resume(dev);
126*4882a593Smuzhiyun else
127*4882a593Smuzhiyun gb_bundle_enable_all_connections(bundle);
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun return ret;
130*4882a593Smuzhiyun }
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun return 0;
133*4882a593Smuzhiyun }
134*4882a593Smuzhiyun
gb_bundle_resume(struct device * dev)135*4882a593Smuzhiyun static int gb_bundle_resume(struct device *dev)
136*4882a593Smuzhiyun {
137*4882a593Smuzhiyun struct gb_bundle *bundle = to_gb_bundle(dev);
138*4882a593Smuzhiyun const struct dev_pm_ops *pm = dev->driver->pm;
139*4882a593Smuzhiyun int ret;
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
142*4882a593Smuzhiyun if (ret)
143*4882a593Smuzhiyun return ret;
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun if (pm && pm->runtime_resume) {
146*4882a593Smuzhiyun ret = pm->runtime_resume(dev);
147*4882a593Smuzhiyun if (ret)
148*4882a593Smuzhiyun return ret;
149*4882a593Smuzhiyun } else {
150*4882a593Smuzhiyun gb_bundle_enable_all_connections(bundle);
151*4882a593Smuzhiyun }
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun return 0;
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun
/*
 * Runtime-idle callback: rather than suspending immediately, refresh
 * the last-busy timestamp and request a delayed autosuspend.
 */
static int gb_bundle_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
163*4882a593Smuzhiyun #endif
164*4882a593Smuzhiyun
/*
 * Runtime-PM operations for bundle devices.  SET_RUNTIME_PM_OPS expands
 * to nothing when CONFIG_PM is disabled, matching the #ifdef guarding
 * the callbacks above.
 */
static const struct dev_pm_ops gb_bundle_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
};
168*4882a593Smuzhiyun
/* Device type shared by all bundle devices on the greybus bus. */
struct device_type greybus_bundle_type = {
	.name =		"greybus_bundle",
	.release =	gb_bundle_release,
	.pm =		&gb_bundle_pm_ops,
};
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun /*
176*4882a593Smuzhiyun * Create a gb_bundle structure to represent a discovered
177*4882a593Smuzhiyun * bundle. Returns a pointer to the new bundle or a null
178*4882a593Smuzhiyun * pointer if a failure occurs due to memory exhaustion.
179*4882a593Smuzhiyun */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
				   u8 class)
{
	struct gb_bundle *bundle;

	/* BUNDLE_ID_NONE is reserved and must never be instantiated. */
	if (bundle_id == BUNDLE_ID_NONE) {
		dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
		return NULL;
	}

	/*
	 * Reject any attempt to reuse a bundle id. We initialize
	 * these serially, so there's no need to worry about keeping
	 * the interface bundle list locked here.
	 */
	if (gb_bundle_find(intf, bundle_id)) {
		dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
		return NULL;
	}

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
	if (!bundle)
		return NULL;

	bundle->intf = intf;
	bundle->id = bundle_id;
	bundle->class = class;
	INIT_LIST_HEAD(&bundle->connections);

	/* Parent the device under its interface and inherit its DMA mask. */
	bundle->dev.parent = &intf->dev;
	bundle->dev.bus = &greybus_bus_type;
	bundle->dev.type = &greybus_bundle_type;
	bundle->dev.groups = bundle_groups;
	bundle->dev.dma_mask = intf->dev.dma_mask;
	device_initialize(&bundle->dev);
	/* Device name is the interface name with the bundle id appended. */
	dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);

	/* Registered with the driver core later, by gb_bundle_add(). */
	list_add(&bundle->links, &intf->bundles);

	trace_gb_bundle_create(bundle);

	return bundle;
}
223*4882a593Smuzhiyun
gb_bundle_add(struct gb_bundle * bundle)224*4882a593Smuzhiyun int gb_bundle_add(struct gb_bundle *bundle)
225*4882a593Smuzhiyun {
226*4882a593Smuzhiyun int ret;
227*4882a593Smuzhiyun
228*4882a593Smuzhiyun ret = device_add(&bundle->dev);
229*4882a593Smuzhiyun if (ret) {
230*4882a593Smuzhiyun dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
231*4882a593Smuzhiyun return ret;
232*4882a593Smuzhiyun }
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun trace_gb_bundle_add(bundle);
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun return 0;
237*4882a593Smuzhiyun }
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun /*
240*4882a593Smuzhiyun * Tear down a previously set up bundle.
241*4882a593Smuzhiyun */
void gb_bundle_destroy(struct gb_bundle *bundle)
{
	trace_gb_bundle_destroy(bundle);

	/* Only unregister if gb_bundle_add() previously succeeded. */
	if (device_is_registered(&bundle->dev))
		device_del(&bundle->dev);

	list_del(&bundle->links);

	/* Drop the initial reference; gb_bundle_release() frees the memory. */
	put_device(&bundle->dev);
}
253