// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus manifest parsing
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/greybus.h>

static const char *get_descriptor_type_string(u8 type)
{
	switch (type) {
	case GREYBUS_TYPE_INVALID:
		return "invalid";
	case GREYBUS_TYPE_STRING:
		return "string";
	case GREYBUS_TYPE_INTERFACE:
		return "interface";
	case GREYBUS_TYPE_CPORT:
		return "cport";
	case GREYBUS_TYPE_BUNDLE:
		return "bundle";
	default:
		WARN_ON(1);
		return "unknown";
	}
}

/*
 * We scan the manifest once to identify where all the descriptors
 * are. The result is a list of these manifest_desc structures. We
 * then pick through them for what we're looking for (starting with
 * the interface descriptor). As each is processed we remove it from
 * the list. When we're done the list should (probably) be empty.
 * An example of the resulting list is sketched below the structure.
 */
struct manifest_desc {
	struct list_head		links;

	size_t				size;
	void				*data;
	enum greybus_descriptor_type	type;
};
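
/*
 * A hedged illustration (not real driver state): after the initial scan of
 * a simple manifest describing one interface with one bundle and one cport,
 * intf->manifest_descs might hold entries along these lines:
 *
 *	{ .type = GREYBUS_TYPE_INTERFACE, .size = ..., .data = ... }
 *	{ .type = GREYBUS_TYPE_STRING,    ... }		(vendor string)
 *	{ .type = GREYBUS_TYPE_STRING,    ... }		(product string)
 *	{ .type = GREYBUS_TYPE_BUNDLE,    ... }
 *	{ .type = GREYBUS_TYPE_CPORT,     ... }
 *
 * Each ->data pointer aims just past the descriptor header, into the raw
 * manifest buffer; the parsing code below consumes and frees these entries
 * one by one.
 */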

static void release_manifest_descriptor(struct manifest_desc *descriptor)
{
	list_del(&descriptor->links);
	kfree(descriptor);
}

static void release_manifest_descriptors(struct gb_interface *intf)
{
	struct manifest_desc *descriptor;
	struct manifest_desc *next;

	list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
		release_manifest_descriptor(descriptor);
}

static void release_cport_descriptors(struct list_head *head, u8 bundle_id)
{
	struct manifest_desc *desc, *tmp;
	struct greybus_descriptor_cport *desc_cport;

	list_for_each_entry_safe(desc, tmp, head, links) {
		desc_cport = desc->data;

		if (desc->type != GREYBUS_TYPE_CPORT)
			continue;

		if (desc_cport->bundle == bundle_id)
			release_manifest_descriptor(desc);
	}
}

static struct manifest_desc *get_next_bundle_desc(struct gb_interface *intf)
{
	struct manifest_desc *descriptor;
	struct manifest_desc *next;

	list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
		if (descriptor->type == GREYBUS_TYPE_BUNDLE)
			return descriptor;

	return NULL;
}

/*
 * Validate the given descriptor. Its reported size must fit within
 * the number of bytes remaining, and it must have a recognized
 * type. Check that the reported size is at least as big as what
 * we expect to see. (It could be bigger, perhaps for a new version
 * of the format.)
 *
 * Returns the (non-zero) number of bytes consumed by the descriptor,
 * or a negative errno.
 */
static int identify_descriptor(struct gb_interface *intf,
			       struct greybus_descriptor *desc, size_t size)
{
	struct greybus_descriptor_header *desc_header = &desc->header;
	struct manifest_desc *descriptor;
	size_t desc_size;
	size_t expected_size;

	if (size < sizeof(*desc_header)) {
		dev_err(&intf->dev, "manifest too small (%zu < %zu)\n", size,
			sizeof(*desc_header));
		return -EINVAL;		/* Must at least have header */
	}

	desc_size = le16_to_cpu(desc_header->size);
	if (desc_size > size) {
		dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n",
			desc_size, size);
		return -EINVAL;
	}

	/* Descriptor needs to at least have a header */
	expected_size = sizeof(*desc_header);

	switch (desc_header->type) {
	case GREYBUS_TYPE_STRING:
		expected_size += sizeof(struct greybus_descriptor_string);
		expected_size += desc->string.length;

		/* String descriptors are padded to 4 byte boundaries */
		expected_size = ALIGN(expected_size, 4);
		break;
	case GREYBUS_TYPE_INTERFACE:
		expected_size += sizeof(struct greybus_descriptor_interface);
		break;
	case GREYBUS_TYPE_BUNDLE:
		expected_size += sizeof(struct greybus_descriptor_bundle);
		break;
	case GREYBUS_TYPE_CPORT:
		expected_size += sizeof(struct greybus_descriptor_cport);
		break;
	case GREYBUS_TYPE_INVALID:
	default:
		dev_err(&intf->dev, "invalid descriptor type (%u)\n",
			desc_header->type);
		return -EINVAL;
	}

	if (desc_size < expected_size) {
		dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n",
			get_descriptor_type_string(desc_header->type),
			desc_size, expected_size);
		return -EINVAL;
	}

	/* Descriptor bigger than what we expect */
	if (desc_size > expected_size) {
		dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n",
			 get_descriptor_type_string(desc_header->type),
			 expected_size, desc_size);
	}

	descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
	if (!descriptor)
		return -ENOMEM;

	descriptor->size = desc_size;
	descriptor->data = (char *)desc + sizeof(*desc_header);
	descriptor->type = desc_header->type;
	list_add_tail(&descriptor->links, &intf->manifest_descs);

	/* desc_size is positive and is known to fit in a signed int */

	return desc_size;
}
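
/*
 * Illustrative worked example (not compiled into the driver), assuming the
 * packed layouts in greybus_manifest.h (a 4-byte descriptor header and a
 * 2-byte fixed string part): a string descriptor carrying the three
 * characters "abc" is validated by identify_descriptor() as
 *
 *	expected_size  = sizeof(struct greybus_descriptor_header)	4
 *	               + sizeof(struct greybus_descriptor_string)	2
 *	               + desc->string.length				3
 *	               = 9, rounded up by ALIGN(9, 4) to 12
 *
 * so the descriptor's little-endian header.size field must report at least
 * 12 bytes; anything smaller is rejected with -EINVAL, anything larger is
 * accepted with a size-mismatch warning.
 */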

/*
 * Find the string descriptor having the given id, validate it, and
 * allocate a duplicate copy of it. The duplicate has an extra byte
 * which guarantees the returned string is NUL-terminated.
 *
 * String index 0 is valid (it represents "no string"), and for
 * that a null pointer is returned.
 *
 * Otherwise returns a pointer to a newly-allocated copy of the
 * descriptor string, or an error-coded pointer on failure.
 */
static char *gb_string_get(struct gb_interface *intf, u8 string_id)
{
	struct greybus_descriptor_string *desc_string;
	struct manifest_desc *descriptor;
	bool found = false;
	char *string;

	/* A zero string id means no string (but no error) */
	if (!string_id)
		return NULL;

	list_for_each_entry(descriptor, &intf->manifest_descs, links) {
		if (descriptor->type != GREYBUS_TYPE_STRING)
			continue;

		desc_string = descriptor->data;
		if (desc_string->id == string_id) {
			found = true;
			break;
		}
	}
	if (!found)
		return ERR_PTR(-ENOENT);

	/* Allocate an extra byte so we can guarantee it's NUL-terminated */
	string = kmemdup(&desc_string->string, desc_string->length + 1,
			 GFP_KERNEL);
	if (!string)
		return ERR_PTR(-ENOMEM);
	string[desc_string->length] = '\0';

	/* Ok we've used this string, so we're done with it */
	release_manifest_descriptor(descriptor);

	return string;
}
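
/*
 * Behaviour sketch for gb_string_get() (illustrative only): for a string
 * descriptor with .length = 5 and .string = "hello" (not NUL-terminated in
 * the manifest), kmemdup() above copies length + 1 = 6 bytes out of the
 * manifest buffer and the sixth byte is then overwritten with '\0', so the
 * caller always receives a properly terminated "hello", which it owns and
 * must later kfree().
 */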

/*
 * Find cport descriptors in the manifest associated with the given
 * bundle, and set up data structures for the functions that use
 * them. Returns the number of cports set up for the bundle, or 0
 * if there is an error.
 */
static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
{
	struct gb_interface *intf = bundle->intf;
	struct greybus_descriptor_cport *desc_cport;
	struct manifest_desc *desc, *next, *tmp;
	LIST_HEAD(list);
	u8 bundle_id = bundle->id;
	u16 cport_id;
	u32 count = 0;
	int i;

	/* Set up all cport descriptors associated with this bundle */
	list_for_each_entry_safe(desc, next, &intf->manifest_descs, links) {
		if (desc->type != GREYBUS_TYPE_CPORT)
			continue;

		desc_cport = desc->data;
		if (desc_cport->bundle != bundle_id)
			continue;

		cport_id = le16_to_cpu(desc_cport->id);
		if (cport_id > CPORT_ID_MAX)
			goto exit;

		/* Nothing else is allowed to use the control cport's id */
		if (cport_id == GB_CONTROL_CPORT_ID) {
			dev_err(&bundle->dev, "invalid cport id found (%02u)\n",
				cport_id);
			goto exit;
		}

		/*
		 * Found one, move it to our temporary list after checking for
		 * duplicates.
		 */
		list_for_each_entry(tmp, &list, links) {
			desc_cport = tmp->data;
			if (cport_id == le16_to_cpu(desc_cport->id)) {
				dev_err(&bundle->dev,
					"duplicate CPort %u found\n", cport_id);
				goto exit;
			}
		}
		list_move_tail(&desc->links, &list);
		count++;
	}

	if (!count)
		return 0;

	bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc),
				     GFP_KERNEL);
	if (!bundle->cport_desc)
		goto exit;

	bundle->num_cports = count;

	i = 0;
	list_for_each_entry_safe(desc, next, &list, links) {
		desc_cport = desc->data;
		memcpy(&bundle->cport_desc[i++], desc_cport,
		       sizeof(*desc_cport));

		/* Release the cport descriptor */
		release_manifest_descriptor(desc);
	}

	return count;
exit:
	release_cport_descriptors(&list, bundle_id);
	/*
	 * Free all cports for this bundle to avoid 'excess descriptors'
	 * warnings.
	 */
	release_cport_descriptors(&intf->manifest_descs, bundle_id);

	return 0;	/* Error; count should also be 0 */
}
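
/*
 * Downstream usage sketch (illustrative, outside this file): once a bundle
 * has been set up here, a Greybus bundle driver can walk the copied cport
 * descriptors from its probe routine, roughly like:
 *
 *	for (i = 0; i < bundle->num_cports; i++) {
 *		struct greybus_descriptor_cport *cd = &bundle->cport_desc[i];
 *
 *		do_something(le16_to_cpu(cd->id), cd->protocol_id);
 *	}
 *
 * where do_something() is a placeholder. The copied descriptors keep their
 * little-endian wire format, hence the le16_to_cpu() on the cport id.
 */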

/*
 * Find bundle descriptors in the manifest and set up their data
 * structures. Returns the number of bundles set up for the
 * given interface.
 */
static u32 gb_manifest_parse_bundles(struct gb_interface *intf)
{
	struct manifest_desc *desc;
	struct gb_bundle *bundle;
	struct gb_bundle *bundle_next;
	u32 count = 0;
	u8 bundle_id;
	u8 class;

	while ((desc = get_next_bundle_desc(intf))) {
		struct greybus_descriptor_bundle *desc_bundle;

		/* Found one. Set up its bundle structure */
		desc_bundle = desc->data;
		bundle_id = desc_bundle->id;
		class = desc_bundle->class;

		/* Done with this bundle descriptor */
		release_manifest_descriptor(desc);

		/* Ignore any legacy control bundles */
		if (bundle_id == GB_CONTROL_BUNDLE_ID) {
			dev_dbg(&intf->dev, "%s - ignoring control bundle\n",
				__func__);
			release_cport_descriptors(&intf->manifest_descs,
						  bundle_id);
			continue;
		}

		/* No other bundle is allowed to use the control class */
		if (class == GREYBUS_CLASS_CONTROL) {
			dev_err(&intf->dev,
				"bundle %u cannot use control class\n",
				bundle_id);
			goto cleanup;
		}

		bundle = gb_bundle_create(intf, bundle_id, class);
		if (!bundle)
			goto cleanup;

		/*
		 * Now go set up this bundle's functions and cports.
		 *
		 * A 'bundle' represents a device in greybus. It may require
		 * multiple cports for its functioning. If we fail to set up
		 * any cport of a bundle, we had better reject the complete
		 * bundle, as the device may not be able to function properly
		 * otherwise.
		 *
		 * But failing to set up a cport of bundle X doesn't mean that
		 * the device corresponding to bundle Y will not work properly.
		 * Bundles should be treated as separate independent devices.
		 *
		 * While parsing the manifest for an interface, treat bundles
		 * as separate entities and don't reject the entire interface
		 * and its bundles on failing to initialize a cport. But make
		 * sure the bundle which needs the cport gets destroyed
		 * properly.
		 */
		if (!gb_manifest_parse_cports(bundle)) {
			gb_bundle_destroy(bundle);
			continue;
		}

		count++;
	}

	return count;
cleanup:
	/* An error occurred; undo any changes we've made */
	list_for_each_entry_safe(bundle, bundle_next, &intf->bundles, links) {
		gb_bundle_destroy(bundle);
		count--;
	}
	return 0;	/* Error; count should also be 0 */
}

static bool gb_manifest_parse_interface(struct gb_interface *intf,
					struct manifest_desc *interface_desc)
{
	struct greybus_descriptor_interface *desc_intf = interface_desc->data;
	struct gb_control *control = intf->control;
	char *str;

	/* Handle the strings first--they can fail */
	str = gb_string_get(intf, desc_intf->vendor_stringid);
	if (IS_ERR(str))
		return false;
	control->vendor_string = str;

	str = gb_string_get(intf, desc_intf->product_stringid);
	if (IS_ERR(str))
		goto out_free_vendor_string;
	control->product_string = str;

	/* Assign feature flags communicated via manifest */
	intf->features = desc_intf->features;

	/* Release the interface descriptor, now that we're done with it */
	release_manifest_descriptor(interface_desc);

	/* An interface must have at least one bundle descriptor */
	if (!gb_manifest_parse_bundles(intf)) {
		dev_err(&intf->dev, "manifest bundle descriptors not valid\n");
		goto out_err;
	}

	return true;
out_err:
	kfree(control->product_string);
	control->product_string = NULL;
out_free_vendor_string:
	kfree(control->vendor_string);
	control->vendor_string = NULL;

	return false;
}

/*
 * Parse a buffer containing an interface manifest.
 *
 * If we find anything wrong with the content/format of the buffer
 * we reject it.
 *
 * The first requirement is that the manifest's version is
 * one we can parse.
 *
 * We make an initial pass through the buffer and identify all of
 * the descriptors it contains, keeping track, for each, of its type
 * and of the location and size of its data in the buffer.
 *
 * Next we scan the descriptors, looking for an interface descriptor;
 * there must be exactly one of those. When found, we record the
 * information it contains, and then remove that descriptor (and any
 * string descriptors it refers to) from further consideration.
 *
 * After that we look for the interface's bundles--there must be at
 * least one of those.
 *
 * Returns true if parsing was successful, false otherwise.
 */
bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
{
	struct greybus_manifest *manifest;
	struct greybus_manifest_header *header;
	struct greybus_descriptor *desc;
	struct manifest_desc *descriptor;
	struct manifest_desc *interface_desc = NULL;
	u16 manifest_size;
	u32 found = 0;
	bool result;

	/* Manifest descriptor list should be empty here */
	if (WARN_ON(!list_empty(&intf->manifest_descs)))
		return false;

	/* We have to have at _least_ the manifest header */
	if (size < sizeof(*header)) {
		dev_err(&intf->dev, "short manifest (%zu < %zu)\n",
			size, sizeof(*header));
		return false;
	}

	/* Make sure the size is right */
	manifest = data;
	header = &manifest->header;
	manifest_size = le16_to_cpu(header->size);
	if (manifest_size != size) {
		dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n",
			size, manifest_size);
		return false;
	}

	/* Validate major/minor number */
	if (header->version_major > GREYBUS_VERSION_MAJOR) {
		dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n",
			header->version_major, header->version_minor,
			GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
		return false;
	}

	/* OK, find all the descriptors */
	desc = manifest->descriptors;
	size -= sizeof(*header);
	while (size) {
		int desc_size;

		desc_size = identify_descriptor(intf, desc, size);
		if (desc_size < 0) {
			result = false;
			goto out;
		}
		desc = (struct greybus_descriptor *)((char *)desc + desc_size);
		size -= desc_size;
	}

	/* There must be a single interface descriptor */
	list_for_each_entry(descriptor, &intf->manifest_descs, links) {
		if (descriptor->type == GREYBUS_TYPE_INTERFACE)
			if (!found++)
				interface_desc = descriptor;
	}
	if (found != 1) {
		dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n",
			found);
		result = false;
		goto out;
	}

	/* Parse the manifest, starting with the interface descriptor */
	result = gb_manifest_parse_interface(intf, interface_desc);

	/*
	 * We really should have no remaining descriptors, but we
	 * don't know what newer format manifests might leave.
	 */
	if (result && !list_empty(&intf->manifest_descs))
		dev_info(&intf->dev, "excess descriptors in interface manifest\n");
out:
	release_manifest_descriptors(intf);

	return result;
}
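
/*
 * Caller sketch (illustrative and hedged; the exact helpers live outside this
 * file): the host-side interface code fetches the manifest blob over the
 * interface's control connection and then hands it to gb_manifest_parse(),
 * roughly along these lines:
 *
 *	size = gb_control_get_manifest_size_operation(intf);
 *	manifest = kmalloc(size, GFP_KERNEL);
 *	if (!manifest)
 *		return -ENOMEM;
 *	ret = gb_control_get_manifest_operation(intf, manifest, size);
 *	if (!ret && !gb_manifest_parse(intf, manifest, size))
 *		ret = -EINVAL;
 *	kfree(manifest);
 *
 * The raw blob can be freed as soon as gb_manifest_parse() returns: strings
 * are duplicated with kmemdup() and cport descriptors are copied into
 * bundle->cport_desc, so nothing keeps pointers into the buffer.
 */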