// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 */

#include <asm/unaligned.h>

#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "udl_drv.h"

/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512

#define NR_USB_REQUEST_CHANNEL 0x12

#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256

#define GET_URB_TIMEOUT HZ
#define FREE_URB_TIMEOUT (HZ*2)

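/*
 * Read the vendor-specific descriptor (type 0x5f) from the device and walk
 * its key/length/value records. The only key handled is 0x0200, the SKU's
 * maximum pixel area, which bounds the display modes we will accept. An
 * unrecognized descriptor is logged but deliberately not treated as fatal;
 * only an allocation failure makes this return false.
 */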
static bool udl_parse_vendor_descriptor(struct drm_device *dev,
					struct usb_device *usbdev)
{
	struct udl_device *udl = to_udl(dev);
	char *desc;
	char *buf;
	char *desc_end;

	u8 total_len = 0;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
				       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
	if (total_len > 5) {
		DRM_INFO("vendor descriptor length:%x data:%11ph\n",
			 total_len, desc);

		if ((desc[0] != total_len) ||    /* descriptor length */
		    (desc[1] != 0x5f) ||         /* vendor descriptor type */
		    (desc[2] != 0x01) ||         /* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2))  /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

		while (desc < desc_end) {
			u8 length;
			u16 key;

			/* records are byte-packed, so use unaligned loads */
			key = get_unaligned_le16(desc);
			desc += sizeof(u16);
			length = *desc;
			desc++;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area;

				max_area = get_unaligned_le32(desc);
				DRM_DEBUG("DL chip limited to %d pixel modes\n",
					  max_area);
				udl->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}

/*
 * Need to ensure a channel is selected before submitting URBs
 */
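/*
 * The 16-byte sequence below appears to be an opaque, vendor-defined key
 * (the same bytes the udlfb driver sends); treat it as a magic constant
 * rather than structured data.
 */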
static int udl_select_std_channel(struct udl_device *udl)
{
	int ret;
	static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
					 0x1C, 0x88, 0x5E, 0x15,
					 0x60, 0xFE, 0xC6, 0x97,
					 0x16, 0x3D, 0x47, 0xF2};
	void *sendbuf;

	sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
	if (!sendbuf)
		return -ENOMEM;

	ret = usb_control_msg(udl->udev,
			      usb_sndctrlpipe(udl->udev, 0),
			      NR_USB_REQUEST_CHANNEL,
			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
			      sendbuf, sizeof(set_def_chn),
			      USB_CTRL_SET_TIMEOUT);
	kfree(sendbuf);
	return ret < 0 ? ret : 0;
}

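/*
 * Deferred release of a URB-pool slot. Kicking the semaphore from a work
 * item avoids calling up() in a context where a waiter could deadlock us
 * (see the fb_defio note in udl_urb_completion() below).
 */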
static void udl_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}

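/*
 * Completion handler for bulk transfer URBs: log real errors (unlink faults
 * are expected during teardown), put the URB back on the free list, and
 * release one slot of the pool's counting semaphore.
 */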
void udl_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct udl_device *udl = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN)) {
			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
				  __func__, urb->status);
		}
	}

	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

	spin_lock_irqsave(&udl->urbs.lock, flags);
	list_add_tail(&unode->entry, &udl->urbs.list);
	udl->urbs.available++;
	spin_unlock_irqrestore(&udl->urbs.lock, flags);

#if 0
	/*
	 * When using fb_defio, we deadlock if up() is called
	 * while another is waiting. So queue to another process.
	 */
	if (fb_defio)
		schedule_delayed_work(&unode->release_urb_work, 0);
	else
#endif
	up(&udl->urbs.limit_sem);
}

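/*
 * Tear down the URB pool. Each down() reserves one URB that is guaranteed
 * to be on the free list, so by the time the loop has run urbs.count times
 * every in-flight transfer has completed and everything has been freed.
 */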
static void udl_free_urb_list(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	int count = udl->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;

	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {
		down(&udl->urbs.limit_sem);

		spin_lock_irq(&udl->urbs.lock);

		node = udl->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);

		spin_unlock_irq(&udl->urbs.lock);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, udl->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}
	udl->urbs.count = 0;
}

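/*
 * Preallocate a pool of bulk URBs, each backed by a USB-coherent buffer of
 * 'size' bytes. If a coherent buffer cannot be had, the pool is torn down
 * and rebuilt with half the buffer size (down to one page), so the same
 * total number of bytes is still available in smaller pieces.
 */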
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
	struct udl_device *udl = to_udl(dev);
	struct urb *urb;
	struct urb_node *unode;
	char *buf;
	size_t wanted_size = count * size;

	spin_lock_init(&udl->urbs.lock);

retry:
	udl->urbs.size = size;
	INIT_LIST_HEAD(&udl->urbs.list);

	sema_init(&udl->urbs.limit_sem, 0);
	udl->urbs.count = 0;
	udl->urbs.available = 0;

	while (udl->urbs.count * size < wanted_size) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = udl;

		INIT_DELAYED_WORK(&unode->release_urb_work,
				  udl_release_urb_work);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			if (size > PAGE_SIZE) {
				size /= 2;
				udl_free_urb_list(dev);
				goto retry;
			}
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, udl->udev, usb_sndbulkpipe(udl->udev, 1),
				  buf, size, udl_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &udl->urbs.list);

		up(&udl->urbs.limit_sem);
		udl->urbs.count++;
		udl->urbs.available++;
	}

	DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);

	return udl->urbs.count;
}

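/*
 * Reserve a preallocated URB from the pool. Blocks for up to
 * GET_URB_TIMEOUT until a completed URB is returned to the free list;
 * returns NULL on timeout or if the wait is interrupted.
 *
 * A typical caller pairs this with udl_submit_urb(); roughly (sketch only,
 * fill_render_commands() is a hypothetical stand-in for whatever builds the
 * DisplayLink command stream into the transfer buffer):
 *
 *	struct urb *urb = udl_get_urb(dev);
 *	size_t len;
 *
 *	if (!urb)
 *		return -ENOMEM;
 *	len = fill_render_commands(urb->transfer_buffer, udl->urbs.size);
 *	ret = udl_submit_urb(dev, urb, len);
 */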
struct urb *udl_get_urb(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	int ret = 0;
	struct list_head *entry;
	struct urb_node *unode;
	struct urb *urb = NULL;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		DRM_INFO("wait for urb interrupted: %x available: %d\n",
			 ret, udl->urbs.available);
		goto error;
	}

	spin_lock_irq(&udl->urbs.lock);

	BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
	entry = udl->urbs.list.next;
	list_del_init(entry);
	udl->urbs.available--;

	spin_unlock_irq(&udl->urbs.lock);

	unode = list_entry(entry, struct urb_node, entry);
	urb = unode->urb;

error:
	return urb;
}

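/*
 * Submit a URB previously obtained from udl_get_urb(). On submission
 * failure the completion handler is called by hand, so the URB still finds
 * its way back to the free pool.
 */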
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
	struct udl_device *udl = to_udl(dev);
	int ret;

	BUG_ON(len > udl->urbs.size);

	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		udl_urb_completion(urb); /* because no one else will */
		DRM_ERROR("usb_submit_urb error %x\n", ret);
	}
	return ret;
}

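/*
 * One-time device initialization: parse the vendor descriptor, select the
 * standard channel, build the URB pool, and bring up KMS. Failing to get a
 * DMA device only disables buffer sharing; it does not abort the probe.
 */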
int udl_init(struct udl_device *udl)
{
	struct drm_device *dev = &udl->drm;
	int ret = -ENOMEM;

	DRM_DEBUG("\n");

	udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
	if (!udl->dmadev)
		drm_warn(dev, "buffer sharing not supported"); /* not an error */

	mutex_init(&udl->gem_lock);

	if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
		ret = -ENODEV;
		DRM_ERROR("firmware not recognized. Assume incompatible device\n");
		goto err;
	}

	if (udl_select_std_channel(udl))
		DRM_ERROR("Selecting channel failed\n");

	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		DRM_ERROR("udl_alloc_urb_list failed\n");
		goto err;
	}

	DRM_DEBUG("\n");
	ret = udl_modeset_init(dev);
	if (ret)
		goto err;

	drm_kms_helper_poll_init(dev);

	return 0;

err:
	if (udl->urbs.count)
		udl_free_urb_list(dev);
	put_device(udl->dmadev);
	DRM_ERROR("udl_init failed: %d\n", ret);
	return ret;
}

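/*
 * Called when the underlying USB device goes away: drop the URB pool and
 * the DMA device reference so no further transfers can be issued.
 */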
int udl_drop_usb(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);

	udl_free_urb_list(dev);
	put_device(udl->dmadev);
	udl->dmadev = NULL;

	return 0;
}