// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

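/* Free a DbC container context allocated with dbc_alloc_ctx() */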
static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
	kfree(ctx);
}

/* we use only one segment for DbC rings */
static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg && ring->first_seg->trbs) {
		dma_free_coherent(dev, TRB_SEGMENT_SIZE,
				  ring->first_seg->trbs,
				  ring->first_seg->dma);
		kfree(ring->first_seg);
	}
	kfree(ring);
}

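/*
 * Build string0 and the manufacturer, product and serial string
 * descriptors, and pack their lengths, one per byte, into the value
 * later written to the info context length field.
 */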
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor	*s_desc;
	u32				string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		= s_desc->bLength;
	string_length		<<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* String0: */
	strings->string0[0]	= 4;
	strings->string0[1]	= USB_DT_STRING;
	strings->string0[2]	= 0x09;
	strings->string0[3]	= 0x04;
	string_length		+= 4;

	return string_length;
}

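/*
 * Populate the DbC info and bulk endpoint contexts, then program the
 * context pointer and device descriptor info registers.
 */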
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context	*info;
	struct xhci_ep_ctx	*ep_ctx;
	u32			dev_info;
	dma_addr_t		deq, dma;
	unsigned int		max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info			= (struct dbc_info_context *)dbc->ctx->bytes;
	dma			= dbc->string_dma;
	info->string0		= cpu_to_le64(dma);
	info->manufacturer	= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length		= cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx			= dbc_bulkout_ctx(dbc);
	max_burst		= DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq			= dbc_bulkout_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx			= dbc_bulkin_ctx(dbc);
	deq			= dbc_bulkin_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	/* writel() performs the cpu-to-le32 conversion itself */
	dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
	writel(dev_info, &dbc->regs->devinfo2);
}

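/*
 * Complete a request: unlink it, unmap its buffer, and call its
 * completion handler with dbc->lock temporarily dropped.
 */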
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc		*dbc = req->dbc;
	struct device		*dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb	*trb = req->trb;

	trb->generic.field[0]	= 0;
	trb->generic.field[1]	= 0;
	trb->generic.field[2]	= 0;
	trb->generic.field[3]	&= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3]	|= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request	*req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
	struct dbc_request	*req;

	if (direction != BULK_IN &&
	    direction != BULK_OUT)
		return NULL;

	if (!dbc)
		return NULL;

	req = kzalloc(sizeof(*req), flags);
	if (!req)
		return NULL;

	req->dbc = dbc;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

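/*
 * Write one TRB at the enqueue pointer and advance it, wrapping via
 * the link TRB (and toggling the cycle state) at the segment end.
 */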
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb		*trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0]	= cpu_to_le32(field1);
	trb->generic.field[1]	= cpu_to_le32(field2);
	trb->generic.field[2]	= cpu_to_le32(field3);
	trb->generic.field[3]	= cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

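/*
 * Queue a single normal TRB for the request and ring the doorbell.
 * The cycle bit is flipped last, after a write barrier, so the
 * controller never sees a half-written TRB as owned.
 */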
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64			addr;
	union xhci_trb		*trb;
	unsigned int		num_trbs;
	struct xhci_dbc		*dbc = req->dbc;
	struct xhci_ring	*ring = dep->ring;
	u32			length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr	= req->dma;
	trb	= ring->enqueue;
	cycle	= ring->cycle_state;
	length	= TRB_LEN(req->length);
	control	= TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

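/* Map the request buffer for DMA and put it on the transfer ring */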
static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int			ret;
	struct xhci_dbc		*dbc = req->dbc;
	struct device		*dev = dbc->dev;
	struct dbc_ep		*dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual		= 0;
	req->status		= -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

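/*
 * Queue a request on a DbC endpoint and kick the event work to process
 * completions; requests are accepted only in the configured state.
 */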
int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long		flags;
	struct xhci_dbc		*dbc = req->dbc;
	int			ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep		*dep;

	dep			= &dbc->eps[direction];
	dep->dbc		= dbc;
	dep->direction		= direction;
	dep->ring		= direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

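/* Allocate a one-entry event ring segment table for the DbC event ring */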
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
		    struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
	return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	if (erst->entries)
		dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
				  erst->entries, erst->erst_dma_addr);
	erst->entries = NULL;
}

static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	/* xhci 7.6.9, all three contexts; info, ep-out and ep-in. Each 64 bytes */
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

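/*
 * Allocate a single-segment ring. Transfer rings get a link TRB that
 * loops the segment back onto itself; the event ring does not.
 */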
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;
	dma_addr_t dma;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = 1;
	ring->type = type;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		goto seg_fail;

	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
	xhci_initialize_ring_info(ring, 1);
	return ring;
dma_fail:
	kfree(seg);
seg_fail:
	kfree(ring);
	return NULL;
}

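/*
 * Allocate all DbC data structures (rings, ERST, contexts and string
 * table) and program the ERST and event ring dequeue registers.
 */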
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int			ret;
	dma_addr_t		deq;
	u32			string_length;
	struct device		*dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	if (dbc->string) {
		dma_free_coherent(dbc->dev, dbc->string_size,
				  dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

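/*
 * Reset and enable the DbC; called with dbc->lock held, hence the
 * GFP_ATOMIC allocation in xhci_dbc_mem_init().
 */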
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int			ret;
	u32			ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}

static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int			ret;
	unsigned long		flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long		flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
	case DS_STALLED:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (!ret) {
		xhci_dbc_mem_cleanup(dbc);
		pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
	}
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32			portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* Port reset change bit will be cleared in other place: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep		*dep;
	struct xhci_ring	*ring;
	int			ep_id;
	int			status;
	u32			comp_code;
	size_t			remain_length;
	struct dbc_request	*req = NULL, *r;

	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep		= (ep_id == EPID_OUT) ?
				get_out_ep(dbc) : get_in_ep(dbc);
	ring		= dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
}

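/*
 * Run the DbC state machine and drain the event ring; called from the
 * event work with dbc->lock held. The return value tells the caller
 * which follow-up action to take.
 */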
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t		deq;
	struct dbc_ep		*dep;
	union xhci_trb		*evt;
	u32			ctrl, portsc;
	bool			update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			dev_info(dbc->dev, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn		evtr;
	struct xhci_dbc		*dbc;
	unsigned long		flags;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
	unsigned long		flags;

	spin_lock_irqsave(&xhci->lock, flags);
	kfree(xhci->dbc);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

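/*
 * Find the xHCI Debug Capability in extended capability space and
 * allocate software state for it, unless the DbC is already in use.
 */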
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
	u32			reg;
	struct xhci_dbc		*dbc;
	unsigned long		flags;
	void __iomem		*base;
	int			dbc_cap_offs;

	base = &xhci->cap_regs->hc_capbase;
	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return -ENOMEM;

	dbc->regs = base + dbc_cap_offs;

	/* We will avoid using DbC in xhci driver if it's in use. */
	reg = readl(&dbc->regs->control);
	if (reg & DBC_CTRL_DBC_ENABLE) {
		kfree(dbc);
		return -EBUSY;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->dbc) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(dbc);
		return -EBUSY;
	}
	xhci->dbc = dbc;
	spin_unlock_irqrestore(&xhci->lock, flags);

	dbc->xhci = xhci;
	dbc->dev = xhci_to_hcd(xhci)->self.sysdev;
	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	return 0;
}

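/*
 * The "dbc" sysfs attribute: reads report the current DbC state,
 * writes of "enable" / "disable" start or stop the DbC.
 */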
static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char		*p;
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:
		p = "disabled";
		break;
	case DS_INITIALIZED:
		p = "initialized";
		break;
	case DS_ENABLED:
		p = "enabled";
		break;
	case DS_CONNECTED:
		p = "connected";
		break;
	case DS_CONFIGURED:
		p = "configured";
		break;
	case DS_STALLED:
		p = "stalled";
		break;
	default:
		p = "unknown";
	}

	return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd		*xhci;
	struct xhci_dbc		*dbc;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(dbc);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(dbc);
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(dbc);

int xhci_dbc_init(struct xhci_hcd *xhci)
{
	int			ret;
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	ret = xhci_do_dbc_init(xhci);
	if (ret)
		goto init_err3;

	ret = xhci_dbc_tty_probe(xhci);
	if (ret)
		goto init_err2;

	ret = device_create_file(dev, &dev_attr_dbc);
	if (ret)
		goto init_err1;

	return 0;

init_err1:
	xhci_dbc_tty_remove(xhci->dbc);
init_err2:
	xhci_do_dbc_exit(xhci);
init_err3:
	return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	if (!xhci->dbc)
		return;

	device_remove_file(dev, &dev_attr_dbc);
	xhci_dbc_tty_remove(xhci->dbc);
	xhci_dbc_stop(xhci->dbc);
	xhci_do_dbc_exit(xhci);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(dbc);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int			ret = 0;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(dbc);
	}

	return ret;
}
#endif /* CONFIG_PM */