/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <asm/unaligned.h>
#include <linux/errno.h>

#include <usb/xhci.h>

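/*
 * A rough sketch (added for orientation; not part of the original source)
 * of the ring layout these helpers assume: each ring is a circular list of
 * segments of TRBS_PER_SEGMENT TRBs. On transfer and command rings the last
 * TRB of each segment is a link TRB pointing at the next segment; the event
 * ring has no link TRBs:
 *
 *	seg A: [TRB 0][TRB 1] ... [link TRB] ------.
 *	                                           v
 *	seg B: [TRB 0][TRB 1] ... [link TRB, LINK_TOGGLE] --> back to seg A
 *
 * The producer toggles ring->cycle_state whenever it wraps past the link
 * TRB marked LINK_TOGGLE, so TRB ownership is encoded in whether a TRB's
 * cycle bit matches the ring's current cycle_state.
 */
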
/**
 * Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment? I.e. would the updated event TRB pointer step off the end of the
 * event segment?
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param seg	pointer to the segment to which TRB belongs
 * @param trb	pointer to the ring trb
 * @return 1 if this TRB is a link TRB else 0
 */
static int last_trb(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
		    struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == ctrl->event_ring)
		/* &trbs[TRBS_PER_SEGMENT] is the first address past the segment */
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

/**
 * Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param seg	pointer to the segment to which TRB belongs
 * @param trb	pointer to the ring trb
 * @return 1 if this TRB is the last TRB on the last segment else 0
 */
static bool last_trb_on_last_seg(struct xhci_ctrl *ctrl,
				 struct xhci_ring *ring,
				 struct xhci_segment *seg,
				 union xhci_trb *trb)
{
	if (ring == ctrl->event_ring)
		return ((trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == ring->first_seg));
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/**
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set. This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param more_trbs_coming	flag to indicate whether more TRBs will be
 *				enqueued before prepare_ring() is called again
 * @return none
 */
static void inc_enq(struct xhci_ctrl *ctrl, struct xhci_ring *ring,
		    bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	next = ++(ring->enqueue);

	/*
	 * Advance the enqueue pointer further if that was a link TRB or we're
	 * at the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(ctrl, ring, ring->enq_seg, next)) {
		if (ring != ctrl->event_ring) {
			/*
			 * If the caller doesn't plan on enqueueing more
			 * TDs before ringing the doorbell, then we
			 * don't want to give the link TRB to the
			 * hardware just yet. We'll give the link TRB
			 * back in prepare_ring() just before we enqueue
			 * the TD at the top of the ring.
			 */
			if (!chain && !more_trbs_coming)
				break;

			/*
			 * If we're not dealing with 0.95 hardware or
			 * isoc rings on AMD 0.96 host,
			 * carry over the chain bit of the previous TRB
			 * (which may mean the chain bit is cleared).
			 */
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);

			next->link.control ^= cpu_to_le32(TRB_CYCLE);
			xhci_flush_cache((uintptr_t)next,
					 sizeof(union xhci_trb));
		}
		/* Toggle the cycle bit after the last ring segment. */
		if (last_trb_on_last_seg(ctrl, ring,
					 ring->enq_seg, next))
			ring->cycle_state = (ring->cycle_state ? 0 : 1);

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
}

/**
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * @param ctrl	Host controller data structure
 * @param ring	Ring whose Dequeue TRB pointer needs to be incremented.
 * @return none
 */
static void inc_deq(struct xhci_ctrl *ctrl, struct xhci_ring *ring)
{
	do {
		/*
		 * Update the dequeue pointer further if that was a link TRB or
		 * we're at the end of an event ring segment (which doesn't have
		 * link TRBs)
		 */
		if (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue)) {
			if (ring == ctrl->event_ring &&
			    last_trb_on_last_seg(ctrl, ring,
						 ring->deq_seg, ring->dequeue)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
			}
			ring->deq_seg = ring->deq_seg->next;
			ring->dequeue = ring->deq_seg->trbs;
		} else {
			ring->dequeue++;
		}
	} while (last_trb(ctrl, ring, ring->deq_seg, ring->dequeue));
}

/**
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @param ctrl	Host controller data structure
 * @param ring	pointer to the ring
 * @param more_trbs_coming	flag to indicate whether more TRBs will be
 *				enqueued before prepare_ring() is called again
 * @param trb_fields	pointer to trb field array containing TRB contents
 * @return pointer to the enqueued trb
 */
static struct xhci_generic_trb *queue_trb(struct xhci_ctrl *ctrl,
					  struct xhci_ring *ring,
					  bool more_trbs_coming,
					  unsigned int *trb_fields)
{
	struct xhci_generic_trb *trb;
	int i;

	trb = &ring->enqueue->generic;

	for (i = 0; i < 4; i++)
		trb->field[i] = cpu_to_le32(trb_fields[i]);

	xhci_flush_cache((uintptr_t)trb, sizeof(struct xhci_generic_trb));

	inc_enq(ctrl, ring, more_trbs_coming);

	return trb;
}

/**
 * Does various checks on the endpoint ring, and makes it ready
 * to queue num_trbs.
 *
 * @param ctrl	Host controller data structure
 * @param ep_ring	pointer to the EP Transfer Ring
 * @param ep_state	State of the End Point
 * @return error code in case of invalid ep_state, 0 on success
 */
static int prepare_ring(struct xhci_ctrl *ctrl, struct xhci_ring *ep_ring,
			u32 ep_state)
{
	union xhci_trb *next = ep_ring->enqueue;

	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		puts("WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		puts("WARN waiting for error on ep to be cleared\n");
		return -EINVAL;
	case EP_STATE_HALTED:
		puts("WARN halted endpoint, queueing URB anyway.\n");
		/* fall through */
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		debug("EP STATE RUNNING.\n");
		break;
	default:
		puts("ERROR unknown endpoint state for ep\n");
		return -EINVAL;
	}

	while (last_trb(ctrl, ep_ring, ep_ring->enq_seg, next)) {
		/*
		 * If we're not dealing with 0.95 hardware or isoc rings
		 * on AMD 0.96 host, clear the chain bit.
		 */
		next->link.control &= cpu_to_le32(~TRB_CHAIN);

		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		xhci_flush_cache((uintptr_t)next, sizeof(union xhci_trb));

		/* Toggle the cycle bit after the last ring segment. */
		if (last_trb_on_last_seg(ctrl, ep_ring,
					 ep_ring->enq_seg, next))
			ep_ring->cycle_state = (ep_ring->cycle_state ? 0 : 1);
		ep_ring->enq_seg = ep_ring->enq_seg->next;
		ep_ring->enqueue = ep_ring->enq_seg->trbs;
		next = ep_ring->enqueue;
	}

	return 0;
}

/**
 * Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 *
 * @param ctrl	Host controller data structure
 * @param ptr	Pointer address to write in the first two fields (opt.)
 * @param slot_id	Slot ID to encode in the flags field (opt.)
 * @param ep_index	Endpoint index to encode in the flags field (opt.)
 * @param cmd	Command type to enqueue
 * @return none
 */
void xhci_queue_command(struct xhci_ctrl *ctrl, u8 *ptr, u32 slot_id,
			u32 ep_index, trb_type cmd)
{
	u32 fields[4];
	u64 val_64 = (uintptr_t)ptr;

	BUG_ON(prepare_ring(ctrl, ctrl->cmd_ring, EP_STATE_RUNNING));

	fields[0] = lower_32_bits(val_64);
	fields[1] = upper_32_bits(val_64);
	fields[2] = 0;
	fields[3] = TRB_TYPE(cmd) | SLOT_ID_FOR_TRB(slot_id) |
		    ctrl->cmd_ring->cycle_state;

	/*
	 * Only 'reset endpoint', 'stop endpoint' and 'set TR dequeue pointer'
	 * commands need endpoint id encoded.
	 */
	if (cmd >= TRB_RESET_EP && cmd <= TRB_SET_DEQ)
		fields[3] |= EP_ID_FOR_TRB(ep_index);

	queue_trb(ctrl, ctrl->cmd_ring, false, fields);

	/* Ring the command ring doorbell */
	xhci_writel(&ctrl->dba->doorbell[0], DB_VALUE_HOST);
}
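
/*
 * Usage sketch (added for illustration; the actual call sites live elsewhere
 * in this driver): a command that takes no input context, slot or endpoint,
 * such as Enable Slot, would be issued and completed roughly as:
 *
 *	xhci_queue_command(ctrl, NULL, 0, 0, TRB_ENABLE_SLOT);
 *	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
 *	... inspect event->event_cmd.status ...
 *	xhci_acknowledge_event(ctrl);
 */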

/**
 * The TD size is the number of bytes remaining in the TD (including this TRB),
 * right shifted by 10.
 * It must fit in bits 21:17, so it can't be bigger than 31.
 *
 * @param remainder	remaining bytes of the TD to be sent (including this TRB)
 * @return the TD size field: (remainder >> 10) << 17, capped at the maximum
 */
static u32 xhci_td_remainder(unsigned int remainder)
{
	u32 max = (1 << (21 - 17 + 1)) - 1;

	if ((remainder >> 10) >= max)
		return max << 17;
	else
		return (remainder >> 10) << 17;
}
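
/*
 * Worked example (added for illustration): for remainder = 70000 bytes,
 * 70000 >> 10 = 68 >= 31, so the field saturates at 31 << 17; for
 * remainder = 20000 bytes, 20000 >> 10 = 19 and the result is 19 << 17.
 */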

/**
 * Finds out the remaining packets to be sent
 *
 * @param running_total	total size sent so far
 * @param trb_buff_len	length of the TRB Buffer
 * @param total_packet_count	total packet count
 * @param maxpacketsize	max packet size of current pipe
 * @param num_trbs_left	number of TRBs left to be processed
 * @return 0 if running_total or trb_buff_len is 0, else remainder
 */
static u32 xhci_v1_0_td_remainder(int running_total,
				  int trb_buff_len,
				  unsigned int total_packet_count,
				  int maxpacketsize,
				  unsigned int num_trbs_left)
{
	int packets_transferred;

	/* One TRB with a zero-length data packet. */
	if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
		return 0;

	/*
	 * All the TRB queueing functions don't count the current TRB in
	 * running_total.
	 */
	packets_transferred = (running_total + trb_buff_len) / maxpacketsize;

	if ((total_packet_count - packets_transferred) > 31)
		return 31 << 17;
	return (total_packet_count - packets_transferred) << 17;
}
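
/*
 * Worked example (added for illustration): a 100KB transfer with 512-byte
 * packets has total_packet_count = 200. For the first 64KB TRB,
 * running_total = 0 and trb_buff_len = 0x10000, so packets_transferred = 128
 * and 200 - 128 = 72 > 31, which saturates to 31 << 17. For the last TRB,
 * num_trbs_left == 0 and the TD size field is simply 0.
 */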

/**
 * Ring the doorbell of the End Point
 *
 * @param udev	pointer to the USB device structure
 * @param ep_index	index of the endpoint
 * @param start_cycle	cycle flag of the first TRB
 * @param start_trb	pointer to the first TRB
 * @return none
 */
static void giveback_first_trb(struct usb_device *udev, int ep_index,
			       int start_cycle,
			       struct xhci_generic_trb *start_trb)
{
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);

	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

	xhci_flush_cache((uintptr_t)start_trb, sizeof(struct xhci_generic_trb));

	/* Ringing EP doorbell here */
	xhci_writel(&ctrl->dba->doorbell[udev->slot_id],
		    DB_VALUE(ep_index, 0));
}

/**** POLLING mechanism for XHCI ****/

/**
 * Finalizes a handled event TRB by advancing our dequeue pointer and giving
 * the TRB back to the hardware for recycling. Must call this exactly once at
 * the end of each event handler, and not touch the TRB again afterwards.
 *
 * @param ctrl	Host controller data structure
 * @return none
 */
void xhci_acknowledge_event(struct xhci_ctrl *ctrl)
{
	/* Advance our dequeue pointer to the next event */
	inc_deq(ctrl, ctrl->event_ring);

	/* Inform the hardware */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
		    (uintptr_t)ctrl->event_ring->dequeue | ERST_EHB);
}

/**
 * Checks if there is a new event to handle on the event ring.
 *
 * @param ctrl	Host controller data structure
 * @return 0 if there is no new event, else 1
 */
static int event_ready(struct xhci_ctrl *ctrl)
{
	union xhci_trb *event;

	xhci_inval_cache((uintptr_t)ctrl->event_ring->dequeue,
			 sizeof(union xhci_trb));

	event = ctrl->event_ring->dequeue;

	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    ctrl->event_ring->cycle_state)
		return 0;

	return 1;
}
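
/*
 * Note (added for clarity): the xHC writes each new event TRB with its cycle
 * bit equal to the producer cycle state, and inc_deq() toggles our
 * cycle_state every time the event ring wraps. A stale TRB from the previous
 * lap therefore has a mismatching cycle bit, and event_ready() returns 0.
 */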

/**
 * Waits for a specific type of event and returns it. Discards unexpected
 * events. Caller *must* call xhci_acknowledge_event() after it is finished
 * processing the event, and must not access the returned pointer afterwards.
 *
 * @param ctrl	Host controller data structure
 * @param expected	TRB type expected from Event TRB
 * @return pointer to event trb
 */
union xhci_trb *xhci_wait_for_event(struct xhci_ctrl *ctrl, trb_type expected)
{
	trb_type type;
	unsigned long ts = get_timer(0);

	do {
		union xhci_trb *event = ctrl->event_ring->dequeue;

		if (!event_ready(ctrl))
			continue;

		type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
		if (type == expected)
			return event;

		if (type == TRB_PORT_STATUS) {
			/* TODO: remove this once enumeration has been reworked */
			/*
			 * Port status change events always have a
			 * successful completion code
			 */
			BUG_ON(GET_COMP_CODE(
				le32_to_cpu(event->generic.field[2])) !=
								COMP_SUCCESS);
		} else {
			printf("Unexpected XHCI event TRB, skipping... "
			       "(%08x %08x %08x %08x)\n",
			       le32_to_cpu(event->generic.field[0]),
			       le32_to_cpu(event->generic.field[1]),
			       le32_to_cpu(event->generic.field[2]),
			       le32_to_cpu(event->generic.field[3]));
		}

		xhci_acknowledge_event(ctrl);
	} while (get_timer(ts) < XHCI_TIMEOUT);

	if (expected == TRB_TRANSFER)
		return NULL;

	printf("XHCI timeout on event type %d... cannot recover.\n", expected);
	BUG();
}

/*
 * Stops transfer processing for an endpoint and throws away all unprocessed
 * TRBs by setting the xHC's dequeue pointer to our enqueue pointer. The next
 * xhci_bulk_tx/xhci_ctrl_tx on this endpoint will add new transfers there and
 * ring the doorbell, causing this endpoint to start working again.
 * (Careful: This will BUG() when there was no transfer in progress. Shouldn't
 * happen in practice for current uses and is too complicated to fix right now.)
 */
static void abort_td(struct usb_device *udev, int ep_index)
{
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
	struct xhci_ring *ring = ctrl->devs[udev->slot_id]->eps[ep_index].ring;
	union xhci_trb *event;
	u32 field;

	xhci_queue_command(ctrl, NULL, udev->slot_id, ep_index, TRB_STOP_RING);

	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
	field = le32_to_cpu(event->trans_event.flags);
	BUG_ON(TRB_TO_SLOT_ID(field) != udev->slot_id);
	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
	BUG_ON(GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))
		!= COMP_STOP);
	xhci_acknowledge_event(ctrl);

	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
	BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
		!= udev->slot_id || GET_COMP_CODE(le32_to_cpu(
		event->event_cmd.status)) != COMP_SUCCESS);
	xhci_acknowledge_event(ctrl);

	xhci_queue_command(ctrl, (void *)((uintptr_t)ring->enqueue |
		ring->cycle_state), udev->slot_id, ep_index, TRB_SET_DEQ);
	event = xhci_wait_for_event(ctrl, TRB_COMPLETION);
	BUG_ON(TRB_TO_SLOT_ID(le32_to_cpu(event->event_cmd.flags))
		!= udev->slot_id || GET_COMP_CODE(le32_to_cpu(
		event->event_cmd.status)) != COMP_SUCCESS);
	xhci_acknowledge_event(ctrl);
}

static void record_transfer_result(struct usb_device *udev,
				   union xhci_trb *event, int length)
{
	udev->act_len = min(length, length -
		(int)EVENT_TRB_LEN(le32_to_cpu(event->trans_event.transfer_len)));

	switch (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))) {
	case COMP_SUCCESS:
		BUG_ON(udev->act_len != length);
		/* fallthrough */
	case COMP_SHORT_TX:
		udev->status = 0;
		break;
	case COMP_STALL:
		udev->status = USB_ST_STALLED;
		break;
	case COMP_DB_ERR:
	case COMP_TRB_ERR:
		udev->status = USB_ST_BUF_ERR;
		break;
	case COMP_BABBLE:
		udev->status = USB_ST_BABBLE_DET;
		break;
	default:
		udev->status = 0x80; /* USB_ST_TOO_LAZY_TO_MAKE_A_NEW_MACRO */
	}
}

/**** Bulk and Control transfer methods ****/
/**
 * Queues up the BULK Request
 *
 * @param udev	pointer to the USB device structure
 * @param pipe	contains the DIR_IN or OUT, devnum
 * @param length	length of the buffer
 * @param buffer	buffer to be read/written based on the request
 * @return returns 0 if successful else -1 on failure
 */
int xhci_bulk_tx(struct usb_device *udev, unsigned long pipe,
		 int length, void *buffer)
{
	int num_trbs = 0;
	struct xhci_generic_trb *start_trb;
	bool first_trb = false;
	int start_cycle;
	u32 field = 0;
	u32 length_field = 0;
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
	int slot_id = udev->slot_id;
	int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ring;		/* EP transfer ring */
	union xhci_trb *event;

	int running_total, trb_buff_len;
	unsigned int total_packet_count;
	int maxpacketsize;
	u64 addr;
	int ret;
	u32 trb_fields[4];
	u64 val_64 = (uintptr_t)buffer;

	debug("dev=%p, pipe=%lx, buffer=%p, length=%d\n",
	      udev, pipe, buffer, length);

	ep_index = usb_pipe_ep_index(pipe);
	virt_dev = ctrl->devs[slot_id];

	xhci_inval_cache((uintptr_t)virt_dev->out_ctx->bytes,
			 virt_dev->out_ctx->size);

	ep_ctx = xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);

	ring = virt_dev->eps[ep_index].ring;
	/*
	 * How much data is (potentially) left before the 64KB boundary?
	 * The xHCI spec (table 49 and section 6.4.1) requires that a TRB
	 * buffer not span a 64KB boundary; if it would, we split the
	 * request across multiple chained TRBs.
	 */
	running_total = TRB_MAX_BUFF_SIZE -
			(lower_32_bits(val_64) & (TRB_MAX_BUFF_SIZE - 1));
	trb_buff_len = running_total;
	running_total &= TRB_MAX_BUFF_SIZE - 1;
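
	/*
	 * Note (added for clarity): if the buffer starts exactly on a 64KB
	 * boundary, the subtraction above yields TRB_MAX_BUFF_SIZE; masking
	 * with (TRB_MAX_BUFF_SIZE - 1) folds that case to 0, so the loop
	 * below still counts one TRB per full 64KB chunk.
	 */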

	/*
	 * If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || length == 0)
		num_trbs++;

	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}

	/*
	 * XXX: prepare_ring() is called here in place of Linux's
	 * prepare_transfer(), since we are not maintaining multiple
	 * TDs/transfers at the same time.
	 */
	ret = prepare_ring(ctrl, ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;

	running_total = 0;
	maxpacketsize = usb_maxpacket(udev, pipe);

	total_packet_count = DIV_ROUND_UP(length, maxpacketsize);

	/* How much data fits in the first TRB, up to the 64KB boundary? */
	addr = val_64;

	if (trb_buff_len > length)
		trb_buff_len = length;

	first_trb = true;

	/* flush the buffer before use */
	xhci_flush_cache((uintptr_t)buffer, length);

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;
		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else {
			field |= ring->cycle_state;
		}

		/*
		 * Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1)
			field |= TRB_CHAIN;
		else
			field |= TRB_IOC;

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_pipein(pipe))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		if (HC_VERSION(xhci_readl(&ctrl->hccr->cr_capbase)) < 0x100)
			remainder = xhci_td_remainder(length - running_total);
		else
			remainder = xhci_v1_0_td_remainder(running_total,
							   trb_buff_len,
							   total_packet_count,
							   maxpacketsize,
							   num_trbs - 1);

		length_field = ((trb_buff_len & TRB_LEN_MASK) |
				remainder |
				((0 & TRB_INTR_TARGET_MASK) <<
				TRB_INTR_TARGET_SHIFT));

		trb_fields[0] = lower_32_bits(addr);
		trb_fields[1] = upper_32_bits(addr);
		trb_fields[2] = length_field;
		trb_fields[3] = field | (TRB_NORMAL << TRB_TYPE_SHIFT);

		queue_trb(ctrl, ring, (num_trbs > 1), trb_fields);

		--num_trbs;

		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = min((length - running_total), TRB_MAX_BUFF_SIZE);
	} while (running_total < length);

	giveback_first_trb(udev, ep_index, start_cycle, start_trb);

	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
	if (!event) {
		debug("XHCI bulk transfer timed out, aborting...\n");
		abort_td(udev, ep_index);
		udev->status = USB_ST_NAK_REC;	/* closest thing to a timeout */
		udev->act_len = 0;
		return -ETIMEDOUT;
	}
	field = le32_to_cpu(event->trans_event.flags);

	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
	BUG_ON(*(void **)(uintptr_t)le64_to_cpu(event->trans_event.buffer) -
		buffer > (size_t)length);

	record_transfer_result(udev, event, length);
	xhci_acknowledge_event(ctrl);
	xhci_inval_cache((uintptr_t)buffer, length);

	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;
}

/**
 * Queues up the Control Transfer Request
 *
 * @param udev	pointer to the USB device structure
 * @param pipe	contains the DIR_IN or OUT, devnum
 * @param req	request type
 * @param length	length of the buffer
 * @param buffer	buffer to be read/written based on the request
 * @return returns 0 if successful else error code on failure
 */
int xhci_ctrl_tx(struct usb_device *udev, unsigned long pipe,
		 struct devrequest *req, int length,
		 void *buffer)
{
	int ret;
	int start_cycle;
	int num_trbs;
	u32 field;
	u32 length_field;
	u64 buf_64 = 0;
	struct xhci_generic_trb *start_trb;
	struct xhci_ctrl *ctrl = xhci_get_ctrl(udev);
	int slot_id = udev->slot_id;
	int ep_index;
	u32 trb_fields[4];
	struct xhci_virt_device *virt_dev = ctrl->devs[slot_id];
	struct xhci_ring *ep_ring;
	union xhci_trb *event;

	debug("req=%u (%#x), type=%u (%#x), value=%u (%#x), index=%u\n",
	      req->request, req->request,
	      req->requesttype, req->requesttype,
	      le16_to_cpu(req->value), le16_to_cpu(req->value),
	      le16_to_cpu(req->index));

	ep_index = usb_pipe_ep_index(pipe);

	ep_ring = virt_dev->eps[ep_index].ring;

	/*
	 * Check to see if the max packet size for the default control
	 * endpoint changed during FS device enumeration
	 */
	if (udev->speed == USB_SPEED_FULL) {
		ret = xhci_check_maxpacket(udev);
		if (ret < 0)
			return ret;
	}

	xhci_inval_cache((uintptr_t)virt_dev->out_ctx->bytes,
			 virt_dev->out_ctx->size);

	struct xhci_ep_ctx *ep_ctx =
		xhci_get_ep_ctx(ctrl, virt_dev->out_ctx, ep_index);

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */

	if (length > 0)
		num_trbs++;
	/*
	 * XXX: prepare_ring() is called here in place of Linux's
	 * prepare_transfer(), since we are not maintaining multiple
	 * TDs/transfers at the same time.
	 */
	ret = prepare_ring(ctrl, ep_ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK);

	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	debug("start_trb %p, start_cycle %d\n", start_trb, start_cycle);

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	field = 0;
	field |= TRB_IDT | (TRB_SETUP << TRB_TYPE_SHIFT);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (HC_VERSION(xhci_readl(&ctrl->hccr->cr_capbase)) >= 0x100) {
		if (length > 0) {
			if (req->requesttype & USB_DIR_IN)
				field |= (TRB_DATA_IN << TRB_TX_TYPE_SHIFT);
			else
				field |= (TRB_DATA_OUT << TRB_TX_TYPE_SHIFT);
		}
	}

	debug("req->requesttype = %d, req->request = %d, "
	      "le16_to_cpu(req->value) = %d, "
	      "le16_to_cpu(req->index) = %d, "
	      "le16_to_cpu(req->length) = %d\n",
	      req->requesttype, req->request, le16_to_cpu(req->value),
	      le16_to_cpu(req->index), le16_to_cpu(req->length));
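
	/*
	 * Note (added for clarity): per xHCI section 6.4.1.2.1 the 8-byte
	 * setup packet is carried as immediate data in the first two TRB
	 * fields: field[0] = bmRequestType | bRequest << 8 | wValue << 16,
	 * field[1] = wIndex | wLength << 16.
	 */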
	trb_fields[0] = req->requesttype | req->request << 8 |
			le16_to_cpu(req->value) << 16;
	trb_fields[1] = le16_to_cpu(req->index) |
			le16_to_cpu(req->length) << 16;
	/* TRB_LEN | (TRB_INTR_TARGET) */
	trb_fields[2] = (8 | ((0 & TRB_INTR_TARGET_MASK) <<
			TRB_INTR_TARGET_SHIFT));
	/* Immediate data in pointer */
	trb_fields[3] = field;
	queue_trb(ctrl, ep_ring, true, trb_fields);

	/* Re-initializing field to zero */
	field = 0;
	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_pipein(pipe))
		field = TRB_ISP | (TRB_DATA << TRB_TYPE_SHIFT);
	else
		field = (TRB_DATA << TRB_TYPE_SHIFT);

	length_field = (length & TRB_LEN_MASK) | xhci_td_remainder(length) |
			((0 & TRB_INTR_TARGET_MASK) << TRB_INTR_TARGET_SHIFT);
	debug("length_field = %d, length = %d, "
	      "xhci_td_remainder(length) = %d, TRB_INTR_TARGET(0) = %d\n",
	      length_field, (length & TRB_LEN_MASK),
	      xhci_td_remainder(length), 0);

	if (length > 0) {
		if (req->requesttype & USB_DIR_IN)
			field |= TRB_DIR_IN;
		buf_64 = (uintptr_t)buffer;

		trb_fields[0] = lower_32_bits(buf_64);
		trb_fields[1] = upper_32_bits(buf_64);
		trb_fields[2] = length_field;
		trb_fields[3] = field | ep_ring->cycle_state;

		xhci_flush_cache((uintptr_t)buffer, length);
		queue_trb(ctrl, ep_ring, true, trb_fields);
	}

	/*
	 * Queue status TRB -
	 * see Table 7 and sections 4.11.2.2 and 6.4.1.2.3
	 */

	/* If the device sent data, the status stage is an OUT transfer */
	field = 0;
	if (length > 0 && req->requesttype & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;

	trb_fields[0] = 0;
	trb_fields[1] = 0;
	trb_fields[2] = ((0 & TRB_INTR_TARGET_MASK) << TRB_INTR_TARGET_SHIFT);
	/* Event on completion */
	trb_fields[3] = field | TRB_IOC |
			(TRB_STATUS << TRB_TYPE_SHIFT) |
			ep_ring->cycle_state;

	queue_trb(ctrl, ep_ring, false, trb_fields);

	giveback_first_trb(udev, ep_index, start_cycle, start_trb);

	event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
	if (!event)
		goto abort;
	field = le32_to_cpu(event->trans_event.flags);

	BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
	BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);

	record_transfer_result(udev, event, length);
	xhci_acknowledge_event(ctrl);

	/* Invalidate buffer to make it available to usb-core */
	if (length > 0)
		xhci_inval_cache((uintptr_t)buffer, length);

	if (GET_COMP_CODE(le32_to_cpu(event->trans_event.transfer_len))
			== COMP_SHORT_TX) {
		/* Short data stage, clear up additional status stage event */
		event = xhci_wait_for_event(ctrl, TRB_TRANSFER);
		if (!event)
			goto abort;
		field = le32_to_cpu(event->trans_event.flags);
		BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
		BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
		xhci_acknowledge_event(ctrl);
	}

	return (udev->status != USB_ST_NOT_PROC) ? 0 : -1;

abort:
	debug("XHCI control transfer timed out, aborting...\n");
	abort_td(udev, ep_index);
	udev->status = USB_ST_NAK_REC;
	udev->act_len = 0;
	return -ETIMEDOUT;
}