/*
 * USB HOST XHCI Controller stack
 *
 * Based on xHCI host controller driver in linux-kernel
 * by Sarah Sharp.
 *
 * Copyright (C) 2008 Intel Corp.
 * Author: Sarah Sharp
 *
 * Copyright (C) 2013 Samsung Electronics Co.Ltd
 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
 *	    Vikas Sajjan <vikas.sajjan@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <dm.h>
#include <asm/byteorder.h>
#include <usb.h>
#include <malloc.h>
#include <asm/cache.h>
#include <linux/errno.h>

#include <usb/xhci.h>

#define CACHELINE_SIZE		CONFIG_SYS_CACHELINE_SIZE

/**
 * Flushes the dcache for the memory region passed
 *
 * @param addr	start address of the memory region to be flushed
 * @param len	length of the memory region to be flushed
 * @return none
 */
void xhci_flush_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
			   ALIGN(addr + len, CACHELINE_SIZE));
}

/**
 * Invalidates the dcache for the memory region passed
 *
 * @param addr	start address of the memory region to be invalidated
 * @param len	length of the memory region to be invalidated
 * @return none
 */
void xhci_inval_cache(uintptr_t addr, u32 len)
{
	BUG_ON((void *)addr == NULL || len == 0);

	invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
				ALIGN(addr + len, CACHELINE_SIZE));
}
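
/*
 * Illustrative usage of the two cache helpers above (a sketch, not a call
 * sequence lifted from this file): the CPU is the producer on the command
 * and transfer rings, so a freshly written TRB is flushed before the
 * doorbell is rung, and event ring memory is invalidated before a
 * completion is read back:
 *
 *	xhci_flush_cache((uintptr_t)trb, sizeof(union xhci_trb));
 *	(ring the doorbell, then later, before reading the event TRB)
 *	xhci_inval_cache((uintptr_t)event_trb, sizeof(union xhci_trb));
 *
 * The names trb/event_trb are placeholders. Both helpers round the range
 * out to CACHELINE_SIZE, so callers may pass unaligned addresses/lengths.
 */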

/**
 * Frees the "segment" pointer passed
 *
 * @param seg	pointer to the "segment" to be freed
 * @return none
 */
static void xhci_segment_free(struct xhci_segment *seg)
{
	free(seg->trbs);
	seg->trbs = NULL;

	free(seg);
}

/**
 * Frees the "ring" pointer passed
 *
 * @param ring	pointer to the "ring" to be freed
 * @return none
 */
static void xhci_ring_free(struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	BUG_ON(!ring);

	first_seg = ring->first_seg;
	seg = first_seg->next;
	while (seg != first_seg) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(seg);
		seg = next;
	}
	xhci_segment_free(first_seg);

	free(ring);
}

/**
 * Free the scratchpad buffer array and scratchpad buffers
 *
 * @param ctrl	host controller data structure
 * @return none
 */
static void xhci_scratchpad_free(struct xhci_ctrl *ctrl)
{
	if (!ctrl->scratchpad)
		return;

	ctrl->dcbaa->dev_context_ptrs[0] = 0;

	free((void *)(uintptr_t)le64_to_cpu(ctrl->scratchpad->sp_array[0]));
	free(ctrl->scratchpad->sp_array);
	free(ctrl->scratchpad);
	ctrl->scratchpad = NULL;
}

/**
 * Frees the "xhci_container_ctx" pointer passed
 *
 * @param ctx	pointer to the "xhci_container_ctx" to be freed
 * @return none
 */
static void xhci_free_container_ctx(struct xhci_container_ctx *ctx)
{
	free(ctx->bytes);
	free(ctx);
}

/**
 * Frees the virtual devices for the "xhci_ctrl" pointer passed
 *
 * @param ctrl	pointer to the "xhci_ctrl" whose virtual devices are to be freed
 * @return none
 */
static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
{
	int i;
	int slot_id;
	struct xhci_virt_device *virt_dev;

	/*
	 * Loop through all the virtual devices.
	 * Slot ID 0 is reserved.
	 */
	for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
		virt_dev = ctrl->devs[slot_id];
		if (!virt_dev)
			continue;

		ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;

		for (i = 0; i < 31; ++i)
			if (virt_dev->eps[i].ring)
				xhci_ring_free(virt_dev->eps[i].ring);

		if (virt_dev->in_ctx)
			xhci_free_container_ctx(virt_dev->in_ctx);
		if (virt_dev->out_ctx)
			xhci_free_container_ctx(virt_dev->out_ctx);

		free(virt_dev);
		/* make sure we are pointing to NULL */
		ctrl->devs[slot_id] = NULL;
	}
}

/**
 * Frees all the memory allocated for the host controller
 *
 * @param ctrl	pointer to the "xhci_ctrl" to be cleaned up
 * @return none
 */
void xhci_cleanup(struct xhci_ctrl *ctrl)
{
	xhci_ring_free(ctrl->event_ring);
	xhci_ring_free(ctrl->cmd_ring);
	xhci_scratchpad_free(ctrl);
	xhci_free_virt_devices(ctrl);
	free(ctrl->erst.entries);
	free(ctrl->dcbaa);
	memset(ctrl, '\0', sizeof(struct xhci_ctrl));
}

/**
 * Mallocs cache-line-aligned, zeroed memory
 *
 * @param size	size of memory to be allocated
 * @return pointer to the aligned and zeroed memory
 */
static void *xhci_malloc(unsigned int size)
{
	void *ptr;
	size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);

	ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
	BUG_ON(!ptr);
	memset(ptr, '\0', size);

	xhci_flush_cache((uintptr_t)ptr, size);

	return ptr;
}

/**
 * Make the prev segment point to the next segment.
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 *
 * @param prev		pointer to the previous segment
 * @param next		pointer to the next segment
 * @param link_trbs	flag to indicate whether to link the TRBs or NOT
 * @return none
 */
static void xhci_link_segments(struct xhci_segment *prev,
			       struct xhci_segment *next, bool link_trbs)
{
	u32 val;
	u64 val_64 = 0;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		val_64 = (uintptr_t)next->trbs;
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(val_64);

		/*
		 * Set the last TRB in the segment to
		 * have a TRB type ID of Link TRB
		 */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= (TRB_LINK << TRB_TYPE_SHIFT);

		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}

/**
 * Initialises the ring's enqueue, dequeue, enq_seg and deq_seg pointers
 *
 * @param ring	pointer to the ring to be initialised
 * @return none
 */
static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/*
	 * The ring is empty, so the enqueue pointer == dequeue pointer
	 */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;

	/*
	 * The ring is initialized to 0. The producer must write 1 to the
	 * cycle bit to handover ownership of the TRB, so PCS = 1.
	 * The consumer must compare CCS to the cycle bit to
	 * check ownership, so CCS = 1.
	 */
	ring->cycle_state = 1;
}
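
/*
 * A short worked example of the cycle-state rule above (illustrative, not
 * extra driver logic): TRB memory comes back zeroed from xhci_malloc(), so
 * every cycle bit starts out as 0. With ring->cycle_state = 1 the driver,
 * as producer on the command and transfer rings, writes each new TRB with
 * its cycle bit set to 1, which is what hands the TRB to the controller.
 * On the event ring the roles are reversed: the driver is the consumer and
 * accepts an event TRB only while its cycle bit matches cycle_state. When
 * the producer passes a Link TRB with the Toggle Cycle flag set, or the
 * event ring consumer wraps from the last segment back to the first,
 * cycle_state is inverted.
 */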

/**
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 *
 * @param none
 * @return pointer to the newly allocated SEGMENT
 */
static struct xhci_segment *xhci_segment_alloc(void)
{
	struct xhci_segment *seg;

	seg = (struct xhci_segment *)malloc(sizeof(struct xhci_segment));
	BUG_ON(!seg);

	seg->trbs = (union xhci_trb *)xhci_malloc(SEGMENT_SIZE);

	seg->next = NULL;

	return seg;
}

/**
 * Create a new ring with zero or more segments.
 * TODO: current code only uses one-time-allocated single-segment rings
 * of 1KB anyway, so we might as well get rid of all the segment and
 * linking code (and maybe increase the size a bit, e.g. 4KB).
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.2 and figures 15 and 16 of the xHCI spec rev1.0.
 *
 * @param num_segs	number of segments in the ring
 * @param link_trbs	flag to indicate whether to link the TRBs or NOT
 * @return pointer to the newly created RING
 */
struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = (struct xhci_ring *)malloc(sizeof(struct xhci_ring));
	BUG_ON(!ring);

	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc();
	BUG_ON(!ring->first_seg);

	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc();
		BUG_ON(!next);

		xhci_link_segments(prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(prev, ring->first_seg, link_trbs);
	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring);

	return ring;
}
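
/*
 * Usage within this file, for reference: xhci_mem_init() below allocates
 * the command ring as a single linked segment with xhci_ring_alloc(1, true),
 * so its Link TRB points back to the start of the same segment, while the
 * event ring uses xhci_ring_alloc(ERST_NUM_SEGS, false) because event ring
 * segments are described by the ERST rather than by Link TRBs.
 */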

/**
 * Set up the scratchpad buffer array and scratchpad buffers
 *
 * @param ctrl	host controller data structure
 * @return -ENOMEM if buffer allocation fails, 0 on success
 */
static int xhci_scratchpad_alloc(struct xhci_ctrl *ctrl)
{
	struct xhci_hccr *hccr = ctrl->hccr;
	struct xhci_hcor *hcor = ctrl->hcor;
	struct xhci_scratchpad *scratchpad;
	int num_sp;
	uint32_t page_size;
	void *buf;
	int i;

	num_sp = HCS_MAX_SCRATCHPAD(xhci_readl(&hccr->cr_hcsparams2));
	if (!num_sp)
		return 0;

	scratchpad = malloc(sizeof(*scratchpad));
	if (!scratchpad)
		goto fail_sp;
	ctrl->scratchpad = scratchpad;

	scratchpad->sp_array = xhci_malloc(num_sp * sizeof(u64));
	if (!scratchpad->sp_array)
		goto fail_sp2;
	ctrl->dcbaa->dev_context_ptrs[0] =
		cpu_to_le64((uintptr_t)scratchpad->sp_array);

	xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[0],
			 sizeof(ctrl->dcbaa->dev_context_ptrs[0]));

	page_size = xhci_readl(&hcor->or_pagesize) & 0xffff;
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	BUG_ON(i == 16);

	page_size = 1 << (i + 12);
	buf = memalign(page_size, num_sp * page_size);
	if (!buf)
		goto fail_sp3;
	memset(buf, '\0', num_sp * page_size);
	xhci_flush_cache((uintptr_t)buf, num_sp * page_size);

	for (i = 0; i < num_sp; i++) {
		uintptr_t ptr = (uintptr_t)buf + i * page_size;

		scratchpad->sp_array[i] = cpu_to_le64(ptr);
	}

	/* flush the populated array so the controller sees the buffers */
	xhci_flush_cache((uintptr_t)scratchpad->sp_array,
			 sizeof(u64) * num_sp);

	return 0;

fail_sp3:
	free(scratchpad->sp_array);

fail_sp2:
	free(scratchpad);
	ctrl->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}
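
/*
 * Worked example for the page-size probe above: the PAGESIZE operational
 * register reports supported page sizes as a bit mask in which bit n set
 * means a page size of 2^(n + 12) bytes. A controller that reads back
 * 0x0001 therefore uses 4 KiB pages, the loop breaks with i == 0, and
 * num_sp scratchpad buffers of 4 KiB each are handed to the controller
 * through sp_array.
 */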

/**
 * Allocates the container context
 *
 * @param ctrl	host controller data structure
 * @param type	type of XHCI Container Context
 * @return NULL if failed else pointer to the context on success
 */
static struct xhci_container_ctx
		*xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
{
	struct xhci_container_ctx *ctx;

	ctx = (struct xhci_container_ctx *)
		malloc(sizeof(struct xhci_container_ctx));
	BUG_ON(!ctx);

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = (MAX_EP_CTX_NUM + 1) *
			CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));

	ctx->bytes = (u8 *)xhci_malloc(ctx->size);

	return ctx;
}

/**
 * Allocates the virtual device and its input/output contexts
 *
 * @param ctrl		host controller data structure
 * @param slot_id	slot ID assigned to the device by the controller
 * @return 0 on success else a negative error code on failure
 */
int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id)
{
	u64 byte_64 = 0;
	struct xhci_virt_device *virt_dev;

	/* Slot ID 0 is reserved */
	if (ctrl->devs[slot_id]) {
		printf("Virt dev for slot[%d] already allocated\n", slot_id);
		return -EEXIST;
	}

	ctrl->devs[slot_id] = (struct xhci_virt_device *)
					malloc(sizeof(struct xhci_virt_device));

	if (!ctrl->devs[slot_id]) {
		puts("Failed to allocate virtual device\n");
		return -ENOMEM;
	}

	memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
	virt_dev = ctrl->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_DEVICE);
	if (!virt_dev->out_ctx) {
		puts("Failed to allocate out context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate the (input) device context for address device command */
	virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
					XHCI_CTX_TYPE_INPUT);
	if (!virt_dev->in_ctx) {
		puts("Failed to allocate in context for virt dev\n");
		return -ENOMEM;
	}

	/* Allocate endpoint 0 ring */
	virt_dev->eps[0].ring = xhci_ring_alloc(1, true);

	byte_64 = (uintptr_t)(virt_dev->out_ctx->bytes);

	/* Point to output device context in dcbaa. */
	ctrl->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(byte_64);

	xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
			 sizeof(__le64));
	return 0;
}
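
/*
 * Layout reminder for the two container contexts allocated above (an
 * illustrative note, not driver logic): each context entry is CTX_SIZE()
 * bytes, i.e. 32 or 64 bytes depending on the Context Size (CSZ) bit in
 * HCCPARAMS. The output (device) context is one slot context followed by
 * 31 endpoint contexts; the input context carries one extra input control
 * context in front of those, which is why xhci_alloc_container_ctx() adds
 * one more CTX_SIZE() for XHCI_CTX_TYPE_INPUT.
 */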

/**
 * Allocates the necessary data structures
 * for the XHCI host controller
 *
 * @param ctrl	host controller data structure
 * @param hccr	pointer to HOST Controller Control Registers
 * @param hcor	pointer to HOST Controller Operational Registers
 * @return 0 if successful else -ENOMEM on failure
 */
int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
		  struct xhci_hcor *hcor)
{
	uint64_t val_64;
	uint64_t trb_64;
	uint32_t val;
	unsigned long deq;
	int i;
	struct xhci_segment *seg;

	/* DCBAA initialization */
	ctrl->dcbaa = (struct xhci_device_context_array *)
			xhci_malloc(sizeof(struct xhci_device_context_array));
	if (ctrl->dcbaa == NULL) {
		puts("unable to allocate DCBA\n");
		return -ENOMEM;
	}

	val_64 = (uintptr_t)ctrl->dcbaa;
	/* Set the pointer in the DCBAA register */
	xhci_writeq(&hcor->or_dcbaap, val_64);

	/* Command ring control pointer register initialization */
	ctrl->cmd_ring = xhci_ring_alloc(1, true);

	/* Set the address in the Command Ring Control register */
	trb_64 = (uintptr_t)ctrl->cmd_ring->first_seg->trbs;
	val_64 = xhci_readq(&hcor->or_crcr);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
		ctrl->cmd_ring->cycle_state;
	xhci_writeq(&hcor->or_crcr, val_64);

	/* Compute the address of the doorbell array */
	val = xhci_readl(&hccr->cr_dboff);
	val &= DBOFF_MASK;
	ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);

	/* Compute the address of the runtime registers */
	val = xhci_readl(&hccr->cr_rtsoff);
	val &= RTSOFF_MASK;
	ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);

	/* Record the address of the first interrupter register set */
	ctrl->ir_set = &ctrl->run_regs->ir_set[0];

	/* Event ring does not maintain link TRB */
	ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
	ctrl->erst.entries = (struct xhci_erst_entry *)
		xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);

	ctrl->erst.num_entries = ERST_NUM_SEGS;

	for (val = 0, seg = ctrl->event_ring->first_seg;
	     val < ERST_NUM_SEGS;
	     val++) {
		struct xhci_erst_entry *entry = &ctrl->erst.entries[val];

		trb_64 = (uintptr_t)seg->trbs;
		xhci_writeq(&entry->seg_addr, trb_64);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
	xhci_flush_cache((uintptr_t)ctrl->erst.entries,
			 ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));

	deq = (unsigned long)ctrl->event_ring->dequeue;

	/* Update HC event ring dequeue pointer */
	xhci_writeq(&ctrl->ir_set->erst_dequeue,
		    (u64)deq & (u64)~ERST_PTR_MASK);

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(&ctrl->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_writel(&ctrl->ir_set->erst_size, val);

	/* this is the event ring segment table pointer */
	val_64 = xhci_readq(&ctrl->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= ((uintptr_t)(ctrl->erst.entries) & ~ERST_PTR_MASK);

	xhci_writeq(&ctrl->ir_set->erst_base, val_64);

	/* set up the scratchpad buffer array and scratchpad buffers */
	xhci_scratchpad_alloc(ctrl);

	/* initialize the virtual devices to NULL */
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		ctrl->devs[i] = NULL;

	/*
	 * Zero this register completely, otherwise spurious Device
	 * Notification Events could cause trouble here.
	 */
	xhci_writel(&hcor->or_dnctrl, 0x0);

	return 0;
}

/**
 * Gets the input control context for the passed container context
 *
 * @param ctx	pointer to the context
 * @return pointer to the Input control context data
 */
struct xhci_input_control_ctx
		*xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

/**
 * Gets the slot context for the passed container context
 *
 * @param ctrl	host controller data structure
 * @param ctx	pointer to the context
 * @return pointer to the slot context data
 */
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
				struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(readl(&ctrl->hccr->cr_hccparams)));
}

/**
 * Gets the EP context based on the ep_index
 *
 * @param ctrl		host controller data structure
 * @param ctx		context container
 * @param ep_index	index of the endpoint
 * @return pointer to the End point context
 */
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes +
		(ep_index * CTX_SIZE(readl(&ctrl->hccr->cr_hccparams))));
}
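
/*
 * Index math example for xhci_get_ep_ctx() (illustrative only): ep_index 0
 * is the default control endpoint, EP0. In an output (device) context it
 * sits one CTX_SIZE() entry after the slot context, so the function
 * returns ctx->bytes + 1 * CTX_SIZE(); in an input context the leading
 * input control context shifts everything by one more entry, so EP0 is at
 * ctx->bytes + 2 * CTX_SIZE().
 */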

/**
 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 *
 * @param ctrl		host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * @param ep_index	index of the end point
 * @return none
 */
void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
			struct xhci_container_ctx *in_ctx,
			struct xhci_container_ctx *out_ctx,
			unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/**
 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint
 * and then issue a configure endpoint command.
 * Only the context entries field matters, but
 * we'll copy the whole thing anyway.
 *
 * @param ctrl		host controller data structure
 * @param in_ctx	contains the input context
 * @param out_ctx	contains the output context
 * @return none
 */
void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
		    struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
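
/*
 * Typical call pattern for the two copy helpers above (a sketch of the
 * expected caller, not code from this file): before issuing a Configure
 * Endpoint command, the caller mirrors the controller-owned output context
 * into the input context with xhci_slot_copy() and xhci_endpoint_copy(),
 * adjusts only the fields it wants to change (for example the max packet
 * size in ep_info2), sets the matching add-context flags in the input
 * control context, and then submits the command.
 */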

/**
 * Set up an xHCI virtual device for a Set Address command
 *
 * @param ctrl		host controller data structure
 * @param udev		pointer to the Device Data Structure
 * @param hop_portnr	root hub port number this device hangs off
 * @return none
 */
void xhci_setup_addressable_virt_dev(struct xhci_ctrl *ctrl,
				     struct usb_device *udev, int hop_portnr)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num = 0;
	u64 trb_64 = 0;
	int slot_id = udev->slot_id;
	int speed = udev->speed;
	int route = 0;
#if CONFIG_IS_ENABLED(DM_USB)
	struct usb_device *dev = udev;
	struct usb_hub_device *hub;
#endif

	virt_dev = ctrl->devs[slot_id];

	BUG_ON(!virt_dev);

	/* Extract the EP0 and Slot Ctrl */
	ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);

	/* Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));

#if CONFIG_IS_ENABLED(DM_USB)
	/* Calculate the route string for this device */
	port_num = dev->portnr;
	while (!usb_hub_is_root_hub(dev->dev)) {
		hub = dev_get_uclass_priv(dev->dev);
		/*
		 * Each hub in the topology is expected to have no more than
		 * 15 ports in order for the route string of a device to be
		 * unique. SuperSpeed hubs are restricted to only having 15
		 * ports, but FS/LS/HS hubs are not. The xHCI specification
		 * says that if the port number the device is connected to is
		 * greater than 15, that portion of the route string shall be
		 * set to 15.
		 */
		if (port_num > 15)
			port_num = 15;
		route |= port_num << (hub->hub_depth * 4);
		dev = dev_get_parent_priv(dev->dev);
		port_num = dev->portnr;
		dev = dev_get_parent_priv(dev->dev->parent);
	}

	debug("route string %x\n", route);
#endif
	slot_ctx->dev_info |= cpu_to_le32(route);

	switch (speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		break;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}

#if CONFIG_IS_ENABLED(DM_USB)
	/* Set up TT fields to support FS/LS devices */
	if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
		struct udevice *parent = udev->dev;

		dev = udev;
		do {
			port_num = dev->portnr;
			dev = dev_get_parent_priv(parent);
			if (usb_hub_is_root_hub(dev->dev))
				break;
			parent = dev->dev->parent;
		} while (dev->speed != USB_SPEED_HIGH);

		if (!usb_hub_is_root_hub(dev->dev)) {
			hub = dev_get_uclass_priv(dev->dev);
			if (hub->tt.multi)
				slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
			slot_ctx->tt_info |= cpu_to_le32(TT_PORT(port_num));
			slot_ctx->tt_info |= cpu_to_le32(TT_SLOT(dev->slot_id));
		}
	}
#endif

	port_num = hop_portnr;
	debug("port_num = %d\n", port_num);

	slot_ctx->dev_info2 |=
		cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
			ROOT_HUB_PORT_SHIFT));

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(CTRL_EP << EP_TYPE_SHIFT);
	debug("SPEED = %d\n", speed);

	switch (speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= cpu_to_le32(((512 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 512bytes\n");
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= cpu_to_le32(((64 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 64bytes\n");
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= cpu_to_le32(((8 & MAX_PACKET_MASK) <<
					MAX_PACKET_SHIFT));
		debug("Setting Packet size = 8bytes\n");
		break;
	default:
		/* New speed? */
		BUG();
	}

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |=
		cpu_to_le32(((0 & MAX_BURST_MASK) << MAX_BURST_SHIFT) |
		((3 & ERROR_COUNT_MASK) << ERROR_COUNT_SHIFT));

	trb_64 = (uintptr_t)virt_dev->eps[0].ring->first_seg->trbs;
	ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);

	/*
	 * xHCI spec 6.2.3:
	 * software shall set 'Average TRB Length' to 8 for control endpoints.
	 */
	ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(8));

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	xhci_flush_cache((uintptr_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
	xhci_flush_cache((uintptr_t)slot_ctx, sizeof(struct xhci_slot_ctx));
}
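
/*
 * Worked example for the route string computed in
 * xhci_setup_addressable_virt_dev() (an illustrative note based on the
 * USB 3.x route string definition): every hub tier below the root port
 * contributes one 4-bit nibble, with the hub closest to the root in bits
 * 3:0. A device on port 3 of a hub that itself sits on port 2 of a hub
 * attached to a root port would therefore get route = 0x32. The root port
 * number is not part of the route string; it goes into the Root Hub Port
 * Number field of dev_info2, filled in from hop_portnr above.
 */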