1 /* 2 * Copyright (c) 2014 Google, Inc 3 * Written by Simon Glass <sjg@chromium.org> 4 * 5 * SPDX-License-Identifier: GPL-2.0+ 6 */ 7 8 #include <common.h> 9 #include <dm.h> 10 #include <errno.h> 11 #include <inttypes.h> 12 #include <pci.h> 13 #include <asm/io.h> 14 #include <dm/device-internal.h> 15 #include <dm/lists.h> 16 #if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP) 17 #include <asm/fsp/fsp_support.h> 18 #endif 19 #include "pci_internal.h" 20 21 DECLARE_GLOBAL_DATA_PTR; 22 23 int pci_get_bus(int busnum, struct udevice **busp) 24 { 25 int ret; 26 27 ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp); 28 29 /* Since buses may not be numbered yet try a little harder with bus 0 */ 30 if (ret == -ENODEV) { 31 ret = uclass_first_device_err(UCLASS_PCI, busp); 32 if (ret) 33 return ret; 34 ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp); 35 } 36 37 return ret; 38 } 39 40 struct udevice *pci_get_controller(struct udevice *dev) 41 { 42 while (device_is_on_pci_bus(dev)) 43 dev = dev->parent; 44 45 return dev; 46 } 47 48 pci_dev_t dm_pci_get_bdf(struct udevice *dev) 49 { 50 struct pci_child_platdata *pplat = dev_get_parent_platdata(dev); 51 struct udevice *bus = dev->parent; 52 53 return PCI_ADD_BUS(bus->seq, pplat->devfn); 54 } 55 56 /** 57 * pci_get_bus_max() - returns the bus number of the last active bus 58 * 59 * @return last bus number, or -1 if no active buses 60 */ 61 static int pci_get_bus_max(void) 62 { 63 struct udevice *bus; 64 struct uclass *uc; 65 int ret = -1; 66 67 ret = uclass_get(UCLASS_PCI, &uc); 68 uclass_foreach_dev(bus, uc) { 69 if (bus->seq > ret) 70 ret = bus->seq; 71 } 72 73 debug("%s: ret=%d\n", __func__, ret); 74 75 return ret; 76 } 77 78 int pci_last_busno(void) 79 { 80 return pci_get_bus_max(); 81 } 82 83 int pci_get_ff(enum pci_size_t size) 84 { 85 switch (size) { 86 case PCI_SIZE_8: 87 return 0xff; 88 case PCI_SIZE_16: 89 return 0xffff; 90 default: 91 return 0xffffffff; 92 } 93 } 94 95 int pci_bus_find_devfn(struct udevice *bus, pci_dev_t find_devfn, 96 struct udevice **devp) 97 { 98 struct udevice *dev; 99 100 for (device_find_first_child(bus, &dev); 101 dev; 102 device_find_next_child(&dev)) { 103 struct pci_child_platdata *pplat; 104 105 pplat = dev_get_parent_platdata(dev); 106 if (pplat && pplat->devfn == find_devfn) { 107 *devp = dev; 108 return 0; 109 } 110 } 111 112 return -ENODEV; 113 } 114 115 int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp) 116 { 117 struct udevice *bus; 118 int ret; 119 120 ret = pci_get_bus(PCI_BUS(bdf), &bus); 121 if (ret) 122 return ret; 123 return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp); 124 } 125 126 static int pci_device_matches_ids(struct udevice *dev, 127 struct pci_device_id *ids) 128 { 129 struct pci_child_platdata *pplat; 130 int i; 131 132 pplat = dev_get_parent_platdata(dev); 133 if (!pplat) 134 return -EINVAL; 135 for (i = 0; ids[i].vendor != 0; i++) { 136 if (pplat->vendor == ids[i].vendor && 137 pplat->device == ids[i].device) 138 return i; 139 } 140 141 return -EINVAL; 142 } 143 144 int pci_bus_find_devices(struct udevice *bus, struct pci_device_id *ids, 145 int *indexp, struct udevice **devp) 146 { 147 struct udevice *dev; 148 149 /* Scan all devices on this bus */ 150 for (device_find_first_child(bus, &dev); 151 dev; 152 device_find_next_child(&dev)) { 153 if (pci_device_matches_ids(dev, ids) >= 0) { 154 if ((*indexp)-- <= 0) { 155 *devp = dev; 156 return 0; 157 } 158 } 159 } 160 161 return -ENODEV; 162 } 163 164 int pci_find_device_id(struct pci_device_id *ids, int index, 165 
struct udevice **devp) 166 { 167 struct udevice *bus; 168 169 /* Scan all known buses */ 170 for (uclass_first_device(UCLASS_PCI, &bus); 171 bus; 172 uclass_next_device(&bus)) { 173 if (!pci_bus_find_devices(bus, ids, &index, devp)) 174 return 0; 175 } 176 *devp = NULL; 177 178 return -ENODEV; 179 } 180 181 static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor, 182 unsigned int device, int *indexp, 183 struct udevice **devp) 184 { 185 struct pci_child_platdata *pplat; 186 struct udevice *dev; 187 188 for (device_find_first_child(bus, &dev); 189 dev; 190 device_find_next_child(&dev)) { 191 pplat = dev_get_parent_platdata(dev); 192 if (pplat->vendor == vendor && pplat->device == device) { 193 if (!(*indexp)--) { 194 *devp = dev; 195 return 0; 196 } 197 } 198 } 199 200 return -ENODEV; 201 } 202 203 int dm_pci_find_device(unsigned int vendor, unsigned int device, int index, 204 struct udevice **devp) 205 { 206 struct udevice *bus; 207 208 /* Scan all known buses */ 209 for (uclass_first_device(UCLASS_PCI, &bus); 210 bus; 211 uclass_next_device(&bus)) { 212 if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp)) 213 return device_probe(*devp); 214 } 215 *devp = NULL; 216 217 return -ENODEV; 218 } 219 220 int dm_pci_find_class(uint find_class, int index, struct udevice **devp) 221 { 222 struct udevice *dev; 223 224 /* Scan all known buses */ 225 for (pci_find_first_device(&dev); 226 dev; 227 pci_find_next_device(&dev)) { 228 struct pci_child_platdata *pplat = dev_get_parent_platdata(dev); 229 230 if (pplat->class == find_class && !index--) { 231 *devp = dev; 232 return device_probe(*devp); 233 } 234 } 235 *devp = NULL; 236 237 return -ENODEV; 238 } 239 240 /** 241 * pci_retrain_link - Trigger PCIe link retrain for a device 242 * @udev: PCI device to retrain link 243 * @dev: PCI device and function address 244 * 245 * Return: 0 on success, negative error code on failure. 
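 *
 * Writing the Retrain Link bit in the Link Control register of a downstream
 * port makes the port re-run link training; while training is in progress the
 * Link Training bit in the Link Status register reads as 1.
 *
 * Illustrative usage (the vendor/device IDs are placeholders; any PCIe
 * device found through driver model works):
 *
 *	struct udevice *dev;
 *
 *	if (!dm_pci_find_device(vendor_id, device_id, 0, &dev))
 *		pci_retrain_link(dev, dm_pci_get_bdf(dev));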
 */
int pci_retrain_link(struct udevice *udev, pci_dev_t dev)
{
	u16 link_control, link_status;
	int pcie_cap_ptr;
	int timeout = 100;	/* up to 100 polls, 10 ms apart (~1 s) */

	/* Find the PCIe Capability */
	pcie_cap_ptr = dm_pci_find_capability(udev, PCI_CAP_ID_EXP);
	if (!pcie_cap_ptr) {
		printf("PCIe Capability not found for device %04x:%04x\n",
		       PCI_BUS(dev), PCI_DEV(dev));
		return -ENODEV;
	}

	/* Read the Link Control Register */
	dm_pci_read_config16(udev, pcie_cap_ptr + PCI_EXP_LNKCTL, &link_control);

	/* Set the Retrain Link bit (bit 5) */
	link_control |= (1 << 5);

	/* Write the updated value back to the Link Control Register */
	dm_pci_write_config16(udev, pcie_cap_ptr + PCI_EXP_LNKCTL, link_control);

	printf("Retrain triggered for device %04x:%04x\n", PCI_BUS(dev), PCI_DEV(dev));

	/* Wait for the link to complete training */
	while (timeout--) {
		/* Read the Link Status Register */
		dm_pci_read_config16(udev, pcie_cap_ptr + PCI_EXP_LNKSTA, &link_status);

		/* Training is complete once the Link Training bit clears */
		if (!(link_status & PCI_EXP_LNKSTA_LT))
			break;

		mdelay(10);	/* Wait 10 milliseconds before polling again */
	}

	if (link_status & PCI_EXP_LNKSTA_LT) {
		printf("Link training failed for device %04x:%04x\n",
		       PCI_BUS(dev), PCI_DEV(dev));
		return -ETIMEDOUT;
	}

	printf("Link Status for device %04x:%04x: 0x%x\n",
	       PCI_BUS(dev), PCI_DEV(dev), link_status);
	printf(" Speed: Gen%d\n", link_status & PCI_EXP_LNKSTA_CLS);
	printf(" Width: x%d\n", (link_status & PCI_EXP_LNKSTA_NLW) >> 4);
	printf(" Link Up: %s\n", (link_status & PCI_EXP_LNKSTA_LT) ? "No" : "Yes");

	return 0;
}

static int pci_is_bridge(pci_dev_t dev)
{
	u8 header_type;

	pci_read_config8(dev, PCI_HEADER_TYPE, &header_type);
	header_type = header_type & 0x7f;

	return (header_type == PCI_HEADER_TYPE_BRIDGE);
}

static void save_pci_state(pci_dev_t dev, struct pci_device_state *state)
{
	int i;

	/* Save BARs */
	for (i = 0; i < 6; i++)
		pci_read_config32(dev, PCI_BASE_ADDRESS_0 + i * 4, &state->bar[i]);

	/* Save Command Register */
	pci_read_config16(dev, PCI_COMMAND, &state->command);

	/* Save Bus Numbers (for bridge devices) */
	if (pci_is_bridge(dev)) {
		pci_read_config8(dev, PCI_PRIMARY_BUS, &state->primary_bus);
		pci_read_config8(dev, PCI_SECONDARY_BUS, &state->secondary_bus);
		pci_read_config8(dev, PCI_SUBORDINATE_BUS, &state->subordinate_bus);
	}
}

static void restore_pci_state(pci_dev_t dev, struct pci_device_state *state)
{
	int i;

	/* Restore BARs */
	for (i = 0; i < 6; i++)
		pci_write_config32(dev, PCI_BASE_ADDRESS_0 + i * 4,
				   state->bar[i]);

	/* Restore Command Register */
	pci_write_config16(dev, PCI_COMMAND, state->command);

	/* Restore Bus Numbers (for bridge devices) */
	if (pci_is_bridge(dev)) {
		pci_write_config8(dev, PCI_PRIMARY_BUS, state->primary_bus);
		pci_write_config8(dev, PCI_SECONDARY_BUS, state->secondary_bus);
		pci_write_config8(dev, PCI_SUBORDINATE_BUS, state->subordinate_bus);
	}
}

static int pci_flr(struct udevice *udev, pci_dev_t dev)
{
	u32 pcie_cap;
	u16 devctl;
	int pos;

	pos = dm_pci_find_capability(udev, PCI_CAP_ID_EXP);
	if (!pos) {
		printf("PCIe Capability not found\n");
		return -1;
	}

	/* Check if FLR is supported */
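	/*
	 * FLR support is advertised in the Device Capabilities register of
	 * the PCI Express capability. Once the Initiate FLR bit is set in
	 * Device Control, the spec gives the function up to 100 ms to
	 * complete the reset before its config space may be accessed again,
	 * which is why a 100 ms delay follows the write below.
	 */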
	dm_pci_read_config32(udev, pos + PCI_EXP_DEVCAP, &pcie_cap);
	if (!(pcie_cap & PCI_EXP_DEVCAP_FLR)) {
		printf("FLR not supported by device, pos 0x%x, cap 0x%x\n",
		       pos, pcie_cap);
		return -1;
	}

	/*
	 * Set the Initiate FLR bit in Device Control; once the reset has
	 * completed, restore the original Device Control value.
	 */
	dm_pci_read_config16(udev, pos + PCI_EXP_DEVCTL, &devctl);
	dm_pci_write_config16(udev, pos + PCI_EXP_DEVCTL,
			      devctl | PCI_EXP_DEVCTL_FLR);
	mdelay(100);
	dm_pci_write_config16(udev, pos + PCI_EXP_DEVCTL, devctl);

	return 0;
}

int pci_reset_function(struct udevice *udev, pci_dev_t dev)
{
	struct pci_device_state state;

	/* Save the current state */
	save_pci_state(dev, &state);

	/* Trigger FLR */
	if (pci_flr(udev, dev)) {
		printf("FLR failed\n");
		return -1;
	}

	/* Restore the saved state */
	restore_pci_state(dev, &state);

	printf("FLR completed and state restored for device %02x:%02x.%d\n",
	       PCI_BUS(dev), PCI_DEV(dev), PCI_FUNC(dev));

	return 0;
}

int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset,
			 unsigned long value, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->write_config)
		return -ENOSYS;
	return ops->write_config(bus, bdf, offset, value, size);
}

int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset,
			    u32 clr, u32 set)
{
	ulong val;
	int ret;

	ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32);
}

int pci_write_config(pci_dev_t bdf, int offset, unsigned long value,
		     enum pci_size_t size)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_write_config(bus, bdf, offset, value, size);
}

int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value,
			enum pci_size_t size)
{
	struct udevice *bus;

	for (bus = dev; device_is_on_pci_bus(bus);)
		bus = bus->parent;
	return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value,
				    size);
}

int pci_write_config32(pci_dev_t bdf, int offset, u32 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_32);
}

int pci_write_config16(pci_dev_t bdf, int offset, u16 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_16);
}

int pci_write_config8(pci_dev_t bdf, int offset, u8 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config8(struct udevice *dev, int offset, u8 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config16(struct udevice *dev, int offset, u16 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_16);
}

int dm_pci_write_config32(struct udevice *dev, int offset, u32 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_32);
}

int pci_bus_read_config(struct udevice *bus, pci_dev_t bdf, int offset,
			unsigned long *valuep, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->read_config)
		return -ENOSYS;
	return ops->read_config(bus, bdf, offset, valuep, size);
}

int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep,
		    enum pci_size_t size)
{
	struct udevice *bus;
	int ret;

	ret
= pci_get_bus(PCI_BUS(bdf), &bus); 495 if (ret) 496 return ret; 497 498 return pci_bus_read_config(bus, bdf, offset, valuep, size); 499 } 500 501 int dm_pci_read_config(struct udevice *dev, int offset, unsigned long *valuep, 502 enum pci_size_t size) 503 { 504 struct udevice *bus; 505 506 for (bus = dev; device_is_on_pci_bus(bus);) 507 bus = bus->parent; 508 return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep, 509 size); 510 } 511 512 int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep) 513 { 514 unsigned long value; 515 int ret; 516 517 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32); 518 if (ret) 519 return ret; 520 *valuep = value; 521 522 return 0; 523 } 524 525 int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep) 526 { 527 unsigned long value; 528 int ret; 529 530 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16); 531 if (ret) 532 return ret; 533 *valuep = value; 534 535 return 0; 536 } 537 538 int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep) 539 { 540 unsigned long value; 541 int ret; 542 543 ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8); 544 if (ret) 545 return ret; 546 *valuep = value; 547 548 return 0; 549 } 550 551 int dm_pci_read_config8(struct udevice *dev, int offset, u8 *valuep) 552 { 553 unsigned long value; 554 int ret; 555 556 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8); 557 if (ret) 558 return ret; 559 *valuep = value; 560 561 return 0; 562 } 563 564 int dm_pci_read_config16(struct udevice *dev, int offset, u16 *valuep) 565 { 566 unsigned long value; 567 int ret; 568 569 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16); 570 if (ret) 571 return ret; 572 *valuep = value; 573 574 return 0; 575 } 576 577 int dm_pci_read_config32(struct udevice *dev, int offset, u32 *valuep) 578 { 579 unsigned long value; 580 int ret; 581 582 ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32); 583 if (ret) 584 return ret; 585 *valuep = value; 586 587 return 0; 588 } 589 590 int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set) 591 { 592 u8 val; 593 int ret; 594 595 ret = dm_pci_read_config8(dev, offset, &val); 596 if (ret) 597 return ret; 598 val &= ~clr; 599 val |= set; 600 601 return dm_pci_write_config8(dev, offset, val); 602 } 603 604 int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set) 605 { 606 u16 val; 607 int ret; 608 609 ret = dm_pci_read_config16(dev, offset, &val); 610 if (ret) 611 return ret; 612 val &= ~clr; 613 val |= set; 614 615 return dm_pci_write_config16(dev, offset, val); 616 } 617 618 int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set) 619 { 620 u32 val; 621 int ret; 622 623 ret = dm_pci_read_config32(dev, offset, &val); 624 if (ret) 625 return ret; 626 val &= ~clr; 627 val |= set; 628 629 return dm_pci_write_config32(dev, offset, val); 630 } 631 632 static void set_vga_bridge_bits(struct udevice *dev) 633 { 634 struct udevice *parent = dev->parent; 635 u16 bc; 636 637 while (parent->seq != 0) { 638 dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc); 639 bc |= PCI_BRIDGE_CTL_VGA; 640 dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc); 641 parent = parent->parent; 642 } 643 } 644 645 int pci_auto_config_devices(struct udevice *bus) 646 { 647 struct pci_controller *hose = bus->uclass_priv; 648 struct pci_child_platdata *pplat; 649 unsigned int sub_bus; 650 struct udevice *dev; 651 int ret; 652 653 sub_bus = bus->seq; 654 debug("%s: start\n", __func__); 655 pciauto_config_init(hose); 656 for (ret = 
device_find_first_child(bus, &dev); 657 !ret && dev; 658 ret = device_find_next_child(&dev)) { 659 unsigned int max_bus; 660 int ret; 661 662 debug("%s: device %s\n", __func__, dev->name); 663 ret = dm_pciauto_config_device(dev); 664 if (ret < 0) 665 return ret; 666 max_bus = ret; 667 sub_bus = max(sub_bus, max_bus); 668 669 pplat = dev_get_parent_platdata(dev); 670 if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8)) 671 set_vga_bridge_bits(dev); 672 } 673 debug("%s: done\n", __func__); 674 675 return sub_bus; 676 } 677 678 int dm_pci_hose_probe_bus(struct udevice *bus) 679 { 680 int sub_bus; 681 int ret; 682 683 debug("%s\n", __func__); 684 685 sub_bus = pci_get_bus_max() + 1; 686 debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name); 687 dm_pciauto_prescan_setup_bridge(bus, sub_bus); 688 689 ret = device_probe(bus); 690 if (ret) { 691 debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name, 692 ret); 693 return ret; 694 } 695 if (sub_bus != bus->seq) { 696 printf("%s: Internal error, bus '%s' got seq %d, expected %d\n", 697 __func__, bus->name, bus->seq, sub_bus); 698 return -EPIPE; 699 } 700 sub_bus = pci_get_bus_max(); 701 dm_pciauto_postscan_setup_bridge(bus, sub_bus); 702 703 return sub_bus; 704 } 705 706 /** 707 * pci_match_one_device - Tell if a PCI device structure has a matching 708 * PCI device id structure 709 * @id: single PCI device id structure to match 710 * @find: the PCI device id structure to match against 711 * 712 * Returns true if the finding pci_device_id structure matched or false if 713 * there is no match. 714 */ 715 static bool pci_match_one_id(const struct pci_device_id *id, 716 const struct pci_device_id *find) 717 { 718 if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) && 719 (id->device == PCI_ANY_ID || id->device == find->device) && 720 (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) && 721 (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) && 722 !((id->class ^ find->class) & id->class_mask)) 723 return true; 724 725 return false; 726 } 727 728 /** 729 * pci_find_and_bind_driver() - Find and bind the right PCI driver 730 * 731 * This only looks at certain fields in the descriptor. 
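 * If no compiled-in driver entry matches, a generic driver is bound instead
 * ("pci_bridge_drv" for PCI-to-PCI bridges, "pci_generic_drv" for everything
 * else) so that the device still appears in the driver-model tree and can be
 * used.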
 *
 * @parent: Parent bus
 * @find_id: Specification of the driver to find
 * @bdf: Bus/device/function address - see PCI_BDF()
 * @devp: Returns a pointer to the device created
 * @return 0 if OK, -EPERM if the device is not needed before relocation and
 *	   therefore was not created, other -ve value on error
 */
static int pci_find_and_bind_driver(struct udevice *parent,
				    struct pci_device_id *find_id,
				    pci_dev_t bdf, struct udevice **devp)
{
	struct pci_driver_entry *start, *entry;
	const char *drv;
	int n_ents;
	int ret;
	char name[30], *str;
	bool bridge;

	*devp = NULL;

	debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__,
	      find_id->vendor, find_id->device);
	start = ll_entry_start(struct pci_driver_entry, pci_driver_entry);
	n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry);
	for (entry = start; entry != start + n_ents; entry++) {
		const struct pci_device_id *id;
		struct udevice *dev;
		const struct driver *drv;

		for (id = entry->match;
		     id->vendor || id->subvendor || id->class_mask;
		     id++) {
			if (!pci_match_one_id(id, find_id))
				continue;

			drv = entry->driver;

			/*
			 * In the pre-relocation phase, we only bind devices
			 * whose driver has the DM_FLAG_PRE_RELOC flag set, to
			 * save precious memory on platforms where the
			 * pre-relocation memory is very limited (e.g. when
			 * running from Cache As RAM).
			 */
			if (!(gd->flags & GD_FLG_RELOC) &&
			    !(drv->flags & DM_FLAG_PRE_RELOC))
				return -EPERM;

			/*
			 * We could pass the descriptor to the driver as
			 * platdata (instead of NULL) and allow its bind()
			 * method to return -ENOENT if it doesn't support this
			 * device. That way we could continue the search to
			 * find another driver. For now this doesn't seem
			 * necessary, so just bind the first match.
			 */
			ret = device_bind(parent, drv, drv->name, NULL, -1,
					  &dev);
			if (ret)
				goto error;
			debug("%s: Match found: %s\n", __func__, drv->name);
			dev->driver_data = find_id->driver_data;
			*devp = dev;
			return 0;
		}
	}

	bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI;
	/*
	 * In the pre-relocation phase, we only bind bridge devices, to save
	 * precious memory on platforms where the pre-relocation memory is
	 * very limited (e.g. when running from Cache As RAM).
	 */
	if (!(gd->flags & GD_FLG_RELOC) && !bridge)
		return -EPERM;

	/* Bind a generic driver so that the device can be used */
	sprintf(name, "pci_%x:%x.%x", parent->seq, PCI_DEV(bdf),
		PCI_FUNC(bdf));
	str = strdup(name);
	if (!str)
		return -ENOMEM;
	drv = bridge ?
"pci_bridge_drv" : "pci_generic_drv"; 815 816 ret = device_bind_driver(parent, drv, str, devp); 817 if (ret) { 818 debug("%s: Failed to bind generic driver: %d\n", __func__, ret); 819 free(str); 820 return ret; 821 } 822 debug("%s: No match found: bound generic driver instead\n", __func__); 823 824 return 0; 825 826 error: 827 debug("%s: No match found: error %d\n", __func__, ret); 828 return ret; 829 } 830 831 int pci_bind_bus_devices(struct udevice *bus) 832 { 833 ulong vendor, device; 834 ulong header_type; 835 pci_dev_t bdf, end; 836 bool found_multi; 837 int ret; 838 839 found_multi = false; 840 end = PCI_BDF(bus->seq, PCI_MAX_PCI_DEVICES - 1, 841 PCI_MAX_PCI_FUNCTIONS - 1); 842 for (bdf = PCI_BDF(bus->seq, 0, 0); bdf <= end; 843 bdf += PCI_BDF(0, 0, 1)) { 844 struct pci_child_platdata *pplat; 845 struct udevice *dev; 846 ulong class; 847 848 if (PCI_FUNC(bdf) && !found_multi) 849 continue; 850 /* Check only the first access, we don't expect problems */ 851 ret = pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE, 852 &header_type, PCI_SIZE_8); 853 if (ret) 854 goto error; 855 pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor, 856 PCI_SIZE_16); 857 if (vendor == 0xffff || vendor == 0x0000) 858 continue; 859 860 if (!PCI_FUNC(bdf)) 861 found_multi = header_type & 0x80; 862 863 debug("%s: bus %d/%s: found device %x, function %d\n", __func__, 864 bus->seq, bus->name, PCI_DEV(bdf), PCI_FUNC(bdf)); 865 pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device, 866 PCI_SIZE_16); 867 pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class, 868 PCI_SIZE_32); 869 class >>= 8; 870 871 /* Find this device in the device tree */ 872 ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev); 873 874 /* If nothing in the device tree, bind a device */ 875 if (ret == -ENODEV) { 876 struct pci_device_id find_id; 877 ulong val; 878 879 memset(&find_id, '\0', sizeof(find_id)); 880 find_id.vendor = vendor; 881 find_id.device = device; 882 find_id.class = class; 883 if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) { 884 pci_bus_read_config(bus, bdf, 885 PCI_SUBSYSTEM_VENDOR_ID, 886 &val, PCI_SIZE_32); 887 find_id.subvendor = val & 0xffff; 888 find_id.subdevice = val >> 16; 889 } 890 ret = pci_find_and_bind_driver(bus, &find_id, bdf, 891 &dev); 892 } 893 if (ret == -EPERM) 894 continue; 895 else if (ret) 896 return ret; 897 898 /* Update the platform data */ 899 pplat = dev_get_parent_platdata(dev); 900 pplat->devfn = PCI_MASK_BUS(bdf); 901 pplat->vendor = vendor; 902 pplat->device = device; 903 pplat->class = class; 904 } 905 906 return 0; 907 error: 908 printf("Cannot read bus configuration: %d\n", ret); 909 910 return ret; 911 } 912 913 static int decode_regions(struct pci_controller *hose, ofnode parent_node, 914 ofnode node) 915 { 916 int pci_addr_cells, addr_cells, size_cells; 917 phys_addr_t base = 0, size; 918 int cells_per_record; 919 const u32 *prop; 920 int len; 921 int i; 922 923 prop = ofnode_get_property(node, "ranges", &len); 924 if (!prop) 925 return -EINVAL; 926 pci_addr_cells = ofnode_read_simple_addr_cells(node); 927 addr_cells = ofnode_read_simple_addr_cells(parent_node); 928 size_cells = ofnode_read_simple_size_cells(node); 929 930 /* PCI addresses are always 3-cells */ 931 len /= sizeof(u32); 932 cells_per_record = pci_addr_cells + addr_cells + size_cells; 933 hose->region_count = 0; 934 debug("%s: len=%d, cells_per_record=%d\n", __func__, len, 935 cells_per_record); 936 for (i = 0; i < MAX_PCI_REGIONS; i++, len -= cells_per_record) { 937 u64 pci_addr, addr, size; 938 int space_code; 939 u32 
flags;
		int type;
		int pos;
		int n;

		if (len < cells_per_record)
			break;
		flags = fdt32_to_cpu(prop[0]);
		space_code = (flags >> 24) & 3;
		pci_addr = fdtdec_get_number(prop + 1, 2);
		prop += pci_addr_cells;
		addr = fdtdec_get_number(prop, addr_cells);
		prop += addr_cells;
		size = fdtdec_get_number(prop, size_cells);
		prop += size_cells;
		debug("%s: region %d, pci_addr=%" PRIx64 ", addr=%" PRIx64
		      ", size=%" PRIx64 ", space_code=%d\n", __func__,
		      hose->region_count, pci_addr, addr, size, space_code);
		if (space_code & 2) {
			type = flags & (1U << 30) ? PCI_REGION_PREFETCH :
					PCI_REGION_MEM;
#ifndef CONFIG_SYS_PCI_64BIT
			if (upper_32_bits(pci_addr))
				continue;
#endif
		} else if (space_code & 1) {
			type = PCI_REGION_IO;
		} else {
			continue;
		}

		/*
		 * Merge this range into an existing region of the same type,
		 * if there is one. Use a separate index variable so that the
		 * outer loop counter is not disturbed.
		 */
		pos = -1;
		for (n = 0; n < hose->region_count; n++) {
			if (hose->regions[n].flags == type) {
#if defined(CONFIG_SYS_PCI_64BIT)
				if (type == PCI_REGION_MEM) {
					if ((upper_32_bits(pci_addr) &&
					     !upper_32_bits(hose->regions[n].bus_start)) ||
					    (!upper_32_bits(pci_addr) &&
					     upper_32_bits(hose->regions[n].bus_start)))
						continue;
				}
#endif
				pos = n;
			}
		}
		if (pos == -1)
			pos = hose->region_count++;
		debug(" - type=%d, pos=%d\n", type, pos);
		pci_set_region(hose->regions + pos, pci_addr, addr, size, type);
	}

	/* Add a region for our local memory */
	size = gd->ram_size;
#ifdef CONFIG_SYS_SDRAM_BASE
	base = CONFIG_SYS_SDRAM_BASE;
#endif
	if (gd->pci_ram_top && gd->pci_ram_top < base + size)
		size = gd->pci_ram_top - base;
	pci_set_region(hose->regions + hose->region_count++, base, base,
		       size, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);

	return 0;
}

static int pci_uclass_pre_probe(struct udevice *bus)
{
	struct pci_controller *hose;
	int ret;

	debug("%s, bus=%d/%s, parent=%s\n", __func__, bus->seq, bus->name,
	      bus->parent->name);
	hose = bus->uclass_priv;

	/* For bridges, use the top-level PCI controller */
	if (!device_is_on_pci_bus(bus)) {
		hose->ctlr = bus;
		ret = decode_regions(hose, dev_ofnode(bus->parent),
				     dev_ofnode(bus));
		if (ret) {
			debug("%s: Cannot decode regions\n", __func__);
			return ret;
		}
	} else {
		struct pci_controller *parent_hose;

		parent_hose = dev_get_uclass_priv(bus->parent);
		hose->ctlr = parent_hose->bus;
	}
	hose->bus = bus;
	hose->first_busno = bus->seq;
	hose->last_busno = bus->seq;

	return 0;
}

static int pci_uclass_post_probe(struct udevice *bus)
{
	int ret;

	debug("%s: probing bus %d\n", __func__, bus->seq);
	ret = pci_bind_bus_devices(bus);
	if (ret)
		return ret;

#ifdef CONFIG_PCI_PNP
	ret = pci_auto_config_devices(bus);
	if (ret < 0)
		return ret;
#endif

#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
	/*
	 * Per Intel FSP specification, we should call FSP notify API to
	 * inform FSP that PCI enumeration has been done so that FSP will
	 * do any necessary initialization as required by the chipset's
	 * BIOS Writer's Guide (BWG).
	 *
	 * Unfortunately we have to put this call here as with driver model,
	 * the enumeration is all done on a lazy basis as needed, so until
	 * something is touched on PCI it won't happen.
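	 * In practice the first PCI access after boot (for example the "pci"
	 * shell command, or a driver probing a device behind this controller)
	 * is typically what triggers the enumeration.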
1058 * 1059 * Note we only call this 1) after U-Boot is relocated, and 2) 1060 * root bus has finished probing. 1061 */ 1062 if ((gd->flags & GD_FLG_RELOC) && (bus->seq == 0)) { 1063 ret = fsp_init_phase_pci(); 1064 if (ret) 1065 return ret; 1066 } 1067 #endif 1068 1069 return 0; 1070 } 1071 1072 static int pci_uclass_child_post_bind(struct udevice *dev) 1073 { 1074 struct pci_child_platdata *pplat; 1075 struct fdt_pci_addr addr; 1076 int ret; 1077 1078 if (!dev_of_valid(dev)) 1079 return 0; 1080 1081 /* 1082 * We could read vendor, device, class if available. But for now we 1083 * just check the address. 1084 */ 1085 pplat = dev_get_parent_platdata(dev); 1086 ret = ofnode_read_pci_addr(dev_ofnode(dev), FDT_PCI_SPACE_CONFIG, "reg", 1087 &addr); 1088 1089 if (ret) { 1090 if (ret != -ENOENT) 1091 return -EINVAL; 1092 } else { 1093 /* extract the devfn from fdt_pci_addr */ 1094 pplat->devfn = addr.phys_hi & 0xff00; 1095 } 1096 1097 return 0; 1098 } 1099 1100 static int pci_bridge_read_config(struct udevice *bus, pci_dev_t bdf, 1101 uint offset, ulong *valuep, 1102 enum pci_size_t size) 1103 { 1104 struct pci_controller *hose = bus->uclass_priv; 1105 1106 return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size); 1107 } 1108 1109 static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf, 1110 uint offset, ulong value, 1111 enum pci_size_t size) 1112 { 1113 struct pci_controller *hose = bus->uclass_priv; 1114 1115 return pci_bus_write_config(hose->ctlr, bdf, offset, value, size); 1116 } 1117 1118 static int skip_to_next_device(struct udevice *bus, struct udevice **devp) 1119 { 1120 struct udevice *dev; 1121 1122 /* 1123 * Scan through all the PCI controllers. On x86 there will only be one 1124 * but that is not necessarily true on other hardware. 1125 */ 1126 do { 1127 device_find_first_child(bus, &dev); 1128 if (dev) { 1129 *devp = dev; 1130 return 0; 1131 } 1132 uclass_next_device(&bus); 1133 } while (bus); 1134 1135 return 0; 1136 } 1137 1138 int pci_find_next_device(struct udevice **devp) 1139 { 1140 struct udevice *child = *devp; 1141 struct udevice *bus = child->parent; 1142 1143 /* First try all the siblings */ 1144 *devp = NULL; 1145 while (child) { 1146 device_find_next_child(&child); 1147 if (child) { 1148 *devp = child; 1149 return 0; 1150 } 1151 } 1152 1153 /* We ran out of siblings. Try the next bus */ 1154 uclass_next_device(&bus); 1155 1156 return bus ? 
skip_to_next_device(bus, devp) : 0; 1157 } 1158 1159 int pci_find_first_device(struct udevice **devp) 1160 { 1161 struct udevice *bus; 1162 1163 *devp = NULL; 1164 uclass_first_device(UCLASS_PCI, &bus); 1165 1166 return skip_to_next_device(bus, devp); 1167 } 1168 1169 ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size) 1170 { 1171 switch (size) { 1172 case PCI_SIZE_8: 1173 return (value >> ((offset & 3) * 8)) & 0xff; 1174 case PCI_SIZE_16: 1175 return (value >> ((offset & 2) * 8)) & 0xffff; 1176 default: 1177 return value; 1178 } 1179 } 1180 1181 ulong pci_conv_size_to_32(ulong old, ulong value, uint offset, 1182 enum pci_size_t size) 1183 { 1184 uint off_mask; 1185 uint val_mask, shift; 1186 ulong ldata, mask; 1187 1188 switch (size) { 1189 case PCI_SIZE_8: 1190 off_mask = 3; 1191 val_mask = 0xff; 1192 break; 1193 case PCI_SIZE_16: 1194 off_mask = 2; 1195 val_mask = 0xffff; 1196 break; 1197 default: 1198 return value; 1199 } 1200 shift = (offset & off_mask) * 8; 1201 ldata = (value & val_mask) << shift; 1202 mask = val_mask << shift; 1203 value = (old & ~mask) | ldata; 1204 1205 return value; 1206 } 1207 1208 int pci_get_regions(struct udevice *dev, struct pci_region **iop, 1209 struct pci_region **memp, struct pci_region **prefp) 1210 { 1211 struct udevice *bus = pci_get_controller(dev); 1212 struct pci_controller *hose = dev_get_uclass_priv(bus); 1213 int i; 1214 1215 *iop = NULL; 1216 *memp = NULL; 1217 *prefp = NULL; 1218 for (i = 0; i < hose->region_count; i++) { 1219 switch (hose->regions[i].flags) { 1220 case PCI_REGION_IO: 1221 if (!*iop || (*iop)->size < hose->regions[i].size) 1222 *iop = hose->regions + i; 1223 break; 1224 case PCI_REGION_MEM: 1225 if (!*memp || (*memp)->size < hose->regions[i].size) 1226 *memp = hose->regions + i; 1227 break; 1228 case (PCI_REGION_MEM | PCI_REGION_PREFETCH): 1229 if (!*prefp || (*prefp)->size < hose->regions[i].size) 1230 *prefp = hose->regions + i; 1231 break; 1232 } 1233 } 1234 1235 return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL); 1236 } 1237 1238 u32 dm_pci_read_bar32(struct udevice *dev, int barnum) 1239 { 1240 u32 addr; 1241 int bar; 1242 1243 bar = PCI_BASE_ADDRESS_0 + barnum * 4; 1244 dm_pci_read_config32(dev, bar, &addr); 1245 if (addr & PCI_BASE_ADDRESS_SPACE_IO) 1246 return addr & PCI_BASE_ADDRESS_IO_MASK; 1247 else 1248 return addr & PCI_BASE_ADDRESS_MEM_MASK; 1249 } 1250 1251 void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr) 1252 { 1253 int bar; 1254 1255 bar = PCI_BASE_ADDRESS_0 + barnum * 4; 1256 dm_pci_write_config32(dev, bar, addr); 1257 } 1258 1259 static int _dm_pci_bus_to_phys(struct udevice *ctlr, 1260 pci_addr_t bus_addr, unsigned long flags, 1261 unsigned long skip_mask, phys_addr_t *pa) 1262 { 1263 struct pci_controller *hose = dev_get_uclass_priv(ctlr); 1264 struct pci_region *res; 1265 int i; 1266 1267 for (i = 0; i < hose->region_count; i++) { 1268 res = &hose->regions[i]; 1269 1270 if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0) 1271 continue; 1272 1273 if (res->flags & skip_mask) 1274 continue; 1275 1276 if (bus_addr >= res->bus_start && 1277 (bus_addr - res->bus_start) < res->size) { 1278 *pa = (bus_addr - res->bus_start + res->phys_start); 1279 return 0; 1280 } 1281 } 1282 1283 return 1; 1284 } 1285 1286 phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr, 1287 unsigned long flags) 1288 { 1289 phys_addr_t phys_addr = 0; 1290 struct udevice *ctlr; 1291 int ret; 1292 1293 /* The root controller has the region information */ 1294 ctlr = 
pci_get_controller(dev); 1295 1296 /* 1297 * if PCI_REGION_MEM is set we do a two pass search with preference 1298 * on matches that don't have PCI_REGION_SYS_MEMORY set 1299 */ 1300 if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) { 1301 ret = _dm_pci_bus_to_phys(ctlr, bus_addr, 1302 flags, PCI_REGION_SYS_MEMORY, 1303 &phys_addr); 1304 if (!ret) 1305 return phys_addr; 1306 } 1307 1308 ret = _dm_pci_bus_to_phys(ctlr, bus_addr, flags, 0, &phys_addr); 1309 1310 if (ret) 1311 puts("pci_hose_bus_to_phys: invalid physical address\n"); 1312 1313 return phys_addr; 1314 } 1315 1316 int _dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr, 1317 unsigned long flags, unsigned long skip_mask, 1318 pci_addr_t *ba) 1319 { 1320 struct pci_region *res; 1321 struct udevice *ctlr; 1322 pci_addr_t bus_addr; 1323 int i; 1324 struct pci_controller *hose; 1325 1326 /* The root controller has the region information */ 1327 ctlr = pci_get_controller(dev); 1328 hose = dev_get_uclass_priv(ctlr); 1329 1330 for (i = 0; i < hose->region_count; i++) { 1331 res = &hose->regions[i]; 1332 1333 if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0) 1334 continue; 1335 1336 if (res->flags & skip_mask) 1337 continue; 1338 1339 bus_addr = phys_addr - res->phys_start + res->bus_start; 1340 1341 if (bus_addr >= res->bus_start && 1342 (bus_addr - res->bus_start) < res->size) { 1343 *ba = bus_addr; 1344 return 0; 1345 } 1346 } 1347 1348 return 1; 1349 } 1350 1351 pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr, 1352 unsigned long flags) 1353 { 1354 pci_addr_t bus_addr = 0; 1355 int ret; 1356 1357 /* 1358 * if PCI_REGION_MEM is set we do a two pass search with preference 1359 * on matches that don't have PCI_REGION_SYS_MEMORY set 1360 */ 1361 if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) { 1362 ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 1363 PCI_REGION_SYS_MEMORY, &bus_addr); 1364 if (!ret) 1365 return bus_addr; 1366 } 1367 1368 ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 0, &bus_addr); 1369 1370 if (ret) 1371 puts("pci_hose_phys_to_bus: invalid physical address\n"); 1372 1373 return bus_addr; 1374 } 1375 1376 void *dm_pci_map_bar(struct udevice *dev, int bar, int flags) 1377 { 1378 pci_addr_t pci_bus_addr; 1379 u32 bar_response; 1380 1381 /* read BAR address */ 1382 dm_pci_read_config32(dev, bar, &bar_response); 1383 pci_bus_addr = (pci_addr_t)(bar_response & ~0xf); 1384 1385 #if defined(CONFIG_SYS_PCI_64BIT) 1386 if (bar_response & PCI_BASE_ADDRESS_MEM_TYPE_64) { 1387 dm_pci_read_config32(dev, bar + 4, &bar_response); 1388 pci_bus_addr |= (pci_addr_t)bar_response << 32; } 1389 #endif /* CONFIG_SYS_PCI_64BIT */ 1390 /* 1391 * Pass "0" as the length argument to pci_bus_to_virt. The arg 1392 * isn't actualy used on any platform because u-boot assumes a static 1393 * linear mapping. In the future, this could read the BAR size 1394 * and pass that as the size if needed. 
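	 * (If that is ever done, the size can be obtained with the usual BAR
	 * sizing sequence: write all-ones to the BAR, read back the resulting
	 * mask, then restore the original value.)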
1395 */ 1396 return dm_pci_bus_to_virt(dev, pci_bus_addr, flags, 0, MAP_NOCACHE); 1397 } 1398 1399 static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap) 1400 { 1401 int ttl = PCI_FIND_CAP_TTL; 1402 u8 id; 1403 u16 ent; 1404 1405 dm_pci_read_config8(dev, pos, &pos); 1406 1407 while (ttl--) { 1408 if (pos < PCI_STD_HEADER_SIZEOF) 1409 break; 1410 pos &= ~3; 1411 dm_pci_read_config16(dev, pos, &ent); 1412 1413 id = ent & 0xff; 1414 if (id == 0xff) 1415 break; 1416 if (id == cap) 1417 return pos; 1418 pos = (ent >> 8); 1419 } 1420 1421 return 0; 1422 } 1423 1424 int dm_pci_find_next_capability(struct udevice *dev, u8 start, int cap) 1425 { 1426 return _dm_pci_find_next_capability(dev, start + PCI_CAP_LIST_NEXT, 1427 cap); 1428 } 1429 1430 int dm_pci_find_capability(struct udevice *dev, int cap) 1431 { 1432 u16 status; 1433 u8 header_type; 1434 u8 pos; 1435 1436 dm_pci_read_config16(dev, PCI_STATUS, &status); 1437 if (!(status & PCI_STATUS_CAP_LIST)) 1438 return 0; 1439 1440 dm_pci_read_config8(dev, PCI_HEADER_TYPE, &header_type); 1441 if ((header_type & 0x7f) == PCI_HEADER_TYPE_CARDBUS) 1442 pos = PCI_CB_CAPABILITY_LIST; 1443 else 1444 pos = PCI_CAPABILITY_LIST; 1445 1446 return _dm_pci_find_next_capability(dev, pos, cap); 1447 } 1448 1449 int dm_pci_find_next_ext_capability(struct udevice *dev, int start, int cap) 1450 { 1451 u32 header; 1452 int ttl; 1453 int pos = PCI_CFG_SPACE_SIZE; 1454 1455 /* minimum 8 bytes per capability */ 1456 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; 1457 1458 if (start) 1459 pos = start; 1460 1461 dm_pci_read_config32(dev, pos, &header); 1462 /* 1463 * If we have no capabilities, this is indicated by cap ID, 1464 * cap version and next pointer all being 0. 1465 */ 1466 if (header == 0) 1467 return 0; 1468 1469 while (ttl--) { 1470 if (PCI_EXT_CAP_ID(header) == cap) 1471 return pos; 1472 1473 pos = PCI_EXT_CAP_NEXT(header); 1474 if (pos < PCI_CFG_SPACE_SIZE) 1475 break; 1476 1477 dm_pci_read_config32(dev, pos, &header); 1478 } 1479 1480 return 0; 1481 } 1482 1483 int dm_pci_find_ext_capability(struct udevice *dev, int cap) 1484 { 1485 return dm_pci_find_next_ext_capability(dev, 0, cap); 1486 } 1487 1488 UCLASS_DRIVER(pci) = { 1489 .id = UCLASS_PCI, 1490 .name = "pci", 1491 .flags = DM_UC_FLAG_SEQ_ALIAS, 1492 .post_bind = dm_scan_fdt_dev, 1493 .pre_probe = pci_uclass_pre_probe, 1494 .post_probe = pci_uclass_post_probe, 1495 .child_post_bind = pci_uclass_child_post_bind, 1496 .per_device_auto_alloc_size = sizeof(struct pci_controller), 1497 .per_child_platdata_auto_alloc_size = 1498 sizeof(struct pci_child_platdata), 1499 }; 1500 1501 static const struct dm_pci_ops pci_bridge_ops = { 1502 .read_config = pci_bridge_read_config, 1503 .write_config = pci_bridge_write_config, 1504 }; 1505 1506 static const struct udevice_id pci_bridge_ids[] = { 1507 { .compatible = "pci-bridge" }, 1508 { } 1509 }; 1510 1511 U_BOOT_DRIVER(pci_bridge_drv) = { 1512 .name = "pci_bridge_drv", 1513 .id = UCLASS_PCI, 1514 .of_match = pci_bridge_ids, 1515 .ops = &pci_bridge_ops, 1516 }; 1517 1518 UCLASS_DRIVER(pci_generic) = { 1519 .id = UCLASS_PCI_GENERIC, 1520 .name = "pci_generic", 1521 }; 1522 1523 static const struct udevice_id pci_generic_ids[] = { 1524 { .compatible = "pci-generic" }, 1525 { } 1526 }; 1527 1528 U_BOOT_DRIVER(pci_generic_drv) = { 1529 .name = "pci_generic_drv", 1530 .id = UCLASS_PCI_GENERIC, 1531 .of_match = pci_generic_ids, 1532 }; 1533 1534 void pci_init(void) 1535 { 1536 struct udevice *bus; 1537 1538 /* 1539 * Enumerate all known 
controller devices. Enumeration has the side- 1540 * effect of probing them, so PCIe devices will be enumerated too. 1541 */ 1542 for (uclass_first_device(UCLASS_PCI, &bus); 1543 bus; 1544 uclass_next_device(&bus)) { 1545 ; 1546 } 1547 } 1548