Lines matching references to "nhi" (each entry gives the source line number, the matching line of code, and the enclosing function; the file is the Linux Thunderbolt NHI driver, drivers/thunderbolt/nhi.c):
43 bit += ring->nhi->hop_count; in ring_interrupt_index()
68 index = ring->hop + ring->nhi->hop_count; in ring_interrupt_active()
74 misc = ioread32(ring->nhi->iobase + REG_DMA_MISC); in ring_interrupt_active()
77 iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC); in ring_interrupt_active()
80 ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE; in ring_interrupt_active()
90 old = ioread32(ring->nhi->iobase + reg); in ring_interrupt_active()
96 dev_dbg(&ring->nhi->pdev->dev, in ring_interrupt_active()
101 dev_WARN(&ring->nhi->pdev->dev, in ring_interrupt_active()
105 iowrite32(new, ring->nhi->iobase + reg); in ring_interrupt_active()
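
The ring_interrupt_index()/ring_interrupt_active() fragments above (source lines 43-105) locate and toggle the per-ring interrupt enable bit: TX rings use their HopID directly, RX rings are offset by hop_count, and the bit is flipped with a read-modify-write of a REG_RING_INTERRUPT_BASE dword. The sketch below is illustrative only; the 32-bits-per-register packing is inferred from the 4 * i addressing used elsewhere in the listing, and the MSI-X vector routing writes (REG_INT_VEC_ALLOC_BASE) visible above are left out.

static unsigned int ring_interrupt_bit(const struct tb_ring *ring)
{
        unsigned int bit = ring->hop;

        /* RX enable bits follow the block of TX bits */
        if (!ring->is_tx)
                bit += ring->nhi->hop_count;
        return bit;
}

static void ring_interrupt_set(struct tb_ring *ring, bool active)
{
        unsigned int bit = ring_interrupt_bit(ring);
        void __iomem *reg = ring->nhi->iobase + REG_RING_INTERRUPT_BASE +
                            4 * (bit / 32);
        u32 val = ioread32(reg);

        /* Read-modify-write: only this ring's bit changes */
        if (active)
                val |= BIT(bit % 32);
        else
                val &= ~BIT(bit % 32);
        iowrite32(val, reg);
}
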
113 static void nhi_disable_interrupts(struct tb_nhi *nhi) in nhi_disable_interrupts() argument
117 for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++) in nhi_disable_interrupts()
118 iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i); in nhi_disable_interrupts()
121 for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++) in nhi_disable_interrupts()
122 ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i); in nhi_disable_interrupts()
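
nhi_disable_interrupts() (source lines 113-122) pairs two loops: the first zeroes every interrupt-enable register, the second reads each notify/status register purely for its side effect, since those registers clear on read. Spelled out as a sketch using the RING_*_REG_COUNT() helpers named above:

static void nhi_disable_interrupts_sketch(struct tb_nhi *nhi)
{
        int i;

        /* Mask everything: enable bits are packed 32 per register */
        for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
                iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

        /* The notify registers clear on read; the values are discarded */
        for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
                ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}
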
129 void __iomem *io = ring->nhi->iobase; in ring_desc_base()
137 void __iomem *io = ring->nhi->iobase; in ring_options_base()
339 val = ioread32(ring->nhi->iobase + reg); in __ring_interrupt_mask()
344 iowrite32(val, ring->nhi->iobase + reg); in __ring_interrupt_mask()
372 spin_lock_irqsave(&ring->nhi->lock, flags); in tb_ring_poll_complete()
377 spin_unlock_irqrestore(&ring->nhi->lock, flags); in tb_ring_poll_complete()
385 spin_lock(&ring->nhi->lock); in ring_msix()
389 spin_unlock(&ring->nhi->lock); in ring_msix()
396 struct tb_nhi *nhi = ring->nhi; in ring_request_msix() local
400 if (!nhi->pdev->msix_enabled) in ring_request_msix()
403 ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL); in ring_request_msix()
409 ret = pci_irq_vector(ring->nhi->pdev, ring->vector); in ring_request_msix()
423 ida_simple_remove(&nhi->msix_ida, ring->vector); in ring_request_msix()
434 ida_simple_remove(&ring->nhi->msix_ida, ring->vector); in ring_release_msix()
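
ring_request_msix()/ring_release_msix() (source lines 396-434) show the per-ring vector bookkeeping: an IDA hands out a vector index, pci_irq_vector() converts it into a Linux IRQ number, and the slot goes back to the IDA on release. A condensed sketch of the request path (error handling trimmed; ring_msix is the handler wired up here, as in the fragments above):

static int ring_request_msix_sketch(struct tb_ring *ring, bool no_suspend)
{
        struct tb_nhi *nhi = ring->nhi;
        unsigned long irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
        int ret;

        if (!nhi->pdev->msix_enabled)
                return 0;       /* single shared vector; nothing to request */

        /* Reserve a free vector index for this ring */
        ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
        if (ret < 0)
                return ret;
        ring->vector = ret;

        /* Translate the vector index into its Linux IRQ number */
        ret = pci_irq_vector(nhi->pdev, ring->vector);
        if (ret < 0)
                goto err_ida;
        ring->irq = ret;

        ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
        if (ret)
                goto err_ida;
        return 0;

err_ida:
        ida_simple_remove(&nhi->msix_ida, ring->vector);
        return ret;
}
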
439 static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring) in nhi_alloc_hop() argument
443 spin_lock_irq(&nhi->lock); in nhi_alloc_hop()
452 for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) { in nhi_alloc_hop()
454 if (!nhi->tx_rings[i]) { in nhi_alloc_hop()
459 if (!nhi->rx_rings[i]) { in nhi_alloc_hop()
467 if (ring->hop < 0 || ring->hop >= nhi->hop_count) { in nhi_alloc_hop()
468 dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop); in nhi_alloc_hop()
472 if (ring->is_tx && nhi->tx_rings[ring->hop]) { in nhi_alloc_hop()
473 dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n", in nhi_alloc_hop()
477 } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) { in nhi_alloc_hop()
478 dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n", in nhi_alloc_hop()
485 nhi->tx_rings[ring->hop] = ring; in nhi_alloc_hop()
487 nhi->rx_rings[ring->hop] = ring; in nhi_alloc_hop()
490 spin_unlock_irq(&nhi->lock); in nhi_alloc_hop()
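
nhi_alloc_hop() (source lines 439-490) encodes the HopID policy under nhi->lock: a negative hop means "pick the first free HopID at or above RING_FIRST_USABLE_HOPID in this direction", an explicit hop is range-checked against hop_count, a hop already claimed in the matching tx_rings[]/rx_rings[] table is rejected, and otherwise the ring is recorded there. A compressed sketch of that policy:

static int nhi_alloc_hop_sketch(struct tb_nhi *nhi, struct tb_ring *ring)
{
        int ret = 0;

        spin_lock_irq(&nhi->lock);

        if (ring->hop < 0) {
                unsigned int i;

                /* Automatic allocation: first free HopID in our direction */
                for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
                        if (ring->is_tx ? !nhi->tx_rings[i] : !nhi->rx_rings[i]) {
                                ring->hop = i;
                                break;
                        }
                }
        }

        if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
                dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
                ret = -EINVAL;
        } else if (ring->is_tx ? nhi->tx_rings[ring->hop] != NULL
                               : nhi->rx_rings[ring->hop] != NULL) {
                dev_warn(&nhi->pdev->dev, "%s hop %d already allocated\n",
                         ring->is_tx ? "TX" : "RX", ring->hop);
                ret = -EBUSY;
        } else if (ring->is_tx) {
                nhi->tx_rings[ring->hop] = ring;
        } else {
                nhi->rx_rings[ring->hop] = ring;
        }

        spin_unlock_irq(&nhi->lock);
        return ret;
}
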
495 static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size, in tb_ring_alloc() argument
503 dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n", in tb_ring_alloc()
515 ring->nhi = nhi; in tb_ring_alloc()
528 ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev, in tb_ring_alloc()
537 if (nhi_alloc_hop(nhi, ring)) in tb_ring_alloc()
545 dma_free_coherent(&ring->nhi->pdev->dev, in tb_ring_alloc()
561 struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, in tb_ring_alloc_tx() argument
564 return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL); in tb_ring_alloc_tx()
581 struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, in tb_ring_alloc_rx() argument
585 return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask, in tb_ring_alloc_rx()
600 spin_lock_irq(&ring->nhi->lock); in tb_ring_start()
602 if (ring->nhi->going_away) in tb_ring_start()
605 dev_WARN(&ring->nhi->pdev->dev, "ring already started\n"); in tb_ring_start()
608 dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n", in tb_ring_start()
636 spin_unlock_irq(&ring->nhi->lock); in tb_ring_start()
655 spin_lock_irq(&ring->nhi->lock); in tb_ring_stop()
657 dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n", in tb_ring_stop()
659 if (ring->nhi->going_away) in tb_ring_stop()
662 dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n", in tb_ring_stop()
678 spin_unlock_irq(&ring->nhi->lock); in tb_ring_stop()
700 spin_lock_irq(&ring->nhi->lock); in tb_ring_free()
706 ring->nhi->tx_rings[ring->hop] = NULL; in tb_ring_free()
708 ring->nhi->rx_rings[ring->hop] = NULL; in tb_ring_free()
711 dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n", in tb_ring_free()
714 spin_unlock_irq(&ring->nhi->lock); in tb_ring_free()
718 dma_free_coherent(&ring->nhi->pdev->dev, in tb_ring_free()
726 dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring), in tb_ring_free()
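
tb_ring_alloc_tx()/tb_ring_alloc_rx() together with tb_ring_start(), tb_ring_stop() and tb_ring_free() (source lines 561-726) form the public ring lifecycle. A hypothetical caller might look like the sketch below; it assumes the usual prototypes from <linux/thunderbolt.h> (the TX wrapper taking nhi, hop, size and flags) and that a negative hop requests automatic HopID allocation, as nhi_alloc_hop() above suggests.

#include <linux/thunderbolt.h>

static int example_use_tx_ring(struct tb_nhi *nhi)
{
        struct tb_ring *ring;

        /* hop = -1: let nhi_alloc_hop() pick a free HopID */
        ring = tb_ring_alloc_tx(nhi, -1, 16, 0);
        if (!ring)
                return -ENOMEM;

        tb_ring_start(ring);    /* program and enable the ring */

        /* ... enqueue frames (e.g. tb_ring_tx()) and handle completions ... */

        tb_ring_stop(ring);     /* quiesce before giving up the HopID */
        tb_ring_free(ring);     /* releases the hop slot and descriptor memory */
        return 0;
}
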
748 int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data) in nhi_mailbox_cmd() argument
753 iowrite32(data, nhi->iobase + REG_INMAIL_DATA); in nhi_mailbox_cmd()
755 val = ioread32(nhi->iobase + REG_INMAIL_CMD); in nhi_mailbox_cmd()
758 iowrite32(val, nhi->iobase + REG_INMAIL_CMD); in nhi_mailbox_cmd()
762 val = ioread32(nhi->iobase + REG_INMAIL_CMD); in nhi_mailbox_cmd()
783 enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi) in nhi_mailbox_mode() argument
787 val = ioread32(nhi->iobase + REG_OUTMAIL_CMD); in nhi_mailbox_mode()
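
nhi_mailbox_cmd()/nhi_mailbox_mode() (source lines 748-787) show the firmware mailbox handshake: the payload is written to REG_INMAIL_DATA, the command is posted through REG_INMAIL_CMD, and the driver then polls that register until the request completes; the current firmware mode is read back from REG_OUTMAIL_CMD. The sketch below illustrates the pattern only; the request/error bit names, the poll interval and the 500 ms timeout are assumptions, not taken from the listing.

/* Bit names below are illustrative assumptions */
#define INMAIL_CMD_REQUEST      BIT(31)
#define INMAIL_CMD_ERROR        BIT(30)

static int nhi_mailbox_cmd_sketch(struct tb_nhi *nhi, u32 cmd, u32 data)
{
        unsigned long end = jiffies + msecs_to_jiffies(500);
        u32 val;

        iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

        val = ioread32(nhi->iobase + REG_INMAIL_CMD);
        val &= ~INMAIL_CMD_ERROR;               /* clear a stale error bit */
        val |= cmd | INMAIL_CMD_REQUEST;        /* post the command */
        iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

        /* Firmware clears the request bit once it has consumed the command */
        do {
                val = ioread32(nhi->iobase + REG_INMAIL_CMD);
                if (!(val & INMAIL_CMD_REQUEST))
                        return (val & INMAIL_CMD_ERROR) ? -EIO : 0;
                usleep_range(100, 200);
        } while (time_before(jiffies, end));

        return -ETIMEDOUT;
}
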
796 struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work); in nhi_interrupt_work() local
803 spin_lock_irq(&nhi->lock); in nhi_interrupt_work()
810 for (bit = 0; bit < 3 * nhi->hop_count; bit++) { in nhi_interrupt_work()
812 value = ioread32(nhi->iobase in nhi_interrupt_work()
815 if (++hop == nhi->hop_count) { in nhi_interrupt_work()
822 dev_warn(&nhi->pdev->dev, in nhi_interrupt_work()
828 ring = nhi->tx_rings[hop]; in nhi_interrupt_work()
830 ring = nhi->rx_rings[hop]; in nhi_interrupt_work()
832 dev_warn(&nhi->pdev->dev, in nhi_interrupt_work()
843 spin_unlock_irq(&nhi->lock); in nhi_interrupt_work()
848 struct tb_nhi *nhi = data; in nhi_msi() local
849 schedule_work(&nhi->interrupt_work); in nhi_msi()
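
When only a single MSI or legacy interrupt is available, nhi_msi() just schedules interrupt_work, and nhi_interrupt_work() (source lines 796-849) does the demultiplexing under nhi->lock: it walks 3 * hop_count status bits (TX completions, then RX, then RX overflow), pulling a fresh dword from REG_RING_NOTIFY_BASE every 32 bits, and hands each set bit to the ring registered for that HopID. A condensed sketch of the dispatch loop; handle_ring_event() is a hypothetical placeholder for the per-ring completion handling:

static void nhi_interrupt_work_sketch(struct tb_nhi *nhi)
{
        int bit, hop = -1, type = 0;    /* 0 = TX, 1 = RX, 2 = RX overflow */
        u32 value = 0;

        spin_lock_irq(&nhi->lock);

        /* Three per-ring bitfields in a row; read a new dword every 32 bits */
        for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
                if (bit % 32 == 0)
                        value = ioread32(nhi->iobase + REG_RING_NOTIFY_BASE +
                                         4 * (bit / 32));
                if (++hop == nhi->hop_count) {
                        hop = 0;
                        type++;
                }
                if (!(value & BIT(bit % 32)))
                        continue;

                if (type == 2) {
                        dev_warn(&nhi->pdev->dev, "RX overflow for ring %d\n",
                                 hop);
                        continue;
                }

                if (type == 0 && nhi->tx_rings[hop])
                        handle_ring_event(nhi->tx_rings[hop]);  /* placeholder */
                else if (type == 1 && nhi->rx_rings[hop])
                        handle_ring_event(nhi->rx_rings[hop]);  /* placeholder */
                else
                        dev_warn(&nhi->pdev->dev,
                                 "interrupt for inactive %s ring %d\n",
                                 type ? "RX" : "TX", hop);
        }

        spin_unlock_irq(&nhi->lock);
}
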
857 struct tb_nhi *nhi = tb->nhi; in __nhi_suspend_noirq() local
864 if (nhi->ops && nhi->ops->suspend_noirq) { in __nhi_suspend_noirq()
865 ret = nhi->ops->suspend_noirq(tb->nhi, wakeup); in __nhi_suspend_noirq()
917 static void nhi_enable_int_throttling(struct tb_nhi *nhi) in nhi_enable_int_throttling() argument
929 iowrite32(throttle, nhi->iobase + reg); in nhi_enable_int_throttling()
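
nhi_enable_int_throttling() (source lines 917-929) programs one throttling register per interrupt vector so that completions are coalesced instead of firing back to back. A small sketch; the register name and the ~128 µs figure (expressed in the hardware's 256 ns units) are assumptions for illustration, only the per-vector iowrite32() is taken from the listing.

static void nhi_enable_int_throttling_sketch(struct tb_nhi *nhi)
{
        /* Assumed: 256 ns granularity, target of roughly 128 us */
        u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
        int i;

        for (i = 0; i < MSIX_MAX_VECS; i++) {
                u32 reg = REG_INT_THROTTLING_RATE + i * 4;      /* assumed name */

                iowrite32(throttle, nhi->iobase + reg);
        }
}
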
937 struct tb_nhi *nhi = tb->nhi; in nhi_resume_noirq() local
946 nhi->going_away = true; in nhi_resume_noirq()
948 if (nhi->ops && nhi->ops->resume_noirq) { in nhi_resume_noirq()
949 ret = nhi->ops->resume_noirq(nhi); in nhi_resume_noirq()
953 nhi_enable_int_throttling(tb->nhi); in nhi_resume_noirq()
987 struct tb_nhi *nhi = tb->nhi; in nhi_runtime_suspend() local
994 if (nhi->ops && nhi->ops->runtime_suspend) { in nhi_runtime_suspend()
995 ret = nhi->ops->runtime_suspend(tb->nhi); in nhi_runtime_suspend()
1006 struct tb_nhi *nhi = tb->nhi; in nhi_runtime_resume() local
1009 if (nhi->ops && nhi->ops->runtime_resume) { in nhi_runtime_resume()
1010 ret = nhi->ops->runtime_resume(nhi); in nhi_runtime_resume()
1015 nhi_enable_int_throttling(nhi); in nhi_runtime_resume()
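
The power-management fragments (source lines 857-1015) all follow the same shape: the generic work is done by the core driver and the controller-specific nhi->ops hook (suspend_noirq, resume_noirq, runtime_suspend, runtime_resume) is called only if it exists; the resume paths also re-enable interrupt throttling, and nhi_resume_noirq() sets going_away when the hardware did not come back. A sketch of the runtime-resume variant, assuming the usual drvdata layout (struct tb holding the nhi pointer) and the domain-level resume helper:

static int nhi_runtime_resume_sketch(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct tb *tb = pci_get_drvdata(pdev);
        struct tb_nhi *nhi = tb->nhi;
        int ret;

        /* The controller-specific hook is optional */
        if (nhi->ops && nhi->ops->runtime_resume) {
                ret = nhi->ops->runtime_resume(nhi);
                if (ret)
                        return ret;
        }

        /* Throttling setup is lost across the power transition */
        nhi_enable_int_throttling(nhi);
        return tb_domain_runtime_resume(tb);
}
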
1019 static void nhi_shutdown(struct tb_nhi *nhi) in nhi_shutdown() argument
1023 dev_dbg(&nhi->pdev->dev, "shutdown\n"); in nhi_shutdown()
1025 for (i = 0; i < nhi->hop_count; i++) { in nhi_shutdown()
1026 if (nhi->tx_rings[i]) in nhi_shutdown()
1027 dev_WARN(&nhi->pdev->dev, in nhi_shutdown()
1029 if (nhi->rx_rings[i]) in nhi_shutdown()
1030 dev_WARN(&nhi->pdev->dev, in nhi_shutdown()
1033 nhi_disable_interrupts(nhi); in nhi_shutdown()
1038 if (!nhi->pdev->msix_enabled) { in nhi_shutdown()
1039 devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi); in nhi_shutdown()
1040 flush_work(&nhi->interrupt_work); in nhi_shutdown()
1042 ida_destroy(&nhi->msix_ida); in nhi_shutdown()
1044 if (nhi->ops && nhi->ops->shutdown) in nhi_shutdown()
1045 nhi->ops->shutdown(nhi); in nhi_shutdown()
1048 static int nhi_init_msi(struct tb_nhi *nhi) in nhi_init_msi() argument
1050 struct pci_dev *pdev = nhi->pdev; in nhi_init_msi()
1054 nhi_disable_interrupts(nhi); in nhi_init_msi()
1056 nhi_enable_int_throttling(nhi); in nhi_init_msi()
1058 ida_init(&nhi->msix_ida); in nhi_init_msi()
1073 INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work); in nhi_init_msi()
1075 irq = pci_irq_vector(nhi->pdev, 0); in nhi_init_msi()
1080 IRQF_NO_SUSPEND, "thunderbolt", nhi); in nhi_init_msi()
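
nhi_init_msi() (source lines 1048-1080) sets up interrupt delivery: it first masks everything and enables throttling, initializes the vector IDA, then asks the PCI core for MSI-X vectors; if per-ring MSI-X is not available it falls back to a single MSI serviced through the interrupt_work item, requested with IRQF_NO_SUSPEND. A condensed sketch (the exact vector counts and fallback order are assumptions):

static int nhi_init_msi_sketch(struct tb_nhi *nhi)
{
        struct pci_dev *pdev = nhi->pdev;
        int nvec, irq;

        /* Start from a clean slate before any vector can fire */
        nhi_disable_interrupts(nhi);
        nhi_enable_int_throttling(nhi);
        ida_init(&nhi->msix_ida);

        /* Prefer per-ring MSI-X vectors */
        nvec = pci_alloc_irq_vectors(pdev, 1, MSIX_MAX_VECS, PCI_IRQ_MSIX);
        if (nvec > 0)
                return 0;       /* rings request their own vectors later */

        /* Fall back to one shared MSI, demultiplexed from a work item */
        nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
        if (nvec < 0)
                return nvec;

        INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

        irq = pci_irq_vector(pdev, 0);
        if (irq < 0)
                return irq;

        return devm_request_irq(&pdev->dev, irq, nhi_msi,
                                IRQF_NO_SUSPEND, "thunderbolt", nhi);
}
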
1107 static void tb_apple_add_links(struct tb_nhi *nhi) in tb_apple_add_links() argument
1114 switch (nhi->pdev->device) { in tb_apple_add_links()
1124 upstream = pci_upstream_bridge(nhi->pdev); in tb_apple_add_links()
1150 link = device_link_add(&pdev->dev, &nhi->pdev->dev, in tb_apple_add_links()
1154 dev_dbg(&nhi->pdev->dev, "created link from %s\n", in tb_apple_add_links()
1157 dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", in tb_apple_add_links()
1165 struct tb_nhi *nhi; in nhi_probe() local
1186 nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL); in nhi_probe()
1187 if (!nhi) in nhi_probe()
1190 nhi->pdev = pdev; in nhi_probe()
1191 nhi->ops = (const struct tb_nhi_ops *)id->driver_data; in nhi_probe()
1193 nhi->iobase = pcim_iomap_table(pdev)[0]; in nhi_probe()
1194 nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; in nhi_probe()
1195 dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count); in nhi_probe()
1197 nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, in nhi_probe()
1198 sizeof(*nhi->tx_rings), GFP_KERNEL); in nhi_probe()
1199 nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, in nhi_probe()
1200 sizeof(*nhi->rx_rings), GFP_KERNEL); in nhi_probe()
1201 if (!nhi->tx_rings || !nhi->rx_rings) in nhi_probe()
1204 res = nhi_init_msi(nhi); in nhi_probe()
1210 spin_lock_init(&nhi->lock); in nhi_probe()
1222 if (nhi->ops && nhi->ops->init) { in nhi_probe()
1223 res = nhi->ops->init(nhi); in nhi_probe()
1228 tb_apple_add_links(nhi); in nhi_probe()
1229 tb_acpi_add_links(nhi); in nhi_probe()
1231 tb = icm_probe(nhi); in nhi_probe()
1233 tb = tb_probe(nhi); in nhi_probe()
1235 dev_err(&nhi->pdev->dev, in nhi_probe()
1240 dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n"); in nhi_probe()
1249 nhi_shutdown(nhi); in nhi_probe()
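
nhi_probe() (source lines 1165-1249) ties the pieces together: allocate the tb_nhi, read hop_count from REG_HOP_COUNT, allocate the tx_rings[]/rx_rings[] tables, set up interrupts, run the optional ops->init hook, add the Apple/ACPI device links, and finally hand the NHI to a connection manager (firmware-based icm_probe() first, software tb_probe() as fallback). A condensed sketch of that flow; PCI enable/BAR mapping, DMA mask setup and domain registration are elided and only noted in comments.

static int nhi_probe_sketch(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct tb_nhi *nhi;
        struct tb *tb;
        int res;

        /* pcim_enable_device()/BAR mapping/DMA mask setup elided */

        nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
        if (!nhi)
                return -ENOMEM;

        nhi->pdev = pdev;
        nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
        nhi->iobase = pcim_iomap_table(pdev)[0];

        /* Number of paths/rings the controller exposes (low 10 bits) */
        nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;

        nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
                                     sizeof(*nhi->tx_rings), GFP_KERNEL);
        nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
                                     sizeof(*nhi->rx_rings), GFP_KERNEL);
        if (!nhi->tx_rings || !nhi->rx_rings)
                return -ENOMEM;

        res = nhi_init_msi(nhi);
        if (res)
                return res;

        spin_lock_init(&nhi->lock);

        /* Optional controller-specific init hook */
        if (nhi->ops && nhi->ops->init) {
                res = nhi->ops->init(nhi);
                if (res)
                        return res;
        }

        tb_apple_add_links(nhi);
        tb_acpi_add_links(nhi);

        /* Prefer the firmware connection manager, else the software one */
        tb = icm_probe(nhi);
        if (!tb)
                tb = tb_probe(nhi);
        if (!tb)
                return -ENODEV;

        /* Domain registration and start elided (tb_domain_add() in the driver) */
        pci_set_drvdata(pdev, tb);
        return 0;
}
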
1267 struct tb_nhi *nhi = tb->nhi; in nhi_remove() local
1274 nhi_shutdown(nhi); in nhi_remove()