
// SPDX-License-Identifier: GPL-2.0-or-later
** (c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org>
** (c) Copyright 2000-2004 Hewlett-Packard Company
** J5000/J7000/N-class/L-class machines and their successors.

#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>

#include <asm/pdc.h>		/* for PDC_MODEL_* */
#include <asm/parisc-device.h>
** The number of debug flags is a clue - this code is fragile.

/* global count of IOMMUs in the system */

** Superdome (in particular, REO) allows only 64-bit CSR accesses.

/* NOTE: When CONFIG_64BIT isn't defined, READ_REG64() is two 32-bit reads */
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA

 * sba_dump_tlb - debugging only - print IOMMU operating parameters

 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry

	u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
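	/*
	 * Editor's note: for unsigned 32-bit arithmetic and a power-of-two
	 * BITS_PER_LONG, ~0U * BITS_PER_LONG == ~(BITS_PER_LONG - 1); e.g.
	 * 0xffffffff * 64 truncates to 0xffffffc0. The expression above thus
	 * rounds pide down to the first entry of its resource-map word.
	 */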
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >> 3) & ~(sizeof(unsigned long) - 1)]);

		 rptr, pide & (BITS_PER_LONG - 1), *rptr);

			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? " -->" : " ",
 * sba_check_pdir - debugging only - consistency checker

	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */

			** BUMMER! -- res_map != pdir --

			rcnt--;

 * sba_dump_sg - debugging only - print Scatter-Gather list

	while (nents-- > 0) {

			sg_virt(startsg), startsg->length);
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
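/*
** Worked example (editor's note, not in the original source): SBA_IOVA()
** just ORs the pieces together, so with ioc->ibase == 0x40000000, a
** page-aligned iovp of 0x5000 and an in-page offset of 0x123:
**	SBA_IOVA(ioc, 0x5000, 0x123, 0) == 0x40005123
** SBA_IOVP() is meant to be the inverse step: iovp_mask strips the
** ibase/hint bits so the pdir index (iovp >> IOVP_SHIFT) can be
** recovered from the IOVA.
*/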
#define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)

/* in ptr_to_pide() */
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
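/*
** Editor's note: the byte distance from res_map, times 8, is the bit
** index of the word res_ptr points at; the caller adds the bit offset
** within that word. E.g. res_ptr == res_map + 16 on a 64-bit kernel
** covers bits 128..191, so pide == 128 + bitshiftcnt.
*/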
 * sba_search_bitmap - find free space in IO PDIR resource bitmap

	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);

	BUG_ON(ioc->ibase & ~IOVP_MASK);
	shift = ioc->ibase >> IOVP_SHIFT;

		/* Search word at a time - no mask needed */

		ioc->res_bitshift = 0;

		** Search the resource bit map on well-aligned values.

			uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);

			ioc->res_bitshift = bitshiftcnt + bits_wanted;

		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;

		ioc->res_hint = res_ptr;
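/*
** Editor's sketch of the first-fit idea above, as standalone userspace C.
** Everything here (the names, the SKETCH_* macros) is illustrative and
** assumes bits_wanted is a power of two no larger than the word size;
** it is not the kernel code, which also handles the search hint, the
** ibase alignment shift, and wrap-around.
*/
#include <limits.h>

#define SKETCH_BPL	(sizeof(unsigned long) * CHAR_BIT)
/* n set bits at the top of a word, mirroring RESMAP_MASK() above */
#define SKETCH_MASK(n)	(~0UL << (SKETCH_BPL - (n)))

static long sketch_search_bitmap(unsigned long *map, unsigned long nwords,
				 unsigned int bits_wanted)
{
	unsigned long w;
	unsigned int bit;

	for (w = 0; w < nwords; w++) {
		/* step in bits_wanted-aligned strides, as the IOC requires */
		for (bit = 0; bit + bits_wanted <= SKETCH_BPL; bit += bits_wanted) {
			unsigned long m = SKETCH_MASK(bits_wanted) >> bit;

			if ((map[w] & m) == 0) {
				map[w] |= m;			/* claim the run */
				return w * SKETCH_BPL + bit;	/* the "pide" */
			}
		}
	}
	return -1;	/* no free run found */
}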
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap

	if (pide >= (ioc->res_size << 3)) {
		if (pide >= (ioc->res_size << 3))
				__FILE__, ioc->ioc_hpa);

	if (0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift);

	unsigned long tmp = cr_end - cr_start;
	cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);

	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
 * sba_free_range - unmark bits in IO PDIR resource bitmap

	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	ioc->used_pages -= bits_not_wanted;
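/*
** Worked example (editor's note): freeing 2 pages at pide == 3 on a
** 64-bit kernel gives m == RESMAP_MASK(2) >> 3, i.e. bits 3 and 4
** counting from the MSB - the same "bit 0 is the top bit" convention
** used when res_map[0] = 0x80 marks pide 0 busy at init time.
*/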
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)

 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @sid: process Space ID - currently only support KERNEL_SPACE

 * Using the bass-ackwards HP bit numbering, each IO Pdir entry

 * +-+---------------------+----------------------------------+----+--------+
 * +-+---------------------+----------------------------------+----+--------+

 * +-+------------------------+-------------------------------+----+--------+
 * +-+------------------------+-------------------------------+----+--------+

 * We pre-swap the bytes since PCX-W is Big Endian and the

	/*
	 * If the PDC_MODEL capabilities word has the Non-coherent IO-PDIR bit
	 * set, we have to flush and sync every time the IO-PDIR is changed
	 * on Ike/Astro.
	 */
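/*
** Editor's sketch of assembling one entry per the layout above. The
** constants are assumptions for illustration: "V" (valid) is HP bit 0,
** i.e. the 1ULL << 63 bit of the u64, and the low byte carries the
** coherence/hint bits. pa, ci and pdir_ptr stand for the physical
** address, coherence index and target entry pointer; the real code also
** derives ci with inline asm, which is omitted here.
*/
	u64 entry;

	entry  = pa & IOVP_MASK;	/* PPN: page-aligned physical address */
	entry |= ci & 0xffULL;		/* U + VI: coherence/hint byte */
	entry |= 1ULL << 63;		/* V: mark the entry valid */
	*pdir_ptr = cpu_to_le64(entry);	/* pre-swap: the IOC reads little-endian */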
 * sba_mark_invalid - invalidate one or more IO PDIR entries

	u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

	** Even though this is a big-endian machine, the entries

		- (unsigned long) pdir_ptr;

	entries_per_cacheline = L1_CACHE_SHIFT - 3;

		byte_cnt -= IOVP_SIZE;

	WRITE_REG(SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);

 * sba_dma_supported - PCI driver can query DMA support
 * See Documentation/core-api/dma-api-howto.rst

	return((int)(mask >= (ioc->ibase - 1 +
			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
 * sba_map_single - map one buffer and return IOVA for DMA
 * See Documentation/core-api/dma-api-howto.rst

	spin_lock_irqsave(&ioc->res_lock, flags);

	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",

	pdir_start = &(ioc->pdir_base[pide]);

		size -= IOVP_SIZE;

	spin_unlock_irqrestore(&ioc->res_lock, flags);
 * sba_unmap_page - unmap one IOVA and free resources
 * See Documentation/core-api/dma-api-howto.rst

	spin_lock_irqsave(&ioc->res_lock, flags);

	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;

	/* Delaying when we re-use an IO Pdir entry reduces the number
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;
		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}

	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
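/*
** Editor's note on the DELAYED_RESOURCE_CNT ring above: frees are queued
** in ioc->saved[] and released in one burst, so the costly PCOM purge
** and its flushing read-back are amortized over DELAYED_RESOURCE_CNT
** unmaps instead of being paid on every sba_unmap_page() call.
*/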
 * sba_alloc - allocate/map shared mem for DMA
 * See Documentation/core-api/dma-api-howto.rst

 * sba_free - free/unmap shared mem for DMA
 * See Documentation/core-api/dma-api-howto.rst

#include "iommu-helpers.h"
 * sba_map_sg - map Scatter/Gather list
 * See Documentation/core-api/dma-api-howto.rst

			sglist->length, direction);
		sg_dma_len(sglist) = sglist->length;

	spin_lock_irqsave(&ioc->res_lock, flags);

	ioc->msg_calls++;

	spin_unlock_irqrestore(&ioc->res_lock, flags);
 * sba_unmap_sg - unmap Scatter/Gather list
 * See Documentation/core-api/dma-api-howto.rst

		__func__, nents, sg_virt(sglist), sglist->length);

	ioc->usg_calls++;

	spin_lock_irqsave(&ioc->res_lock, flags);
	spin_unlock_irqrestore(&ioc->res_lock, flags);

		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> IOVP_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */

		nents--;

	spin_lock_irqsave(&ioc->res_lock, flags);
	spin_unlock_irqrestore(&ioc->res_lock, flags);
** SBA PAT PDC support

/* in sba_get_pat_resources() */
	** PAT PDC to program the SBA/LBA directed range registers...this
	** PCI subsystem. It's not clear yet. - ggg

	PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp);
	PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp);
	PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
/* in sba_alloc_pdir() */
	/* If this is not PA8700 (PCX-W2)
	if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
	    || (boot_cpu_data.pdc.versions > 0x202)
	    || (boot_cpu_data.pdc.capabilities & 0x08L) )

	 * PA8700 (PCX-W2, aka piranha) silent data corruption fix
	 * (little-endian) bits 17 and 18 are on and bit 20 is off.
	 * IO Pdir to a maximum size of 2MB-128K (1920K).

	if (pdir_order <= (19-12)) {
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));

			while (pdir_order < (19-12)) {

		free_pages(new_pdir, 20-12);

	if (pdir_order > (20-12)) {

		free_pages(new_pdir, 20-12);

		free_pages(new_pdir - 128*1024, 17-12);

		pdir_size -= 128*1024;
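/*
** Editor's note on the address check above, with assumed mask values
** PIRANHA_ADDR_MASK == 0x00160000 and PIRANHA_ADDR_VAL == 0x00060000:
** a Pdir whose last byte lands at, say, physical ...x0006ffff has bits
** 17 and 18 set and bit 20 clear, so it would hit the PA8700 erratum;
** growing the allocation and trimming 128K sidesteps that range.
*/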
/* in setup_ibase_imask_callback() */
	int rope_num = (lba->hpa.start >> 13) & 0xf;
	if (rope_num >> 3 == ibd->ioc_num)
		lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);

/* in setup_ibase_imask() */
	device_for_each_child(&sba->dev, &ibase_data,
/* in sba_ioc_init_pluto() */
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
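/*
** Worked example (editor's note): if the low word of IOC_IMASK reads
** 0xfff00000, then ~0xfff00000 + 1 == 0x00100000, i.e. a 1 MB IOVA
** space; the mask's trailing zero bits directly encode the span.
*/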
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

		__func__, ioc->ioc_hpa, iova_space_size >> 20,

	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						get_order(ioc->pdir_size));
	if (!ioc->pdir_base)

	memset(ioc->pdir_base, 0, ioc->pdir_size);

		__func__, ioc->pdir_base, ioc->pdir_size);

	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

		ioc->hint_shift_pdir, ioc->hint_mask_pdir);

	WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	ioc->imask = iova_space_mask;
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);

	sba_dump_tlb(ioc->ioc_hpa);

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

	ioc->imask |= 0xFFFFFFFF00000000UL;

	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);

	device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);

		ioc->pdir_size /= 2;
		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
/* in sba_ioc_init() */
	** can be outstanding based on PCI Class/sub-class. Both
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).

	** While we have 32-bit "IOVA" space, the top two bits are used
	** for DMA hints - ergo only 30 bits max.

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
	}
	else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
	}
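/*
** Worked example (editor's note): with 4 KB pages (PAGE_SHIFT == 12),
** 1 << (20 - 12) == 256 pages == 1 MB and 1 << (30 - 12) == 262144
** pages == 1 GB, so iova_space_size (counted in pages at this point)
** is clamped to the 1MB-1GB window named in the comment above.
*/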
	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);

		ioc->ioc_hpa,
		(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),

	ioc->pdir_base = sba_alloc_pdir(pdir_size);

		__func__, ioc->pdir_base, pdir_size);

	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

		ioc->hint_shift_pdir, ioc->hint_mask_pdir);

	WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	** On C3000 w/512MB mem, HP-UX 10.20 reports:

	ioc->ibase = 0;
	ioc->imask = iova_space_mask;	/* save it */
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);

		__func__, ioc->ibase, ioc->imask);

	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);

	WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	ioc->ibase = 0;	/* used by SBA_IOVA and related macros */
/* in ioc_remap() */
	return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
/* in sba_hw_init() */
	/* Shutdown the USB controller on Astro-based workstations.

	if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {

	printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
		PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);

	** to PDC about which device to shutdown.

	if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
	    && (PAGE0->mem_boot.cl_class != CL_SEQU)) {

	if (!IS_PLUTO(sba_dev->dev)) {
		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
			__func__, sba_dev->sba_hpa, ioc_ctl);

		WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

		ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);

	if (IS_ASTRO(sba_dev->dev)) {
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);

		sba_dev->chip_resv.name = "Astro Intr Ack";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));

	} else if (IS_PLUTO(sba_dev->dev)) {
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);

		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));

		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
	} else {
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
		sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));

		/* TODO - LOOKUP Ike/Stretch chipset mem map */
	}

	sba_dev->num_ioc = num_ioc;
		void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;

		if (IS_PLUTO(sba_dev->dev)) {

		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
		READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)

		READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
		READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)

		if (IS_PLUTO(sba_dev->dev)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
/* in sba_common_init() */
	** This will be useful for debugging - especially if we get coredumps

	sba_dev->next = sba_list;

	for (i = 0; i < sba_dev->num_ioc; i++) {

		res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */

		res_size -= (128*1024)/sizeof(u64);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

		iterate_pages(sba_dev->ioc[i].res_map, res_size,

		if (NULL == sba_dev->ioc[i].res_map)

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* next available IOVP - circular search */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

		/* Mark first bit busy - i.e. no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;

		/* region from +1408K to +1536K is un-usable. */

		long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
		long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			*p_start++ = -1;

		iterate_pages(sba_dev->ioc[i].res_map, res_size,
		iterate_pages(sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,

			__func__, i, res_size, sba_dev->ioc[i].res_map);
	}

	spin_lock_init(&sba_dev->sba_lock);
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

	/*
	 * If the PDC_MODEL capabilities word has the Non-coherent IO-PDIR bit
	 * set, we have to flush and sync every time the IO-PDIR is changed
	 * on Ike/Astro.
	 */
/* in sba_proc_info() */
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
	int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */

		sba_dev->name,
		(sba_dev->hw_rev & 0x7) + 1,
		(sba_dev->hw_rev & 0x18) >> 3);

		(int)((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */

		ioc->res_size, ioc->res_size << 3);	/* 8 bits per byte */

		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));

			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));

		total_pages - ioc->used_pages, ioc->used_pages,
		(int)(ioc->used_pages * 100 / total_pages));

	min = max = ioc->avg_search[0];
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];

		ioc->msingle_calls, ioc->msingle_pages,
		(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

	/* KLUGE - unmap_sg calls unmap_single for each mapped page */
	min = ioc->usingle_calls;
	max = ioc->usingle_pages - ioc->usg_pages;

		ioc->msg_calls, ioc->msg_pages,
		(int)((ioc->msg_pages * 1000)/ioc->msg_calls));

		ioc->usg_calls, ioc->usg_pages,
		(int)((ioc->usg_pages * 1000)/ioc->usg_calls));

/* in sba_proc_bitmap_info() */
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */

	seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
		     ioc->res_size, false);
/* in sba_driver_callback() */
	void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);

		MODULE_NAME, version, (unsigned long long)dev->hpa.start);

		printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
		return -ENOMEM;

		spin_lock_init(&(sba_dev->ioc[i].res_lock));

	sba_dev->dev = dev;
	sba_dev->hw_rev = func_class;
	sba_dev->name = dev->name;
	sba_dev->sba_hpa = sba_addr;

	switch (dev->id.hversion) {

	proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
 * sba_get_iommu - Assign the iommu pointer for the pci bus controller.

	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int iocnum = (pci_hba->hw_path >> 3);	/* rope # */

	return &(sba->ioc[iocnum]);

 * sba_directed_lmmio - return first directed LMMIO range routed to rope

	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));	/* rope # */

	r->start = r->end = 0;

		void __iomem *reg = sba->sba_hpa + i*0x18;

		if ((size & (ROPES_PER_IOC-1)) != rope)

		r->start = (base & ~1UL) | PCI_F_EXTEND;
		r->end = r->start + size;
		r->flags = IORESOURCE_MEM;

 * sba_distributed_lmmio - return portion of distributed LMMIO range

	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));	/* rope # */

	r->start = r->end = 0;

	base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);

	r->start = (base & ~1UL) | PCI_F_EXTEND;

	size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
	r->start += rope * (size + 1);	/* adjust base for this rope */
	r->end = r->start + size;
	r->flags = IORESOURCE_MEM;
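/*
** Worked example (editor's note): if LMMIO_DIST_MASK reads 0xff000000,
** the distributed window spans ~mask + 1 == 16 MB. Divided across
** ROPES_PER_IOC == 8 ropes (an assumption of this sketch), size ==
** 0x00ffffff / 8 == 0x001fffff, so rope 2 gets r->start == base +
** 2 * 0x200000 and the 2 MB slice [r->start, r->start + size].
*/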