Lines Matching full:devid

105 u16 devid; member
123 u16 devid; member
141 u16 devid; member
283 static inline void update_last_devid(u16 devid) in update_last_devid() argument
285 if (devid > amd_iommu_last_bdf) in update_last_devid()
286 amd_iommu_last_bdf = devid; in update_last_devid()
580 update_last_devid(dev->devid); in find_last_devid_from_ivhd()
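
The update_last_devid() hits above implement a simple "track the highest device ID seen" pattern: while scanning the IVRS/IVHD tables, the driver remembers the largest BDF so the device and rlookup tables can be sized to cover it. A minimal standalone sketch of that pattern, using illustrative names (last_bdf, note_devid) rather than the kernel's globals:

#include <stdint.h>
#include <stdio.h>

static uint16_t last_bdf;	/* stand-in for amd_iommu_last_bdf */

/* Remember the largest device ID reported by the firmware tables. */
static void note_devid(uint16_t devid)
{
	if (devid > last_bdf)
		last_bdf = devid;
}

int main(void)
{
	note_devid(0x00a0);	/* e.g. 00:14.0 */
	note_devid(0x0008);	/* e.g. 00:01.0 */
	printf("tables must cover device IDs 0..%#x\n", last_bdf);
	return 0;
}
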
916 static void set_dev_entry_bit(u16 devid, u8 bit) in set_dev_entry_bit() argument
921 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit); in set_dev_entry_bit()
924 static int get_dev_entry_bit(u16 devid, u8 bit) in get_dev_entry_bit() argument
929 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit; in get_dev_entry_bit()
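
set_dev_entry_bit()/get_dev_entry_bit() address one bit inside a 256-bit device-table entry (DTE) laid out as four 64-bit words: the upper bits of the bit number pick the word, the low six bits pick the position inside it. A self-contained sketch of that addressing, with struct dte and dev_table as stand-ins for the driver's structures:

#include <stdint.h>

struct dte {
	uint64_t data[4];	/* one 256-bit device-table entry */
};

static struct dte dev_table[1 << 16];	/* one entry per 16-bit device ID */

static void dte_set_bit(uint16_t devid, uint8_t bit)
{
	int i    = (bit >> 6) & 0x03;	/* which 64-bit word holds the bit */
	int _bit = bit & 0x3f;		/* bit position within that word   */

	dev_table[devid].data[i] |= 1ULL << _bit;
}

static int dte_get_bit(uint16_t devid, uint8_t bit)
{
	int i    = (bit >> 6) & 0x03;
	int _bit = bit & 0x3f;

	return (dev_table[devid].data[i] >> _bit) & 1;
}

int main(void)
{
	dte_set_bit(0x00a3, 1);			/* e.g. a "translation enabled" style bit */
	return dte_get_bit(0x00a3, 1) ? 0 : 1;
}
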
937 u32 lo, hi, devid, old_devtb_size; in copy_device_table() local
995 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { in copy_device_table()
996 old_dev_tbl_cpy[devid] = old_devtb[devid]; in copy_device_table()
997 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK; in copy_device_table()
998 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V; in copy_device_table()
1001 old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; in copy_device_table()
1002 old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; in copy_device_table()
1005 if (old_devtb[devid].data[0] & DTE_FLAG_GV) { in copy_device_table()
1008 old_dev_tbl_cpy[devid].data[1] &= ~tmp; in copy_device_table()
1011 old_dev_tbl_cpy[devid].data[0] &= ~tmp; in copy_device_table()
1015 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE; in copy_device_table()
1016 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK; in copy_device_table()
1017 int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK; in copy_device_table()
1021 pr_err("Wrong old irq remapping flag: %#x\n", devid); in copy_device_table()
1025 old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2]; in copy_device_table()
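
The copy_device_table() lines walk the device table left behind by a previous kernel (kdump/kexec) and carry entries over: DMA translation state is preserved for valid entries, guest-translation (GV/GCR3) state is stripped, and the interrupt-remapping word is kept only after a sanity check on its control field. A simplified sketch of that filtering; the DTE_V/DTE_GV/GCR3_MASK_LO/IRQ_* constants below are hypothetical placeholders, not the driver's actual masks:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct dte { uint64_t data[4]; };

/* Hypothetical bit masks; the real layout is defined by the AMD IOMMU spec. */
#define DTE_V          (1ULL << 0)		/* entry valid                     */
#define DTE_GV         (1ULL << 55)		/* guest translation valid         */
#define GCR3_MASK_LO   (0xffffULL << 16)	/* guest CR3 bits kept in data[1]  */
#define IRQ_REMAP_EN   (1ULL << 0)		/* data[2]: interrupt remap enable */
#define IRQ_INTCTL     (3ULL << 60)		/* data[2]: expected control field */

static bool copy_dte(struct dte *dst, const struct dte *src, unsigned devid)
{
	*dst = (struct dte){ 0 };

	if (src->data[0] & DTE_V) {
		/* Carry the DMA translation over so in-flight devices keep working. */
		dst->data[0] = src->data[0];
		dst->data[1] = src->data[1];

		if (src->data[0] & DTE_GV) {
			/* Guest (GCR3) state cannot survive the handover: drop it. */
			dst->data[0] &= ~DTE_GV;
			dst->data[1] &= ~GCR3_MASK_LO;
		}
	}

	if (src->data[2] & IRQ_REMAP_EN) {
		/* Keep the interrupt-remap word only if its control field looks sane. */
		if ((src->data[2] & IRQ_INTCTL) != IRQ_INTCTL) {
			fprintf(stderr, "bogus irq remap state for devid %#x\n", devid);
			return false;
		}
		dst->data[2] = src->data[2];
	}
	return true;
}

int main(void)
{
	struct dte old = { .data = { DTE_V | DTE_GV, GCR3_MASK_LO,
				     IRQ_REMAP_EN | IRQ_INTCTL, 0 } };
	struct dte new_entry;

	return copy_dte(&new_entry, &old, 0x00a3) ? 0 : 1;
}
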
1033 void amd_iommu_apply_erratum_63(u16 devid) in amd_iommu_apply_erratum_63() argument
1037 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | in amd_iommu_apply_erratum_63()
1038 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); in amd_iommu_apply_erratum_63()
1041 set_dev_entry_bit(devid, DEV_ENTRY_IW); in amd_iommu_apply_erratum_63()
1045 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) in set_iommu_for_device() argument
1047 amd_iommu_rlookup_table[devid] = iommu; in set_iommu_for_device()
1055 u16 devid, u32 flags, u32 ext_flags) in set_dev_entry_from_acpi() argument
1058 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS); in set_dev_entry_from_acpi()
1060 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); in set_dev_entry_from_acpi()
1062 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); in set_dev_entry_from_acpi()
1064 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); in set_dev_entry_from_acpi()
1066 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); in set_dev_entry_from_acpi()
1068 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); in set_dev_entry_from_acpi()
1070 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); in set_dev_entry_from_acpi()
1072 amd_iommu_apply_erratum_63(devid); in set_dev_entry_from_acpi()
1074 set_iommu_for_device(iommu, devid); in set_dev_entry_from_acpi()
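
set_dev_entry_from_acpi() translates the IVHD entry's flag byte into the corresponding DTE control bits (the conditional tests on e->flags are trimmed from the listing above) and then records which IOMMU owns the device. A compact sketch of that flag-to-bit mapping; the flag values and bit positions are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Illustrative IVHD flag bits (not the kernel's ACPI_DEVFLAG_* values). */
#define ACPI_FLAG_INITPASS  (1u << 0)
#define ACPI_FLAG_EINTPASS  (1u << 1)
#define ACPI_FLAG_NMIPASS   (1u << 2)
#define ACPI_FLAG_LINT0PASS (1u << 6)
#define ACPI_FLAG_LINT1PASS (1u << 7)

/* Stub standing in for set_dev_entry_bit() from the earlier sketch. */
static void dte_set_bit(uint16_t devid, uint8_t bit)
{
	printf("devid %#06x: set DTE bit %u\n", devid, bit);
}

static void apply_acpi_flags(uint16_t devid, uint32_t flags)
{
	if (flags & ACPI_FLAG_INITPASS)
		dte_set_bit(devid, 96);		/* INIT pass-through   */
	if (flags & ACPI_FLAG_EINTPASS)
		dte_set_bit(devid, 97);		/* ExtInt pass-through */
	if (flags & ACPI_FLAG_NMIPASS)
		dte_set_bit(devid, 98);		/* NMI pass-through    */
	if (flags & ACPI_FLAG_LINT0PASS)
		dte_set_bit(devid, 102);	/* LINT0 pass-through  */
	if (flags & ACPI_FLAG_LINT1PASS)
		dte_set_bit(devid, 103);	/* LINT1 pass-through  */
}

int main(void)
{
	apply_acpi_flags(0x00a3, ACPI_FLAG_INITPASS | ACPI_FLAG_LINT1PASS);
	return 0;
}
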
1077 int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) in add_special_device() argument
1096 *devid = entry->devid; in add_special_device()
1106 entry->devid = *devid; in add_special_device()
1114 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid, in add_acpi_hid_device() argument
1128 *devid = entry->devid; in add_acpi_hid_device()
1138 entry->devid = *devid; in add_acpi_hid_device()
1140 entry->root_devid = (entry->devid & (~0x7)); in add_acpi_hid_device()
1157 &early_ioapic_map[i].devid, in add_early_maps()
1166 &early_hpet_map[i].devid, in add_early_maps()
1175 &early_acpihid_map[i].devid, in add_early_maps()
1193 u16 devid = 0, devid_start = 0, devid_to = 0; in init_iommu_from_acpi() local
1238 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " in init_iommu_from_acpi()
1240 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1241 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1242 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1245 devid = e->devid; in init_iommu_from_acpi()
1246 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1251 "devid: %02x:%02x.%x flags: %02x\n", in init_iommu_from_acpi()
1252 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1253 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1254 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1257 devid_start = e->devid; in init_iommu_from_acpi()
1264 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " in init_iommu_from_acpi()
1266 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1267 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1268 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1274 devid = e->devid; in init_iommu_from_acpi()
1276 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1278 amd_iommu_alias_table[devid] = devid_to; in init_iommu_from_acpi()
1283 "devid: %02x:%02x.%x flags: %02x " in init_iommu_from_acpi()
1285 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1286 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1287 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1293 devid_start = e->devid; in init_iommu_from_acpi()
1301 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x " in init_iommu_from_acpi()
1303 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1304 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1305 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1308 devid = e->devid; in init_iommu_from_acpi()
1309 set_dev_entry_from_acpi(iommu, devid, e->flags, in init_iommu_from_acpi()
1314 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " in init_iommu_from_acpi()
1316 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1317 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1318 PCI_FUNC(e->devid), in init_iommu_from_acpi()
1321 devid_start = e->devid; in init_iommu_from_acpi()
1328 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", in init_iommu_from_acpi()
1329 PCI_BUS_NUM(e->devid), in init_iommu_from_acpi()
1330 PCI_SLOT(e->devid), in init_iommu_from_acpi()
1331 PCI_FUNC(e->devid)); in init_iommu_from_acpi()
1333 devid = e->devid; in init_iommu_from_acpi()
1334 for (dev_i = devid_start; dev_i <= devid; ++dev_i) { in init_iommu_from_acpi()
1347 u16 devid; in init_iommu_from_acpi() local
1351 devid = (e->ext >> 8) & 0xffff; in init_iommu_from_acpi()
1363 PCI_BUS_NUM(devid), in init_iommu_from_acpi()
1364 PCI_SLOT(devid), in init_iommu_from_acpi()
1365 PCI_FUNC(devid)); in init_iommu_from_acpi()
1367 ret = add_special_device(type, handle, &devid, false); in init_iommu_from_acpi()
1372 * add_special_device might update the devid in case a in init_iommu_from_acpi()
1376 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
1381 u16 devid; in init_iommu_from_acpi() local
1423 devid = e->devid; in init_iommu_from_acpi()
1426 PCI_BUS_NUM(devid), in init_iommu_from_acpi()
1427 PCI_SLOT(devid), in init_iommu_from_acpi()
1428 PCI_FUNC(devid)); in init_iommu_from_acpi()
1432 ret = add_acpi_hid_device(hid, uid, &devid, false); in init_iommu_from_acpi()
1437 * add_special_device might update the devid in case a in init_iommu_from_acpi()
1441 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); in init_iommu_from_acpi()
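
Most of the init_iommu_from_acpi() hits above belong to a switch over IVHD device entries: SELECT entries program a single devid, *_RANGE entries remember a devid_start until the matching RANGE_END arrives, and ALIAS entries additionally record a devid_to in the alias table for every device in the range. A sketch of that range/alias bookkeeping under those assumptions; alias_table, program_devid and struct range_state are stand-ins, not the driver's names:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static uint16_t alias_table[1 << 16];	/* requester ID -> alias requester ID */

/* Stub for the real per-device setup (set_dev_entry_from_acpi in the driver). */
static void program_devid(uint16_t devid, uint32_t flags)
{
	printf("program %#06x with flags %#x\n", devid, flags);
}

/* State carried from a *_RANGE start entry to its matching RANGE_END. */
struct range_state {
	uint16_t devid_start;
	uint16_t devid_to;	/* alias target when the range is an alias range */
	uint32_t flags;
	bool     alias;
};

static void end_range(const struct range_state *r, uint16_t devid_end)
{
	/* 32-bit loop counter so devid_end == 0xffff cannot wrap around. */
	for (uint32_t d = r->devid_start; d <= devid_end; ++d) {
		if (r->alias)
			alias_table[d] = r->devid_to;
		program_devid((uint16_t)d, r->flags);
	}
}

int main(void)
{
	/* e.g. an alias range 00:04.0 .. 00:04.7, all aliased to 00:04.0 */
	struct range_state r = {
		.devid_start = 0x0020, .devid_to = 0x0020,
		.flags = 0, .alias = true,
	};

	end_range(&r, 0x0027);
	return 0;
}
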
1561 iommu->devid = h->devid; in init_iommu_one()
1658 amd_iommu_rlookup_table[iommu->devid] = NULL; in init_iommu_one()
1675 u16 devid = ivhd->devid; in get_highest_supported_ivhd_type() local
1681 if (ivhd->devid == devid) in get_highest_supported_ivhd_type()
1709 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), in init_iommu_all()
1710 PCI_FUNC(h->devid), h->cap_ptr, in init_iommu_all()
1818 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid), in iommu_init_pci()
1819 iommu->devid & 0xff); in iommu_init_pci()
2094 pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n", in iommu_init_intcapxt()
2095 iommu->devid, iommu->dev->irq); in iommu_init_intcapxt()
2168 e->devid_start = e->devid_end = m->devid; in init_unity_map_range()
2177 e->devid_start = m->devid; in init_unity_map_range()
2232 u32 devid; in init_device_table_dma() local
2234 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { in init_device_table_dma()
2235 set_dev_entry_bit(devid, DEV_ENTRY_VALID); in init_device_table_dma()
2236 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); in init_device_table_dma()
2242 u32 devid; in uninit_device_table_dma() local
2244 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { in uninit_device_table_dma()
2245 amd_iommu_dev_table[devid].data[0] = 0ULL; in uninit_device_table_dma()
2246 amd_iommu_dev_table[devid].data[1] = 0ULL; in uninit_device_table_dma()
2252 u32 devid; in init_device_table() local
2257 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) in init_device_table()
2258 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN); in init_device_table()
2530 int devid, id = mpc_ioapic_id(idx); in check_ioapic_information() local
2532 devid = get_ioapic_devid(id); in check_ioapic_information()
2533 if (devid < 0) { in check_ioapic_information()
2537 } else if (devid == IOAPIC_SB_DEVID) { in check_ioapic_information()
3051 u16 devid; in parse_ivrs_ioapic() local
3066 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); in parse_ivrs_ioapic()
3071 early_ioapic_map[i].devid = devid; in parse_ivrs_ioapic()
3081 u16 devid; in parse_ivrs_hpet() local
3096 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); in parse_ivrs_hpet()
3101 early_hpet_map[i].devid = devid; in parse_ivrs_hpet()
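
parse_ivrs_ioapic() and parse_ivrs_hpet() both fold a bus/slot/function triple into the 16-bit device ID with the same shift-and-mask expression, and add_acpi_hid_device() later masks off the low three bits to get the function-0 "root" devid. A standalone sketch of that encoding; bdf_pack is illustrative, and the decode mirrors what PCI_BUS_NUM/PCI_SLOT/PCI_FUNC extract:

#include <stdint.h>
#include <stdio.h>

/* bus in bits 15:8, slot in bits 7:3, function in bits 2:0 */
static uint16_t bdf_pack(unsigned bus, unsigned dev, unsigned fn)
{
	return ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
}

int main(void)
{
	unsigned devid = bdf_pack(0x00, 0x14, 0x3);	/* e.g. 00:14.3 */

	printf("devid %#06x -> %02x:%02x.%x (root devid %#06x)\n", devid,
	       (devid >> 8) & 0xff,	/* bus      (PCI_BUS_NUM)             */
	       (devid >> 3) & 0x1f,	/* slot     (PCI_SLOT)                */
	       devid & 0x7,		/* function (PCI_FUNC)                */
	       devid & ~0x7u);		/* function 0 of the same device      */
	return 0;
}
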
3132 early_acpihid_map[i].devid = in parse_ivrs_acpihid()