Lines Matching refs:qi
1178 if (iommu->qi) { in free_iommu()
1179 free_page((unsigned long)iommu->qi->desc); in free_iommu()
1180 kfree(iommu->qi->desc_status); in free_iommu()
1181 kfree(iommu->qi); in free_iommu()
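The free_iommu() lines above show that the queued-invalidation state is three separate allocations, released in order: the descriptor ring (qi->desc, a whole page), the per-slot status array (qi->desc_status), and the q_inval bookkeeping structure itself. Below is a minimal user-space sketch of that ownership; qi_model and qi_model_free are hypothetical names, and only the layout and free order visible in the listing are modeled, not the real kernel definitions.

#include <stdlib.h>

/* Hypothetical stand-in for struct q_inval; only the ownership visible in
 * the listing is modeled. */
enum { QI_FREE, QI_IN_USE, QI_DONE, QI_ABORT };

struct qi_model {
        unsigned char *desc;      /* descriptor ring (a full page in the kernel) */
        int *desc_status;         /* one completion status word per ring slot    */
        unsigned int free_head, free_tail, free_cnt;
};

/* Same release order as the free_iommu() lines above: ring, status array,
 * then the bookkeeping structure itself. */
static void qi_model_free(struct qi_model *qi)
{
        if (!qi)
                return;
        free(qi->desc);
        free(qi->desc_status);
        free(qi);
}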
1194 static inline void reclaim_free_desc(struct q_inval *qi) in reclaim_free_desc() argument
1196 while (qi->desc_status[qi->free_tail] == QI_DONE || in reclaim_free_desc()
1197 qi->desc_status[qi->free_tail] == QI_ABORT) { in reclaim_free_desc()
1198 qi->desc_status[qi->free_tail] = QI_FREE; in reclaim_free_desc()
1199 qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; in reclaim_free_desc()
1200 qi->free_cnt++; in reclaim_free_desc()
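reclaim_free_desc() shows the lazy recycling of completed slots: starting at free_tail, slots are returned to QI_FREE for as long as they read QI_DONE or QI_ABORT, bumping free_cnt each time, so a slot that is still QI_IN_USE blocks reclamation of everything behind it. The self-contained sketch below reproduces that loop; RING_LEN and the values staged in main() are model parameters, the rest mirrors the names in the listing.

#include <stdio.h>

#define RING_LEN 8        /* model parameter; the kernel ring is QI_LENGTH slots */

enum { QI_FREE, QI_IN_USE, QI_DONE, QI_ABORT };

static int desc_status[RING_LEN];
static unsigned int free_tail, free_cnt = RING_LEN;

/* Same shape as the reclaim_free_desc() fragment: slots are only reclaimed
 * from the tail, so an in-flight slot stops the walk even if later slots
 * have already completed. */
static void reclaim_free_desc(void)
{
        while (desc_status[free_tail] == QI_DONE ||
               desc_status[free_tail] == QI_ABORT) {
                desc_status[free_tail] = QI_FREE;
                free_tail = (free_tail + 1) % RING_LEN;
                free_cnt++;
        }
}

int main(void)
{
        /* Three slots submitted; 0 and 2 completed, 1 still in flight. */
        free_cnt -= 3;
        desc_status[0] = QI_DONE;
        desc_status[1] = QI_IN_USE;
        desc_status[2] = QI_DONE;

        reclaim_free_desc();
        printf("free_tail=%u free_cnt=%u\n", free_tail, free_cnt);  /* 1 and 6 */
        return 0;
}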
1208 struct q_inval *qi = iommu->qi; in qi_check_fault() local
1211 if (qi->desc_status[wait_index] == QI_ABORT) in qi_check_fault()
1224 struct qi_desc *desc = qi->desc + head; in qi_check_fault()
1234 memcpy(desc, qi->desc + (wait_index << shift), in qi_check_fault()
1255 if (qi->desc_status[head] == QI_IN_USE) in qi_check_fault()
1256 qi->desc_status[head] = QI_ABORT; in qi_check_fault()
1260 if (qi->desc_status[wait_index] == QI_ABORT) in qi_check_fault()
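The qi_check_fault() fragments show the recovery actions: bail out early when the wait slot is already QI_ABORT so the caller can resubmit; on an invalid-descriptor fault, copy the known-good wait descriptor over the bad descriptor at the hardware head so the queue can keep advancing; on a time-out, flip every slot still marked QI_IN_USE to QI_ABORT. In the full kernel function these branches are selected by the invalidation queue error and time-out bits of the fault status register. The sketch below models only the two recovery actions; RING_LEN, DESC_SIZE and the forward walk over pending slots are simplifications (the kernel walks back from the hardware head and steps over wait slots).

#include <string.h>

#define RING_LEN  8         /* model parameter; QI_LENGTH in the kernel      */
#define DESC_SIZE 32        /* model for 1 << shift (256-bit descriptors)    */

enum { QI_FREE, QI_IN_USE, QI_DONE, QI_ABORT };

static unsigned char desc[RING_LEN][DESC_SIZE];
static int desc_status[RING_LEN];

/* Invalid-descriptor recovery as in the memcpy() line above: overwrite the
 * bad descriptor at the hardware head with the known-good wait descriptor. */
static void recover_bad_descriptor(unsigned int head, unsigned int wait_index)
{
        memcpy(desc[head], desc[wait_index], DESC_SIZE);
}

/* Time-out recovery: everything still in flight is marked aborted so the
 * submitter sees QI_ABORT on its wait slot and retries. */
static void abort_pending(unsigned int head, unsigned int count)
{
        for (unsigned int i = 0; i < count; i++) {
                unsigned int slot = (head + i) % RING_LEN;

                if (desc_status[slot] == QI_IN_USE)
                        desc_status[slot] = QI_ABORT;
        }
}

int main(void)
{
        desc_status[0] = QI_IN_USE;
        desc_status[1] = QI_IN_USE;      /* wait slot for the request at 0 */

        recover_bad_descriptor(0, 1);    /* queue-error style recovery     */
        abort_pending(0, 2);             /* time-out style recovery        */
        return desc_status[1] == QI_ABORT ? 0 : 1;
}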
1280 struct q_inval *qi = iommu->qi; in qi_submit_sync() local
1287 if (!qi) in qi_submit_sync()
1293 raw_spin_lock_irqsave(&qi->q_lock, flags); in qi_submit_sync()
1299 while (qi->free_cnt < count + 2) { in qi_submit_sync()
1300 raw_spin_unlock_irqrestore(&qi->q_lock, flags); in qi_submit_sync()
1302 raw_spin_lock_irqsave(&qi->q_lock, flags); in qi_submit_sync()
1305 index = qi->free_head; in qi_submit_sync()
1311 memcpy(qi->desc + offset, &desc[i], 1 << shift); in qi_submit_sync()
1312 qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE; in qi_submit_sync()
1314 qi->desc_status[wait_index] = QI_IN_USE; in qi_submit_sync()
1320 wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]); in qi_submit_sync()
1325 memcpy(qi->desc + offset, &wait_desc, 1 << shift); in qi_submit_sync()
1327 qi->free_head = (qi->free_head + count + 1) % QI_LENGTH; in qi_submit_sync()
1328 qi->free_cnt -= count + 1; in qi_submit_sync()
1334 writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG); in qi_submit_sync()
1336 while (qi->desc_status[wait_index] != QI_DONE) { in qi_submit_sync()
1348 raw_spin_unlock(&qi->q_lock); in qi_submit_sync()
1350 raw_spin_lock(&qi->q_lock); in qi_submit_sync()
1354 qi->desc_status[(index + i) % QI_LENGTH] = QI_DONE; in qi_submit_sync()
1356 reclaim_free_desc(qi); in qi_submit_sync()
1357 raw_spin_unlock_irqrestore(&qi->q_lock, flags); in qi_submit_sync()
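qi_submit_sync() ties the pieces together: make sure there is room for count descriptors plus one wait descriptor (the free_cnt < count + 2 check also keeps one slot free as the gap between head and tail), copy the descriptors into the ring and mark their slots QI_IN_USE, point the wait descriptor's qw1 at the physical address of its own status word, advance free_head by count + 1, write the new tail to DMAR_IQT_REG, and spin until the hardware stores QI_DONE into the wait slot; the submitted slots are then marked QI_DONE and reclaim_free_desc() recycles them. The self-contained model below follows that sequence with a fake hardware hook that completes immediately; RING_LEN, qi_desc_model, fake_hw_kick() and submit_sync() are hypothetical stand-ins, not kernel interfaces.

#include <stdio.h>
#include <stdint.h>

#define RING_LEN 8                 /* model parameter; QI_LENGTH in the kernel */

enum { QI_FREE, QI_IN_USE, QI_DONE, QI_ABORT };

struct qi_desc_model { unsigned long long qw0, qw1, qw2, qw3; };

static struct qi_desc_model ring[RING_LEN];
static int desc_status[RING_LEN];
static unsigned int free_head, free_cnt = RING_LEN;

/* Stand-in for the hardware: pretend everything up to the new tail executed
 * and complete the wait descriptor by writing its status slot. */
static void fake_hw_kick(unsigned int new_tail, unsigned int wait_index)
{
        (void)new_tail;                 /* writel(..., DMAR_IQT_REG) in the kernel */
        desc_status[wait_index] = QI_DONE;
}

/* Simplified qi_submit_sync(): count descriptors plus one wait descriptor,
 * with one further slot kept free as the head/tail gap. */
static int submit_sync(const struct qi_desc_model *desc, unsigned int count)
{
        if (free_cnt < count + 2)
                return -1;              /* the kernel drops the lock and retries */

        unsigned int index = free_head;
        unsigned int wait_index = (index + count) % RING_LEN;

        for (unsigned int i = 0; i < count; i++) {
                ring[(index + i) % RING_LEN] = desc[i];
                desc_status[(index + i) % RING_LEN] = QI_IN_USE;
        }

        /* Wait descriptor: qw1 carries the address of its own status word so
         * the (fake) hardware can store QI_DONE there on completion. */
        desc_status[wait_index] = QI_IN_USE;
        ring[wait_index].qw1 = (unsigned long long)(uintptr_t)&desc_status[wait_index];

        free_head = (free_head + count + 1) % RING_LEN;
        free_cnt -= count + 1;

        fake_hw_kick(free_head, wait_index);

        while (desc_status[wait_index] != QI_DONE)
                ;                       /* qi_check_fault() + cpu_relax() in the kernel */

        for (unsigned int i = 0; i < count; i++)
                desc_status[(index + i) % RING_LEN] = QI_DONE;

        /* reclaim_free_desc() would now advance free_tail over the QI_DONE run. */
        return 0;
}

int main(void)
{
        struct qi_desc_model d[2] = { { .qw0 = 1 }, { .qw0 = 2 } };

        if (submit_sync(d, 2) == 0)
                printf("free_head=%u free_cnt=%u\n", free_head, free_cnt);  /* 3 and 5 */
        return 0;
}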
1578 struct q_inval *qi = iommu->qi; in __dmar_enable_qi() local
1579 u64 val = virt_to_phys(qi->desc); in __dmar_enable_qi()
1581 qi->free_head = qi->free_tail = 0; in __dmar_enable_qi()
1582 qi->free_cnt = QI_LENGTH; in __dmar_enable_qi()
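__dmar_enable_qi() restarts from an empty ring: free_head and free_tail go back to zero, free_cnt back to the full QI_LENGTH, and virt_to_phys(qi->desc) is the base address that the surrounding code hands to the hardware as the invalidation queue's location. A tiny self-contained sketch of that reset follows; clearing desc_status here is a model choice to make the all-free state explicit, the fragment itself only resets the counters.

#define RING_LEN 8                 /* model parameter; QI_LENGTH in the kernel */

enum { QI_FREE, QI_IN_USE, QI_DONE, QI_ABORT };

static int desc_status[RING_LEN];
static unsigned int free_head, free_tail, free_cnt;

/* Modeled on the __dmar_enable_qi() fragment: empty ring, full free count. */
static void reset_ring(void)
{
        free_head = free_tail = 0;
        free_cnt  = RING_LEN;

        for (unsigned int i = 0; i < RING_LEN; i++)
                desc_status[i] = QI_FREE;
}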
1614 struct q_inval *qi; in dmar_enable_qi() local
1623 if (iommu->qi) in dmar_enable_qi()
1626 iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC); in dmar_enable_qi()
1627 if (!iommu->qi) in dmar_enable_qi()
1630 qi = iommu->qi; in dmar_enable_qi()
1639 kfree(qi); in dmar_enable_qi()
1640 iommu->qi = NULL; in dmar_enable_qi()
1644 qi->desc = page_address(desc_page); in dmar_enable_qi()
1646 qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC); in dmar_enable_qi()
1647 if (!qi->desc_status) { in dmar_enable_qi()
1648 free_page((unsigned long) qi->desc); in dmar_enable_qi()
1649 kfree(qi); in dmar_enable_qi()
1650 iommu->qi = NULL; in dmar_enable_qi()
1654 raw_spin_lock_init(&qi->q_lock); in dmar_enable_qi()
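dmar_enable_qi() builds the same three pieces that free_iommu() releases, in the opposite order, and unwinds on failure: allocate the q_inval structure, then the descriptor page, then the status array via kcalloc (zeroed, which is the QI_FREE state), and finally initialize q_lock; each error path frees whatever already exists and leaves iommu->qi NULL. The user-space sketch below mirrors that allocation and unwind order with malloc/calloc stand-ins, repeating the qi_model layout from the teardown sketch near the top so the block stands alone; qi_model_alloc() and the sizes are hypothetical.

#include <stdlib.h>

#define RING_LEN  8                /* model parameter; QI_LENGTH in the kernel */
#define DESC_SIZE 32               /* model for 1 << shift                     */

enum { QI_FREE, QI_IN_USE, QI_DONE, QI_ABORT };

struct qi_model {
        unsigned char *desc;       /* descriptor ring; alloc_pages() + page_address() */
        int *desc_status;          /* calloc'd, so every slot starts out QI_FREE      */
        unsigned int free_head, free_tail, free_cnt;
};

/* Allocation and unwind order modeled on dmar_enable_qi(): bookkeeping
 * structure, then the ring, then the status array; each failure releases
 * what was already set up so the caller is left with nothing, matching the
 * error paths above that reset iommu->qi to NULL. */
static struct qi_model *qi_model_alloc(void)
{
        struct qi_model *qi = malloc(sizeof(*qi));
        if (!qi)
                return NULL;

        qi->desc = malloc(RING_LEN * DESC_SIZE);
        if (!qi->desc) {
                free(qi);
                return NULL;
        }

        qi->desc_status = calloc(RING_LEN, sizeof(int));
        if (!qi->desc_status) {
                free(qi->desc);
                free(qi);
                return NULL;
        }

        /* raw_spin_lock_init(&qi->q_lock) in the kernel; the counters are
         * set up by the __dmar_enable_qi() reset modeled earlier. */
        qi->free_head = qi->free_tail = 0;
        qi->free_cnt  = RING_LEN;
        return qi;
}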
2004 if (!iommu->qi) in dmar_reenable_qi()