Lines matching refs: iovad
18 static bool iova_rcache_insert(struct iova_domain *iovad,
21 static unsigned long iova_rcache_get(struct iova_domain *iovad,
24 static void init_iova_rcaches(struct iova_domain *iovad);
25 static void free_iova_rcaches(struct iova_domain *iovad);
26 static void fq_destroy_all_entries(struct iova_domain *iovad);
30 init_iova_domain(struct iova_domain *iovad, unsigned long granule, in init_iova_domain() argument
40 spin_lock_init(&iovad->iova_rbtree_lock); in init_iova_domain()
41 iovad->rbroot = RB_ROOT; in init_iova_domain()
42 iovad->cached_node = &iovad->anchor.node; in init_iova_domain()
43 iovad->cached32_node = &iovad->anchor.node; in init_iova_domain()
44 iovad->granule = granule; in init_iova_domain()
45 iovad->start_pfn = start_pfn; in init_iova_domain()
46 iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad)); in init_iova_domain()
47 iovad->max32_alloc_size = iovad->dma_32bit_pfn; in init_iova_domain()
48 iovad->flush_cb = NULL; in init_iova_domain()
49 iovad->fq = NULL; in init_iova_domain()
50 iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR; in init_iova_domain()
51 rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node); in init_iova_domain()
52 rb_insert_color(&iovad->anchor.node, &iovad->rbroot); in init_iova_domain()
53 iovad->best_fit = false; in init_iova_domain()
54 init_iova_rcaches(iovad); in init_iova_domain()
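The init_iova_domain() hits above cover the whole constructor. A minimal caller-side sketch, assuming the usual <linux/iova.h> interface of this kernel and a 4 KiB granule (so iova_shift() is 12 and dma_32bit_pfn works out to 1UL << 20); my_iovad and my_domain_setup() are hypothetical names:

    #include <linux/iova.h>
    #include <linux/sizes.h>

    static struct iova_domain my_iovad;     /* hypothetical per-device domain */

    static void my_domain_setup(void)
    {
            /*
             * granule must be a power of two (the IOMMU page size);
             * start_pfn is the first page frame the allocator may hand out.
             */
            init_iova_domain(&my_iovad, SZ_4K, 1);
    }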
58 bool has_iova_flush_queue(struct iova_domain *iovad) in has_iova_flush_queue() argument
60 return !!iovad->fq; in has_iova_flush_queue()
63 static void free_iova_flush_queue(struct iova_domain *iovad) in free_iova_flush_queue() argument
65 if (!has_iova_flush_queue(iovad)) in free_iova_flush_queue()
68 del_timer_sync(&iovad->fq_timer); in free_iova_flush_queue()
70 fq_destroy_all_entries(iovad); in free_iova_flush_queue()
72 free_percpu(iovad->fq); in free_iova_flush_queue()
74 iovad->fq = NULL; in free_iova_flush_queue()
75 iovad->flush_cb = NULL; in free_iova_flush_queue()
76 iovad->entry_dtor = NULL; in free_iova_flush_queue()
79 int init_iova_flush_queue(struct iova_domain *iovad, in init_iova_flush_queue() argument
85 atomic64_set(&iovad->fq_flush_start_cnt, 0); in init_iova_flush_queue()
86 atomic64_set(&iovad->fq_flush_finish_cnt, 0); in init_iova_flush_queue()
92 iovad->flush_cb = flush_cb; in init_iova_flush_queue()
93 iovad->entry_dtor = entry_dtor; in init_iova_flush_queue()
107 iovad->fq = queue; in init_iova_flush_queue()
109 timer_setup(&iovad->fq_timer, fq_flush_timeout, 0); in init_iova_flush_queue()
110 atomic_set(&iovad->fq_timer_on, 0); in init_iova_flush_queue()
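Registering a flush queue for deferred IOTLB invalidation builds on the same hypothetical domain; my_flush_iotlb() stands in for the driver-specific invalidation and the entry destructor is left NULL:

    static void my_flush_iotlb(struct iova_domain *iovad)
    {
            /* driver-specific IOTLB invalidation for the whole domain */
    }

    static int my_enable_deferred_flush(void)
    {
            /* flush_cb runs from fq_flush_timeout() or when a per-CPU ring fills up */
            return init_iova_flush_queue(&my_iovad, my_flush_iotlb, NULL);
    }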
117 __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn) in __get_cached_rbnode() argument
119 if (limit_pfn <= iovad->dma_32bit_pfn) in __get_cached_rbnode()
120 return iovad->cached32_node; in __get_cached_rbnode()
122 return iovad->cached_node; in __get_cached_rbnode()
126 __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new) in __cached_rbnode_insert_update() argument
128 if (new->pfn_hi < iovad->dma_32bit_pfn) in __cached_rbnode_insert_update()
129 iovad->cached32_node = &new->node; in __cached_rbnode_insert_update()
131 iovad->cached_node = &new->node; in __cached_rbnode_insert_update()
135 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) in __cached_rbnode_delete_update() argument
139 cached_iova = rb_entry(iovad->cached32_node, struct iova, node); in __cached_rbnode_delete_update()
141 (free->pfn_hi < iovad->dma_32bit_pfn && in __cached_rbnode_delete_update()
143 iovad->cached32_node = rb_next(&free->node); in __cached_rbnode_delete_update()
145 if (free->pfn_lo < iovad->dma_32bit_pfn) in __cached_rbnode_delete_update()
146 iovad->max32_alloc_size = iovad->dma_32bit_pfn; in __cached_rbnode_delete_update()
148 cached_iova = rb_entry(iovad->cached_node, struct iova, node); in __cached_rbnode_delete_update()
150 iovad->cached_node = rb_next(&free->node); in __cached_rbnode_delete_update()
182 static unsigned long limit_align_shift(struct iova_domain *iovad, in limit_align_shift() argument
188 - iova_shift(iovad); in limit_align_shift()
192 static unsigned long limit_align_shift(struct iova_domain *iovad, in limit_align_shift() argument
199 static int __alloc_and_insert_iova_range(struct iova_domain *iovad, in __alloc_and_insert_iova_range() argument
208 unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn; in __alloc_and_insert_iova_range()
211 align_mask <<= limit_align_shift(iovad, fls_long(size - 1)); in __alloc_and_insert_iova_range()
214 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in __alloc_and_insert_iova_range()
215 if (limit_pfn <= iovad->dma_32bit_pfn && in __alloc_and_insert_iova_range()
216 size >= iovad->max32_alloc_size) in __alloc_and_insert_iova_range()
219 curr = __get_cached_rbnode(iovad, limit_pfn); in __alloc_and_insert_iova_range()
233 if (low_pfn == iovad->start_pfn && low_pfn_new < limit_pfn) { in __alloc_and_insert_iova_range()
236 curr = &iovad->anchor.node; in __alloc_and_insert_iova_range()
240 iovad->max32_alloc_size = size; in __alloc_and_insert_iova_range()
249 iova_insert_rbtree(&iovad->rbroot, new, prev); in __alloc_and_insert_iova_range()
250 __cached_rbnode_insert_update(iovad, new); in __alloc_and_insert_iova_range()
252 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in __alloc_and_insert_iova_range()
256 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in __alloc_and_insert_iova_range()
260 static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad, in __alloc_and_insert_iova_best_fit() argument
274 align_mask <<= limit_align_shift(iovad, fls_long(size - 1)); in __alloc_and_insert_iova_best_fit()
277 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in __alloc_and_insert_iova_best_fit()
278 curr = &iovad->anchor.node; in __alloc_and_insert_iova_best_fit()
300 gap = curr_iova->pfn_lo - iovad->start_pfn; in __alloc_and_insert_iova_best_fit()
301 if (limit_pfn >= size && new_pfn >= iovad->start_pfn && in __alloc_and_insert_iova_best_fit()
310 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in __alloc_and_insert_iova_best_fit()
319 iova_insert_rbtree(&iovad->rbroot, new, candidate_rb_parent); in __alloc_and_insert_iova_best_fit()
320 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in __alloc_and_insert_iova_best_fit()
388 alloc_iova(struct iova_domain *iovad, unsigned long size, in alloc_iova() argument
399 if (iovad->best_fit) { in alloc_iova()
400 ret = __alloc_and_insert_iova_best_fit(iovad, size, in alloc_iova()
403 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1, in alloc_iova()
417 private_find_iova(struct iova_domain *iovad, unsigned long pfn) in private_find_iova() argument
419 struct rb_node *node = iovad->rbroot.rb_node; in private_find_iova()
421 assert_spin_locked(&iovad->iova_rbtree_lock); in private_find_iova()
437 static void private_free_iova(struct iova_domain *iovad, struct iova *iova) in private_free_iova() argument
439 assert_spin_locked(&iovad->iova_rbtree_lock); in private_free_iova()
440 __cached_rbnode_delete_update(iovad, iova); in private_free_iova()
441 rb_erase(&iova->node, &iovad->rbroot); in private_free_iova()
452 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) in find_iova() argument
458 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in find_iova()
459 iova = private_find_iova(iovad, pfn); in find_iova()
460 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in find_iova()
472 __free_iova(struct iova_domain *iovad, struct iova *iova) in __free_iova() argument
476 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in __free_iova()
477 private_free_iova(iovad, iova); in __free_iova()
478 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in __free_iova()
490 free_iova(struct iova_domain *iovad, unsigned long pfn) in free_iova() argument
492 struct iova *iova = find_iova(iovad, pfn); in free_iova()
495 __free_iova(iovad, iova); in free_iova()
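For the slow path above, a hedged sketch reusing the hypothetical domain (DMA_BIT_MASK() comes from <linux/dma-mapping.h>): alloc_iova() returns a struct iova that is later released with __free_iova(), or looked up again by pfn via find_iova()/free_iova():

    static struct iova *my_alloc_range(unsigned long count)
    {
            /* limit_pfn is the highest allowed page frame, here the 4 GiB mark */
            return alloc_iova(&my_iovad, count,
                              DMA_BIT_MASK(32) >> iova_shift(&my_iovad),
                              true /* size-aligned allocation */);
    }

    static void my_free_range(struct iova *iova)
    {
            __free_iova(&my_iovad, iova);
    }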
511 alloc_iova_fast(struct iova_domain *iovad, unsigned long size, in alloc_iova_fast() argument
517 iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1); in alloc_iova_fast()
522 new_iova = alloc_iova(iovad, size, limit_pfn, true); in alloc_iova_fast()
532 free_cpu_cached_iovas(cpu, iovad); in alloc_iova_fast()
533 free_global_cached_iovas(iovad); in alloc_iova_fast()
550 free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size) in free_iova_fast() argument
552 if (iova_rcache_insert(iovad, pfn, size)) in free_iova_fast()
555 free_iova(iovad, pfn); in free_iova_fast()
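The cached fast path pairs the two functions above. A hypothetical map/unmap helper, following the inclusive-limit convention dma-iommu.c uses when calling alloc_iova_fast():

    static dma_addr_t my_map_pages(unsigned long count)
    {
            unsigned long pfn;

            /*
             * Try the per-CPU rcache first; flush_rcache == true means the
             * caches are flushed and the allocation retried once on failure.
             */
            pfn = alloc_iova_fast(&my_iovad, count,
                                  DMA_BIT_MASK(32) >> iova_shift(&my_iovad), true);
            if (!pfn)
                    return 0;
            return (dma_addr_t)pfn << iova_shift(&my_iovad);
    }

    static void my_unmap_pages(dma_addr_t iova, unsigned long count)
    {
            /* goes back into the rcache when possible, otherwise to the rbtree */
            free_iova_fast(&my_iovad, iova_pfn(&my_iovad, iova), count);
    }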
579 static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq) in fq_ring_free() argument
581 u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt); in fq_ring_free()
591 if (iovad->entry_dtor) in fq_ring_free()
592 iovad->entry_dtor(fq->entries[idx].data); in fq_ring_free()
594 free_iova_fast(iovad, in fq_ring_free()
602 static void iova_domain_flush(struct iova_domain *iovad) in iova_domain_flush() argument
604 atomic64_inc(&iovad->fq_flush_start_cnt); in iova_domain_flush()
605 iovad->flush_cb(iovad); in iova_domain_flush()
606 atomic64_inc(&iovad->fq_flush_finish_cnt); in iova_domain_flush()
609 static void fq_destroy_all_entries(struct iova_domain *iovad) in fq_destroy_all_entries() argument
618 if (!iovad->entry_dtor) in fq_destroy_all_entries()
622 struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu); in fq_destroy_all_entries()
626 iovad->entry_dtor(fq->entries[idx].data); in fq_destroy_all_entries()
632 struct iova_domain *iovad = from_timer(iovad, t, fq_timer); in fq_flush_timeout() local
635 atomic_set(&iovad->fq_timer_on, 0); in fq_flush_timeout()
636 iova_domain_flush(iovad); in fq_flush_timeout()
642 fq = per_cpu_ptr(iovad->fq, cpu); in fq_flush_timeout()
644 fq_ring_free(iovad, fq); in fq_flush_timeout()
649 void queue_iova(struct iova_domain *iovad, in queue_iova() argument
653 struct iova_fq *fq = raw_cpu_ptr(iovad->fq); in queue_iova()
664 fq_ring_free(iovad, fq); in queue_iova()
667 iova_domain_flush(iovad); in queue_iova()
668 fq_ring_free(iovad, fq); in queue_iova()
676 fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt); in queue_iova()
681 if (!atomic_read(&iovad->fq_timer_on) && in queue_iova()
682 !atomic_xchg(&iovad->fq_timer_on, 1)) in queue_iova()
683 mod_timer(&iovad->fq_timer, in queue_iova()
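queue_iova() is the deferred counterpart of free_iova_fast(); continuing the hypothetical helpers, an unmap that postpones the free until after the next IOTLB flush might look like:

    static void my_unmap_pages_deferred(dma_addr_t iova, unsigned long count)
    {
            /*
             * The range is parked in the per-CPU flush queue and only recycled
             * after iova_domain_flush() has run, either from fq_flush_timeout()
             * or when the ring fills up.  The last argument is opaque data
             * passed to the entry destructor, unused here.
             */
            queue_iova(&my_iovad, iova_pfn(&my_iovad, iova), count, 0);
    }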
693 void put_iova_domain(struct iova_domain *iovad) in put_iova_domain() argument
697 free_iova_flush_queue(iovad); in put_iova_domain()
698 free_iova_rcaches(iovad); in put_iova_domain()
699 rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node) in put_iova_domain()
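Teardown mirrors the constructor; in the hypothetical sketch it is a single call that drops the flush queue, the rcaches and every remaining IOVA node:

    static void my_domain_teardown(void)
    {
            put_iova_domain(&my_iovad);
    }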
730 __insert_new_range(struct iova_domain *iovad, in __insert_new_range() argument
737 iova_insert_rbtree(&iovad->rbroot, iova, NULL); in __insert_new_range()
761 reserve_iova(struct iova_domain *iovad, in reserve_iova() argument
770 if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad)))) in reserve_iova()
773 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in reserve_iova()
774 for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { in reserve_iova()
790 iova = __insert_new_range(iovad, pfn_lo, pfn_hi); in reserve_iova()
793 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in reserve_iova()
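reserve_iova() carves a fixed range out of the allocator, e.g. an MSI doorbell window. A hedged sketch with the same hypothetical domain; iova_pfn() converts a DMA address into a page frame number and pfn_lo/pfn_hi are inclusive:

    static int my_reserve_window(dma_addr_t base, size_t size)
    {
            unsigned long lo = iova_pfn(&my_iovad, base);
            unsigned long hi = iova_pfn(&my_iovad, base + size - 1);

            /* NULL return means the reservation could not be inserted */
            return reserve_iova(&my_iovad, lo, hi) ? 0 : -ENOMEM;
    }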
829 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, in split_and_remove_iova() argument
835 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in split_and_remove_iova()
847 __cached_rbnode_delete_update(iovad, iova); in split_and_remove_iova()
848 rb_erase(&iova->node, &iovad->rbroot); in split_and_remove_iova()
851 iova_insert_rbtree(&iovad->rbroot, prev, NULL); in split_and_remove_iova()
855 iova_insert_rbtree(&iovad->rbroot, next, NULL); in split_and_remove_iova()
858 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in split_and_remove_iova()
863 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in split_and_remove_iova()
901 iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad) in iova_magazine_free_pfns() argument
909 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); in iova_magazine_free_pfns()
912 struct iova *iova = private_find_iova(iovad, mag->pfns[i]); in iova_magazine_free_pfns()
917 private_free_iova(iovad, iova); in iova_magazine_free_pfns()
920 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); in iova_magazine_free_pfns()
962 static void init_iova_rcaches(struct iova_domain *iovad) in init_iova_rcaches() argument
970 rcache = &iovad->rcaches[i]; in init_iova_rcaches()
991 static bool __iova_rcache_insert(struct iova_domain *iovad, in __iova_rcache_insert() argument
1032 iova_magazine_free_pfns(mag_to_free, iovad); in __iova_rcache_insert()
1039 static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn, in iova_rcache_insert() argument
1047 return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn); in iova_rcache_insert()
1094 static unsigned long iova_rcache_get(struct iova_domain *iovad, in iova_rcache_get() argument
1103 return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size); in iova_rcache_get()
1109 static void free_iova_rcaches(struct iova_domain *iovad) in free_iova_rcaches() argument
1117 rcache = &iovad->rcaches[i]; in free_iova_rcaches()
1132 void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad) in free_cpu_cached_iovas() argument
1140 rcache = &iovad->rcaches[i]; in free_cpu_cached_iovas()
1143 iova_magazine_free_pfns(cpu_rcache->loaded, iovad); in free_cpu_cached_iovas()
1144 iova_magazine_free_pfns(cpu_rcache->prev, iovad); in free_cpu_cached_iovas()
1152 void free_global_cached_iovas(struct iova_domain *iovad) in free_global_cached_iovas() argument
1159 rcache = &iovad->rcaches[i]; in free_global_cached_iovas()
1162 iova_magazine_free_pfns(rcache->depot[j], iovad); in free_global_cached_iovas()