Lines Matching refs:subscriptions

93 mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)  in mn_itree_is_invalidating()  argument
95 lockdep_assert_held(&subscriptions->lock); in mn_itree_is_invalidating()
96 return subscriptions->invalidate_seq & 1; in mn_itree_is_invalidating()
100 mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions, in mn_itree_inv_start_range() argument
107 spin_lock(&subscriptions->lock); in mn_itree_inv_start_range()
108 subscriptions->active_invalidate_ranges++; in mn_itree_inv_start_range()
109 node = interval_tree_iter_first(&subscriptions->itree, range->start, in mn_itree_inv_start_range()
112 subscriptions->invalidate_seq |= 1; in mn_itree_inv_start_range()
117 *seq = subscriptions->invalidate_seq; in mn_itree_inv_start_range()
118 spin_unlock(&subscriptions->lock); in mn_itree_inv_start_range()
135 static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions) in mn_itree_inv_end() argument
140 spin_lock(&subscriptions->lock); in mn_itree_inv_end()
141 if (--subscriptions->active_invalidate_ranges || in mn_itree_inv_end()
142 !mn_itree_is_invalidating(subscriptions)) { in mn_itree_inv_end()
143 spin_unlock(&subscriptions->lock); in mn_itree_inv_end()
148 subscriptions->invalidate_seq++; in mn_itree_inv_end()
157 &subscriptions->deferred_list, in mn_itree_inv_end()
161 &subscriptions->itree); in mn_itree_inv_end()
164 &subscriptions->itree); in mn_itree_inv_end()
167 spin_unlock(&subscriptions->lock); in mn_itree_inv_end()
169 wake_up_all(&subscriptions->wq); in mn_itree_inv_end()
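
The three helpers listed above (mn_itree_is_invalidating(), mn_itree_inv_start_range(), mn_itree_inv_end()) implement a collision-retry scheme around subscriptions->invalidate_seq: the low bit doubles as an "overlapping invalidation in flight" flag, set at line 112 and cleared by the increment at line 148 once the last active range drains. A minimal sketch of just that convention, for illustration only (this is not the kernel code):

struct seq_demo {
	unsigned long invalidate_seq;	/* even: idle, odd: invalidating */
	unsigned int active_ranges;
};

static bool demo_is_invalidating(struct seq_demo *s)
{
	return s->invalidate_seq & 1;		/* cf. line 96 */
}

static void demo_start(struct seq_demo *s, bool collides)
{
	s->active_ranges++;
	if (collides)
		s->invalidate_seq |= 1;		/* cf. line 112 */
}

static void demo_end(struct seq_demo *s)
{
	if (--s->active_ranges || !demo_is_invalidating(s))
		return;
	s->invalidate_seq++;			/* even again: new generation, cf. line 148 */
}
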
194 struct mmu_notifier_subscriptions *subscriptions = in mmu_interval_read_begin() local
238 spin_lock(&subscriptions->lock); in mmu_interval_read_begin()
241 is_invalidating = seq == subscriptions->invalidate_seq; in mmu_interval_read_begin()
242 spin_unlock(&subscriptions->lock); in mmu_interval_read_begin()
254 wait_event(subscriptions->wq, in mmu_interval_read_begin()
255 READ_ONCE(subscriptions->invalidate_seq) != seq); in mmu_interval_read_begin()
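
mmu_interval_read_begin() hands back the sequence number that the driver later re-checks with mmu_interval_read_retry(); lines 238-255 are the slow path that blocks on subscriptions->wq while a colliding invalidation is still running. A hedged sketch of the intended caller-side pattern, where struct my_driver, driver_lock and the page-collection step are placeholders rather than kernel APIs:

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct my_driver {
	struct mmu_interval_notifier notifier;	/* embedded interval subscription */
	struct mutex driver_lock;		/* also taken by the invalidate callback */
};

static int my_driver_populate(struct my_driver *drv)
{
	unsigned long seq;

again:
	seq = mmu_interval_read_begin(&drv->notifier);
	/* gather pages / compute the new mapping here, without driver_lock held */
	mutex_lock(&drv->driver_lock);
	if (mmu_interval_read_retry(&drv->notifier, seq)) {
		/* an invalidation raced with us: drop the lock and start over */
		mutex_unlock(&drv->driver_lock);
		goto again;
	}
	/* install the mapping; driver_lock excludes the invalidate callback */
	mutex_unlock(&drv->driver_lock);
	return 0;
}
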
267 static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions, in mn_itree_release() argument
282 mn_itree_inv_start_range(subscriptions, &range, &cur_seq); in mn_itree_release()
290 mn_itree_inv_end(subscriptions); in mn_itree_release()
305 static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions, in mn_hlist_release() argument
316 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, in mn_hlist_release()
327 spin_lock(&subscriptions->lock); in mn_hlist_release()
328 while (unlikely(!hlist_empty(&subscriptions->list))) { in mn_hlist_release()
329 subscription = hlist_entry(subscriptions->list.first, in mn_hlist_release()
339 spin_unlock(&subscriptions->lock); in mn_hlist_release()
356 struct mmu_notifier_subscriptions *subscriptions = in __mmu_notifier_release() local
359 if (subscriptions->has_itree) in __mmu_notifier_release()
360 mn_itree_release(subscriptions, mm); in __mmu_notifier_release()
362 if (!hlist_empty(&subscriptions->list)) in __mmu_notifier_release()
363 mn_hlist_release(subscriptions, mm); in __mmu_notifier_release()
450 static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions, in mn_itree_invalidate() argument
457 mn_itree_inv_start_range(subscriptions, range, &cur_seq); in mn_itree_invalidate()
477 mn_itree_inv_end(subscriptions); in mn_itree_invalidate()
482 struct mmu_notifier_subscriptions *subscriptions, in mn_hlist_invalidate_range_start() argument
490 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, in mn_hlist_invalidate_range_start()
528 hlist_for_each_entry_rcu(subscription, &subscriptions->list, in mn_hlist_invalidate_range_start()
544 struct mmu_notifier_subscriptions *subscriptions = in __mmu_notifier_invalidate_range_start() local
548 if (subscriptions->has_itree) { in __mmu_notifier_invalidate_range_start()
549 ret = mn_itree_invalidate(subscriptions, range); in __mmu_notifier_invalidate_range_start()
553 if (!hlist_empty(&subscriptions->list)) in __mmu_notifier_invalidate_range_start()
554 return mn_hlist_invalidate_range_start(subscriptions, range); in __mmu_notifier_invalidate_range_start()
559 mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions, in mn_hlist_invalidate_end() argument
566 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist, in mn_hlist_invalidate_end()
601 struct mmu_notifier_subscriptions *subscriptions = in __mmu_notifier_invalidate_range_end() local
605 if (subscriptions->has_itree) in __mmu_notifier_invalidate_range_end()
606 mn_itree_inv_end(subscriptions); in __mmu_notifier_invalidate_range_end()
608 if (!hlist_empty(&subscriptions->list)) in __mmu_notifier_invalidate_range_end()
609 mn_hlist_invalidate_end(subscriptions, range, only_end); in __mmu_notifier_invalidate_range_end()
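
__mmu_notifier_invalidate_range_start()/_end() fan out to both populations: interval-tree subscriptions via mn_itree_invalidate()/mn_itree_inv_end() and classic list subscriptions via the mn_hlist_* helpers. On the interval-tree side the driver's invalidate callback is expected to take the same lock as its read side, publish the new sequence with mmu_interval_set_seq(), and tear down its mapping. A hedged sketch, reusing the my_driver placeholder from the earlier example:

static bool my_invalidate(struct mmu_interval_notifier *mni,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	struct my_driver *drv = container_of(mni, struct my_driver, notifier);

	if (!mmu_notifier_range_blockable(range))
		return false;	/* ask the core to retry in a blockable context */

	mutex_lock(&drv->driver_lock);
	mmu_interval_set_seq(mni, cur_seq);	/* makes stale read_retry() fail */
	/* unmap / invalidate the device view of [range->start, range->end) */
	mutex_unlock(&drv->driver_lock);
	return true;
}
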
651 static void init_subscriptions(struct mmu_notifier_subscriptions *subscriptions) in init_subscriptions() argument
653 INIT_HLIST_HEAD(&subscriptions->list); in init_subscriptions()
654 spin_lock_init(&subscriptions->lock); in init_subscriptions()
655 subscriptions->invalidate_seq = 2; in init_subscriptions()
656 subscriptions->itree = RB_ROOT_CACHED; in init_subscriptions()
657 init_waitqueue_head(&subscriptions->wq); in init_subscriptions()
658 INIT_HLIST_HEAD(&subscriptions->deferred_list); in init_subscriptions()
669 struct mmu_notifier_subscriptions *subscriptions = NULL; in __mmu_notifier_register() local
688 subscriptions = kzalloc( in __mmu_notifier_register()
690 if (!subscriptions) in __mmu_notifier_register()
693 init_subscriptions(subscriptions); in __mmu_notifier_register()
720 if (subscriptions) in __mmu_notifier_register()
721 smp_store_release(&mm->notifier_subscriptions, subscriptions); in __mmu_notifier_register()
743 kfree(subscriptions); in __mmu_notifier_register()
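
Lines 669-743 are the registration slow path: the per-mm struct mmu_notifier_subscriptions container is allocated on first use, initialised by init_subscriptions(), and published with smp_store_release() at line 721 so that smp_load_acquire() readers (e.g. line 1050) observe it fully initialised. Callers reach this path through mmu_notifier_register(); a hedged sketch of a classic (list-based) subscriber, with placeholder callbacks:

#include <linux/mmu_notifier.h>

static void my_release(struct mmu_notifier *subscription, struct mm_struct *mm)
{
	/* the address space is being torn down; stop all use of it */
}

static const struct mmu_notifier_ops my_notifier_ops = {
	.release = my_release,
	/* .invalidate_range_start / .invalidate_range_end as needed */
};

static struct mmu_notifier my_subscription = {
	.ops = &my_notifier_ops,
};

static int my_attach(struct mm_struct *mm)
{
	/* takes mmap_lock and allocates mm->notifier_subscriptions on demand */
	return mmu_notifier_register(&my_subscription, mm);
}
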
960 struct mmu_notifier_subscriptions *subscriptions, unsigned long start, in __mmu_interval_notifier_insert() argument
996 spin_lock(&subscriptions->lock); in __mmu_interval_notifier_insert()
997 if (subscriptions->active_invalidate_ranges) { in __mmu_interval_notifier_insert()
998 if (mn_itree_is_invalidating(subscriptions)) in __mmu_interval_notifier_insert()
1000 &subscriptions->deferred_list); in __mmu_interval_notifier_insert()
1002 subscriptions->invalidate_seq |= 1; in __mmu_interval_notifier_insert()
1004 &subscriptions->itree); in __mmu_interval_notifier_insert()
1006 interval_sub->invalidate_seq = subscriptions->invalidate_seq; in __mmu_interval_notifier_insert()
1008 WARN_ON(mn_itree_is_invalidating(subscriptions)); in __mmu_interval_notifier_insert()
1016 subscriptions->invalidate_seq - 1; in __mmu_interval_notifier_insert()
1018 &subscriptions->itree); in __mmu_interval_notifier_insert()
1020 spin_unlock(&subscriptions->lock); in __mmu_interval_notifier_insert()
1045 struct mmu_notifier_subscriptions *subscriptions; in mmu_interval_notifier_insert() local
1050 subscriptions = smp_load_acquire(&mm->notifier_subscriptions); in mmu_interval_notifier_insert()
1051 if (!subscriptions || !subscriptions->has_itree) { in mmu_interval_notifier_insert()
1055 subscriptions = mm->notifier_subscriptions; in mmu_interval_notifier_insert()
1057 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions, in mmu_interval_notifier_insert()
1067 struct mmu_notifier_subscriptions *subscriptions = in mmu_interval_notifier_insert_locked() local
1073 if (!subscriptions || !subscriptions->has_itree) { in mmu_interval_notifier_insert_locked()
1077 subscriptions = mm->notifier_subscriptions; in mmu_interval_notifier_insert_locked()
1079 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions, in mmu_interval_notifier_insert_locked()
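
Both insert variants first make sure mm->notifier_subscriptions exists and has the interval tree enabled (has_itree) before handing off to __mmu_interval_notifier_insert() at lines 960-1020, which links the subscription either into the tree or, during a live invalidation, onto deferred_list. Typical caller-side usage, again with the placeholder my_driver type from the sketches above:

static const struct mmu_interval_notifier_ops my_interval_ops = {
	.invalidate = my_invalidate,	/* see the callback sketch above */
};

static int my_track_range(struct my_driver *drv, struct mm_struct *mm,
			  unsigned long start, unsigned long length)
{
	return mmu_interval_notifier_insert(&drv->notifier, mm, start, length,
					    &my_interval_ops);
}
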
1085 mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions, in mmu_interval_seq_released() argument
1090 spin_lock(&subscriptions->lock); in mmu_interval_seq_released()
1091 ret = subscriptions->invalidate_seq != seq; in mmu_interval_seq_released()
1092 spin_unlock(&subscriptions->lock); in mmu_interval_seq_released()
1109 struct mmu_notifier_subscriptions *subscriptions = in mmu_interval_notifier_remove() local
1115 spin_lock(&subscriptions->lock); in mmu_interval_notifier_remove()
1116 if (mn_itree_is_invalidating(subscriptions)) { in mmu_interval_notifier_remove()
1125 &subscriptions->deferred_list); in mmu_interval_notifier_remove()
1126 seq = subscriptions->invalidate_seq; in mmu_interval_notifier_remove()
1131 &subscriptions->itree); in mmu_interval_notifier_remove()
1133 spin_unlock(&subscriptions->lock); in mmu_interval_notifier_remove()
1142 wait_event(subscriptions->wq, in mmu_interval_notifier_remove()
1143 mmu_interval_seq_released(subscriptions, seq)); in mmu_interval_notifier_remove()
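
Removal cannot unlink a subscription while an overlapping invalidation may still be using it, so lines 1115-1143 either take it out of the tree immediately or defer the unlink and then sleep in wait_event() until mmu_interval_seq_released() sees a newer generation. Callers must therefore be able to sleep; a hedged teardown sketch:

static void my_untrack_range(struct my_driver *drv)
{
	/* may block until any in-flight invalidation over this range completes */
	mmu_interval_notifier_remove(&drv->notifier);
}
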
1183 struct mmu_notifier_subscriptions *subscriptions; in mmu_notifier_subscriptions_init() local
1186 subscriptions = kzalloc( in mmu_notifier_subscriptions_init()
1188 if (!subscriptions) in mmu_notifier_subscriptions_init()
1193 kfree(subscriptions); in mmu_notifier_subscriptions_init()
1198 init_subscriptions(subscriptions); in mmu_notifier_subscriptions_init()
1199 subscriptions->has_itree = true; in mmu_notifier_subscriptions_init()
1200 subscriptions->hdr.valid = false; in mmu_notifier_subscriptions_init()
1201 subscriptions->hdr.mmu_notifier_lock = sem; in mmu_notifier_subscriptions_init()
1202 mm->notifier_subscriptions = subscriptions; in mmu_notifier_subscriptions_init()