Lines matching refs:gl (references to the glock pointer gl)

54 	struct gfs2_glock *gl;		/* current glock struct        */  member
58 typedef void (*glock_examiner) (struct gfs2_glock * gl);
60 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
116 static void wake_up_glock(struct gfs2_glock *gl) in wake_up_glock() argument
118 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); in wake_up_glock()
121 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); in wake_up_glock()
126 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); in gfs2_glock_dealloc() local
128 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_dealloc()
129 if (gl->gl_ops->go_flags & GLOF_ASPACE) in gfs2_glock_dealloc()
130 kmem_cache_free(gfs2_glock_aspace_cachep, gl); in gfs2_glock_dealloc()
132 kmem_cache_free(gfs2_glock_cachep, gl); in gfs2_glock_dealloc()
148 static bool glock_blocked_by_withdraw(struct gfs2_glock *gl) in glock_blocked_by_withdraw() argument
150 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in glock_blocked_by_withdraw()
154 if (gl->gl_ops->go_flags & GLOF_NONDISK) in glock_blocked_by_withdraw()
157 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr) in glock_blocked_by_withdraw()
162 void gfs2_glock_free(struct gfs2_glock *gl) in gfs2_glock_free() argument
164 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_free()
166 gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0); in gfs2_glock_free()
167 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); in gfs2_glock_free()
169 wake_up_glock(gl); in gfs2_glock_free()
170 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); in gfs2_glock_free()
181 void gfs2_glock_hold(struct gfs2_glock *gl) in gfs2_glock_hold() argument
183 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in gfs2_glock_hold()
184 lockref_get(&gl->gl_lockref); in gfs2_glock_hold()
194 static int demote_ok(const struct gfs2_glock *gl) in demote_ok() argument
196 const struct gfs2_glock_operations *glops = gl->gl_ops; in demote_ok()
198 if (gl->gl_state == LM_ST_UNLOCKED) in demote_ok()
200 if (!list_empty(&gl->gl_holders)) in demote_ok()
203 return glops->go_demote_ok(gl); in demote_ok()
208 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) in gfs2_glock_add_to_lru() argument
210 if (!(gl->gl_ops->go_flags & GLOF_LRU)) in gfs2_glock_add_to_lru()
215 list_del(&gl->gl_lru); in gfs2_glock_add_to_lru()
216 list_add_tail(&gl->gl_lru, &lru_list); in gfs2_glock_add_to_lru()
218 if (!test_bit(GLF_LRU, &gl->gl_flags)) { in gfs2_glock_add_to_lru()
219 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_add_to_lru()
226 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) in gfs2_glock_remove_from_lru() argument
228 if (!(gl->gl_ops->go_flags & GLOF_LRU)) in gfs2_glock_remove_from_lru()
232 if (test_bit(GLF_LRU, &gl->gl_flags)) { in gfs2_glock_remove_from_lru()
233 list_del_init(&gl->gl_lru); in gfs2_glock_remove_from_lru()
235 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_remove_from_lru()
244 static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { in __gfs2_glock_queue_work() argument
245 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) { in __gfs2_glock_queue_work()
252 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2); in __gfs2_glock_queue_work()
253 gl->gl_lockref.count--; in __gfs2_glock_queue_work()
257 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { in gfs2_glock_queue_work() argument
258 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_queue_work()
259 __gfs2_glock_queue_work(gl, delay); in gfs2_glock_queue_work()
260 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_queue_work()
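Read together, the two helpers above encode the queueing rule for glock work: __gfs2_glock_queue_work() drops a glock reference when queue_delayed_work() reports the work already queued, so its callers must hold gl_lockref.lock and normally take an extra reference first, while gfs2_glock_queue_work() wraps that locking for everyone else. A minimal caller sketch, using only functions that appear in this listing; the surrounding context is illustrative:

	spin_lock(&gl->gl_lockref.lock);
	gl->gl_lockref.count++;            /* reference the queued work will later drop */
	__gfs2_glock_queue_work(gl, 0);    /* safe: lock held and extra reference taken */
	spin_unlock(&gl->gl_lockref.lock);

The same increment-then-queue shape recurs further down in run_queue(), gfs2_glock_nq() and gfs2_glock_dq().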
263 static void __gfs2_glock_put(struct gfs2_glock *gl) in __gfs2_glock_put() argument
265 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in __gfs2_glock_put()
266 struct address_space *mapping = gfs2_glock2aspace(gl); in __gfs2_glock_put()
268 lockref_mark_dead(&gl->gl_lockref); in __gfs2_glock_put()
270 gfs2_glock_remove_from_lru(gl); in __gfs2_glock_put()
271 spin_unlock(&gl->gl_lockref.lock); in __gfs2_glock_put()
272 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); in __gfs2_glock_put()
276 GLOCK_BUG_ON(gl, mapping->nrpages || in __gfs2_glock_put()
279 trace_gfs2_glock_put(gl); in __gfs2_glock_put()
280 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); in __gfs2_glock_put()
286 void gfs2_glock_queue_put(struct gfs2_glock *gl) in gfs2_glock_queue_put() argument
288 gfs2_glock_queue_work(gl, 0); in gfs2_glock_queue_put()
297 void gfs2_glock_put(struct gfs2_glock *gl) in gfs2_glock_put() argument
299 if (lockref_put_or_lock(&gl->gl_lockref)) in gfs2_glock_put()
302 __gfs2_glock_put(gl); in gfs2_glock_put()
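gfs2_glock_hold() and gfs2_glock_put() bound the glock's lifetime through gl_lockref: hold() bugs out on a dead lockref, and the final put() tears the glock down via __gfs2_glock_put() (LRU removal, lm_put_lock(), and ultimately gfs2_glock_free()/call_rcu()). A hedged pairing sketch; only the two gfs2_* calls come from the listing, the middle line is a placeholder:

	gfs2_glock_hold(gl);          /* caller must already own a live reference */
	do_something_with(gl);        /* placeholder for work done while the glock is pinned */
	gfs2_glock_put(gl);           /* dropping the last reference frees via lm_put_lock()/RCU */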
313 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) in may_grant() argument
315 	const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list); in may_grant()
319 if (gl->gl_state == gh->gh_state) in may_grant()
323 if (gl->gl_state == LM_ST_EXCLUSIVE) { in may_grant()
329 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) in may_grant()
351 static void do_error(struct gfs2_glock *gl, const int ret) in do_error() argument
355 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_error()
378 static int do_promote(struct gfs2_glock *gl) in do_promote() argument
379 __releases(&gl->gl_lockref.lock) in do_promote()
380 __acquires(&gl->gl_lockref.lock) in do_promote()
382 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_promote()
387 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_promote()
390 if (may_grant(gl, gh)) { in do_promote()
391 if (gh->gh_list.prev == &gl->gl_holders && in do_promote()
393 spin_unlock(&gl->gl_lockref.lock); in do_promote()
396 spin_lock(&gl->gl_lockref.lock); in do_promote()
416 if (gh->gh_list.prev == &gl->gl_holders) in do_promote()
418 do_error(gl, 0); in do_promote()
429 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) in find_first_waiter() argument
433 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in find_first_waiter()
447 static void state_change(struct gfs2_glock *gl, unsigned int new_state) in state_change() argument
451 held1 = (gl->gl_state != LM_ST_UNLOCKED); in state_change()
455 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in state_change()
457 gl->gl_lockref.count++; in state_change()
459 gl->gl_lockref.count--; in state_change()
461 if (new_state != gl->gl_target) in state_change()
463 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, in state_change()
465 gl->gl_state = new_state; in state_change()
466 gl->gl_tchange = jiffies; in state_change()
469 static void gfs2_set_demote(struct gfs2_glock *gl) in gfs2_set_demote() argument
471 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_set_demote()
473 set_bit(GLF_DEMOTE, &gl->gl_flags); in gfs2_set_demote()
478 static void gfs2_demote_wake(struct gfs2_glock *gl) in gfs2_demote_wake() argument
480 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_demote_wake()
481 clear_bit(GLF_DEMOTE, &gl->gl_flags); in gfs2_demote_wake()
483 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); in gfs2_demote_wake()
493 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) in finish_xmote() argument
495 const struct gfs2_glock_operations *glops = gl->gl_ops; in finish_xmote()
500 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
501 trace_gfs2_glock_state_change(gl, state); in finish_xmote()
502 state_change(gl, state); in finish_xmote()
503 gh = find_first_waiter(gl); in finish_xmote()
506 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && in finish_xmote()
507 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) in finish_xmote()
508 gl->gl_target = LM_ST_UNLOCKED; in finish_xmote()
511 if (unlikely(state != gl->gl_target)) { in finish_xmote()
512 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { in finish_xmote()
516 list_move_tail(&gh->gh_list, &gl->gl_holders); in finish_xmote()
517 gh = find_first_waiter(gl); in finish_xmote()
518 gl->gl_target = gh->gh_state; in finish_xmote()
524 gl->gl_target = gl->gl_state; in finish_xmote()
525 do_error(gl, ret); in finish_xmote()
533 do_xmote(gl, gh, gl->gl_target); in finish_xmote()
538 do_xmote(gl, gh, LM_ST_UNLOCKED); in finish_xmote()
541 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", in finish_xmote()
542 gl->gl_target, state); in finish_xmote()
543 GLOCK_BUG_ON(gl, 1); in finish_xmote()
545 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
550 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) in finish_xmote()
551 gfs2_demote_wake(gl); in finish_xmote()
554 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
555 rv = glops->go_xmote_bh(gl, gh); in finish_xmote()
556 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
558 do_error(gl, rv); in finish_xmote()
562 rv = do_promote(gl); in finish_xmote()
567 clear_bit(GLF_LOCK, &gl->gl_flags); in finish_xmote()
569 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
572 static bool is_system_glock(struct gfs2_glock *gl) in is_system_glock() argument
574 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in is_system_glock()
577 if (gl == m_ip->i_gl) in is_system_glock()
590 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) in do_xmote() argument
591 __releases(&gl->gl_lockref.lock) in do_xmote()
592 __acquires(&gl->gl_lockref.lock) in do_xmote()
594 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_xmote()
595 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in do_xmote()
599 if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) && in do_xmote()
604 GLOCK_BUG_ON(gl, gl->gl_state == target); in do_xmote()
605 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); in do_xmote()
614 &gl->gl_flags)) in do_xmote()
616 do_error(gl, 0); /* Fail queued try locks */ in do_xmote()
618 gl->gl_req = target; in do_xmote()
619 set_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
620 if ((gl->gl_req == LM_ST_UNLOCKED) || in do_xmote()
621 (gl->gl_state == LM_ST_EXCLUSIVE) || in do_xmote()
623 clear_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
624 spin_unlock(&gl->gl_lockref.lock); in do_xmote()
626 ret = glops->go_sync(gl); in do_xmote()
634 gfs2_dump_glock(NULL, gl, true); in do_xmote()
639 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) { in do_xmote()
647 if ((atomic_read(&gl->gl_ail_count) != 0) && in do_xmote()
649 gfs2_glock_assert_warn(gl, in do_xmote()
650 !atomic_read(&gl->gl_ail_count)); in do_xmote()
651 gfs2_dump_glock(NULL, gl, true); in do_xmote()
653 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); in do_xmote()
654 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
658 gfs2_glock_hold(gl); in do_xmote()
682 if (glock_blocked_by_withdraw(gl) && in do_xmote()
685 if (!is_system_glock(gl)) { in do_xmote()
686 gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); in do_xmote()
689 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
695 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); in do_xmote()
696 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && in do_xmote()
699 finish_xmote(gl, target); in do_xmote()
700 gfs2_glock_queue_work(gl, 0); in do_xmote()
703 GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp)); in do_xmote()
706 finish_xmote(gl, target); in do_xmote()
707 gfs2_glock_queue_work(gl, 0); in do_xmote()
710 spin_lock(&gl->gl_lockref.lock); in do_xmote()
718 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) in find_first_holder() argument
722 if (!list_empty(&gl->gl_holders)) { in find_first_holder()
723 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); in find_first_holder()
737 static void run_queue(struct gfs2_glock *gl, const int nonblock) in run_queue() argument
738 __releases(&gl->gl_lockref.lock) in run_queue()
739 __acquires(&gl->gl_lockref.lock) in run_queue()
744 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) in run_queue()
747 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); in run_queue()
749 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && in run_queue()
750 gl->gl_demote_state != gl->gl_state) { in run_queue()
751 if (find_first_holder(gl)) in run_queue()
755 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); in run_queue()
756 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); in run_queue()
757 gl->gl_target = gl->gl_demote_state; in run_queue()
759 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) in run_queue()
760 gfs2_demote_wake(gl); in run_queue()
761 ret = do_promote(gl); in run_queue()
766 gh = find_first_waiter(gl); in run_queue()
767 gl->gl_target = gh->gh_state; in run_queue()
769 do_error(gl, 0); /* Fail queued try locks */ in run_queue()
771 do_xmote(gl, gh, gl->gl_target); in run_queue()
776 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
778 gl->gl_lockref.count++; in run_queue()
779 __gfs2_glock_queue_work(gl, 0); in run_queue()
783 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
788 void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation) in gfs2_inode_remember_delete() argument
790 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; in gfs2_inode_remember_delete()
798 bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation) in gfs2_inode_already_deleted() argument
800 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; in gfs2_inode_already_deleted()
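The two inode LVB helpers above record and query the generation of an inode this node has already deleted, using the glock's lock value block (gl_lksb.sb_lvbptr). A hedged sketch of how a delete path could consult them; generation stands in for the inode's no_formal_ino and the deallocation step is elided:

	if (gfs2_inode_already_deleted(gl, generation))
		return;                                   /* an earlier pass already freed it */
	/* ... deallocate the on-disk inode ... */
	gfs2_inode_remember_delete(gl, generation);       /* record the deletion in the LVB */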
807 static void gfs2_glock_poke(struct gfs2_glock *gl) in gfs2_glock_poke() argument
813 gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh); in gfs2_glock_poke()
820 static bool gfs2_try_evict(struct gfs2_glock *gl) in gfs2_try_evict() argument
835 spin_lock(&gl->gl_lockref.lock); in gfs2_try_evict()
836 ip = gl->gl_object; in gfs2_try_evict()
839 spin_unlock(&gl->gl_lockref.lock); in gfs2_try_evict()
843 gl->gl_no_formal_ino = ip->i_no_formal_ino; in gfs2_try_evict()
849 spin_lock(&gl->gl_lockref.lock); in gfs2_try_evict()
850 ip = gl->gl_object; in gfs2_try_evict()
856 spin_unlock(&gl->gl_lockref.lock); in gfs2_try_evict()
869 struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete); in delete_work_func() local
870 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in delete_work_func()
872 u64 no_addr = gl->gl_name.ln_number; in delete_work_func()
874 spin_lock(&gl->gl_lockref.lock); in delete_work_func()
875 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); in delete_work_func()
876 spin_unlock(&gl->gl_lockref.lock); in delete_work_func()
881 if (test_bit(GLF_INODE_CREATING, &gl->gl_flags)) in delete_work_func()
884 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { in delete_work_func()
902 if (gfs2_try_evict(gl)) { in delete_work_func()
903 if (gfs2_queue_delete_work(gl, 5 * HZ)) in delete_work_func()
909 inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino, in delete_work_func()
916 gfs2_glock_put(gl); in delete_work_func()
922 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); in glock_work_func() local
925 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { in glock_work_func()
926 finish_xmote(gl, gl->gl_reply); in glock_work_func()
929 spin_lock(&gl->gl_lockref.lock); in glock_work_func()
930 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in glock_work_func()
931 gl->gl_state != LM_ST_UNLOCKED && in glock_work_func()
932 gl->gl_demote_state != LM_ST_EXCLUSIVE) { in glock_work_func()
935 holdtime = gl->gl_tchange + gl->gl_hold_time; in glock_work_func()
940 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in glock_work_func()
941 gfs2_set_demote(gl); in glock_work_func()
944 run_queue(gl, 0); in glock_work_func()
948 if (gl->gl_name.ln_type != LM_TYPE_INODE) in glock_work_func()
950 __gfs2_glock_queue_work(gl, delay); in glock_work_func()
958 gl->gl_lockref.count -= drop_refs; in glock_work_func()
959 if (!gl->gl_lockref.count) { in glock_work_func()
960 __gfs2_glock_put(gl); in glock_work_func()
963 spin_unlock(&gl->gl_lockref.lock); in glock_work_func()
971 struct gfs2_glock *gl; in find_insert_glock() local
981 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table, in find_insert_glock()
983 if (IS_ERR(gl)) in find_insert_glock()
986 gl = rhashtable_lookup_fast(&gl_hash_table, in find_insert_glock()
989 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) { in find_insert_glock()
997 return gl; in find_insert_glock()
1021 struct gfs2_glock *gl, *tmp; in gfs2_glock_get() local
1026 gl = find_insert_glock(&name, NULL); in gfs2_glock_get()
1027 if (gl) { in gfs2_glock_get()
1028 *glp = gl; in gfs2_glock_get()
1038 gl = kmem_cache_alloc(cachep, GFP_NOFS); in gfs2_glock_get()
1039 if (!gl) in gfs2_glock_get()
1042 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); in gfs2_glock_get()
1045 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); in gfs2_glock_get()
1046 if (!gl->gl_lksb.sb_lvbptr) { in gfs2_glock_get()
1047 kmem_cache_free(cachep, gl); in gfs2_glock_get()
1053 gl->gl_node.next = NULL; in gfs2_glock_get()
1054 gl->gl_flags = 0; in gfs2_glock_get()
1055 gl->gl_name = name; in gfs2_glock_get()
1056 lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass); in gfs2_glock_get()
1057 gl->gl_lockref.count = 1; in gfs2_glock_get()
1058 gl->gl_state = LM_ST_UNLOCKED; in gfs2_glock_get()
1059 gl->gl_target = LM_ST_UNLOCKED; in gfs2_glock_get()
1060 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_glock_get()
1061 gl->gl_ops = glops; in gfs2_glock_get()
1062 gl->gl_dstamp = 0; in gfs2_glock_get()
1065 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; in gfs2_glock_get()
1067 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; in gfs2_glock_get()
1068 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; in gfs2_glock_get()
1069 gl->gl_tchange = jiffies; in gfs2_glock_get()
1070 gl->gl_object = NULL; in gfs2_glock_get()
1071 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; in gfs2_glock_get()
1072 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); in gfs2_glock_get()
1073 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) in gfs2_glock_get()
1074 INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func); in gfs2_glock_get()
1076 mapping = gfs2_glock2aspace(gl); in gfs2_glock_get()
1086 tmp = find_insert_glock(&name, gl); in gfs2_glock_get()
1088 *glp = gl; in gfs2_glock_get()
1098 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_get()
1099 kmem_cache_free(cachep, gl); in gfs2_glock_get()
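gfs2_glock_get() is the lookup-or-create path: find_insert_glock() consults the global gl_hash_table, and only on a miss is a new glock allocated from a glock slab cache (plus an LVB buffer when the lock type uses one) and initialised to LM_ST_UNLOCKED with a single reference. A sketch of the calling convention, mirroring gfs2_glock_nq_num() further down; number and glops stand in for caller-supplied values:

	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);  /* CREATE: allocate on a miss */
	if (error)
		return error;
	/* ... the returned glock carries a reference for the caller ... */
	gfs2_glock_put(gl);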
1116 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, in gfs2_holder_init() argument
1120 gh->gh_gl = gl; in gfs2_holder_init()
1127 gfs2_glock_hold(gl); in gfs2_holder_init()
1164 static void gfs2_glock_update_hold_time(struct gfs2_glock *gl, in gfs2_glock_update_hold_time() argument
1170 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR, in gfs2_glock_update_hold_time()
1286 static void handle_callback(struct gfs2_glock *gl, unsigned int state, in handle_callback() argument
1290 set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in handle_callback()
1292 gfs2_set_demote(gl); in handle_callback()
1293 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { in handle_callback()
1294 gl->gl_demote_state = state; in handle_callback()
1295 gl->gl_demote_time = jiffies; in handle_callback()
1296 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && in handle_callback()
1297 gl->gl_demote_state != state) { in handle_callback()
1298 gl->gl_demote_state = LM_ST_UNLOCKED; in handle_callback()
1300 if (gl->gl_ops->go_callback) in handle_callback()
1301 gl->gl_ops->go_callback(gl, remote); in handle_callback()
1302 trace_gfs2_demote_rq(gl, remote); in handle_callback()
1335 __releases(&gl->gl_lockref.lock) in add_to_queue()
1336 __acquires(&gl->gl_lockref.lock) in add_to_queue()
1338 struct gfs2_glock *gl = gh->gh_gl; in add_to_queue() local
1339 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in add_to_queue()
1344 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); in add_to_queue()
1346 GLOCK_BUG_ON(gl, true); in add_to_queue()
1349 if (test_bit(GLF_LOCK, &gl->gl_flags)) in add_to_queue()
1350 try_futile = !may_grant(gl, gh); in add_to_queue()
1351 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) in add_to_queue()
1355 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { in add_to_queue()
1372 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
1373 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
1375 list_add_tail(&gh->gh_list, &gl->gl_holders); in add_to_queue()
1382 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); in add_to_queue()
1384 spin_unlock(&gl->gl_lockref.lock); in add_to_queue()
1386 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); in add_to_queue()
1387 spin_lock(&gl->gl_lockref.lock); in add_to_queue()
1400 gfs2_dump_glock(NULL, gl, true); in add_to_queue()
1415 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_nq() local
1418 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) in gfs2_glock_nq()
1421 if (test_bit(GLF_LRU, &gl->gl_flags)) in gfs2_glock_nq()
1422 gfs2_glock_remove_from_lru(gl); in gfs2_glock_nq()
1424 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1427 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { in gfs2_glock_nq()
1428 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_nq()
1429 gl->gl_lockref.count++; in gfs2_glock_nq()
1430 __gfs2_glock_queue_work(gl, 0); in gfs2_glock_nq()
1432 run_queue(gl, 1); in gfs2_glock_nq()
1433 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1461 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq() local
1462 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_dq()
1466 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1475 glock_blocked_by_withdraw(gl) && in gfs2_glock_dq()
1478 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1482 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1485 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_glock_dq()
1489 if (find_first_holder(gl) == NULL) { in gfs2_glock_dq()
1490 if (list_empty(&gl->gl_holders) && in gfs2_glock_dq()
1491 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1492 !test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_glock_dq()
1495 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) in gfs2_glock_dq()
1496 gfs2_glock_add_to_lru(gl); in gfs2_glock_dq()
1500 gl->gl_lockref.count++; in gfs2_glock_dq()
1501 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1502 !test_bit(GLF_DEMOTE, &gl->gl_flags) && in gfs2_glock_dq()
1503 gl->gl_name.ln_type == LM_TYPE_INODE) in gfs2_glock_dq()
1504 delay = gl->gl_hold_time; in gfs2_glock_dq()
1505 __gfs2_glock_queue_work(gl, delay); in gfs2_glock_dq()
1507 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1512 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq_wait() local
1515 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); in gfs2_glock_dq_wait()
1546 struct gfs2_glock *gl; in gfs2_glock_nq_num() local
1549 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); in gfs2_glock_nq_num()
1551 error = gfs2_glock_nq_init(gl, state, flags, gh); in gfs2_glock_nq_num()
1552 gfs2_glock_put(gl); in gfs2_glock_nq_num()
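gfs2_glock_nq_num() above is the compact form of the holder cycle: get (or create) the glock, initialise and queue a holder, then drop the extra glock reference once the holder pins it. Spelled out with the individual calls that appear in this listing; the state and flags are illustrative, and the matching gfs2_holder_uninit()/gfs2_glock_dq_uninit() cleanup helpers are assumed from the wider glock API rather than shown here:

	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);   /* also takes a glock reference */
	error = gfs2_glock_nq(&gh);                   /* queue the holder and wait to be granted */
	if (!error) {
		/* ... access data protected by the shared glock ... */
		gfs2_glock_dq(&gh);                   /* release the holder */
	}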
1666 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) in gfs2_glock_cb() argument
1672 gfs2_glock_hold(gl); in gfs2_glock_cb()
1673 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1674 holdtime = gl->gl_tchange + gl->gl_hold_time; in gfs2_glock_cb()
1675 if (!list_empty(&gl->gl_holders) && in gfs2_glock_cb()
1676 gl->gl_name.ln_type == LM_TYPE_INODE) { in gfs2_glock_cb()
1679 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) in gfs2_glock_cb()
1680 delay = gl->gl_hold_time; in gfs2_glock_cb()
1682 handle_callback(gl, state, delay, true); in gfs2_glock_cb()
1683 __gfs2_glock_queue_work(gl, delay); in gfs2_glock_cb()
1684 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1698 static int gfs2_should_freeze(const struct gfs2_glock *gl) in gfs2_should_freeze() argument
1702 if (gl->gl_reply & ~LM_OUT_ST_MASK) in gfs2_should_freeze()
1704 if (gl->gl_target == LM_ST_UNLOCKED) in gfs2_should_freeze()
1707 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_should_freeze()
1726 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) in gfs2_glock_complete() argument
1728 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gfs2_glock_complete()
1730 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1731 gl->gl_reply = ret; in gfs2_glock_complete()
1734 if (gfs2_should_freeze(gl)) { in gfs2_glock_complete()
1735 set_bit(GLF_FROZEN, &gl->gl_flags); in gfs2_glock_complete()
1736 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1741 gl->gl_lockref.count++; in gfs2_glock_complete()
1742 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_complete()
1743 __gfs2_glock_queue_work(gl, 0); in gfs2_glock_complete()
1744 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1780 struct gfs2_glock *gl; in gfs2_dispose_glock_lru() local
1785 gl = list_first_entry(list, struct gfs2_glock, gl_lru); in gfs2_dispose_glock_lru()
1786 list_del_init(&gl->gl_lru); in gfs2_dispose_glock_lru()
1787 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
1788 if (!spin_trylock(&gl->gl_lockref.lock)) { in gfs2_dispose_glock_lru()
1790 list_add(&gl->gl_lru, &lru_list); in gfs2_dispose_glock_lru()
1791 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
1795 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_dispose_glock_lru()
1796 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
1799 gl->gl_lockref.count++; in gfs2_dispose_glock_lru()
1800 if (demote_ok(gl)) in gfs2_dispose_glock_lru()
1801 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_dispose_glock_lru()
1802 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); in gfs2_dispose_glock_lru()
1803 __gfs2_glock_queue_work(gl, 0); in gfs2_dispose_glock_lru()
1804 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
1820 struct gfs2_glock *gl; in gfs2_scan_glock_lru() local
1827 gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru); in gfs2_scan_glock_lru()
1830 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_scan_glock_lru()
1831 list_move(&gl->gl_lru, &dispose); in gfs2_scan_glock_lru()
1837 list_move(&gl->gl_lru, &skipped); in gfs2_scan_glock_lru()
1879 struct gfs2_glock *gl; in glock_hash_walk() local
1887 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) { in glock_hash_walk()
1888 if (gl->gl_name.ln_sbd == sdp) in glock_hash_walk()
1889 examiner(gl); in glock_hash_walk()
1893 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); in glock_hash_walk()
1898 bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay) in gfs2_queue_delete_work() argument
1902 spin_lock(&gl->gl_lockref.lock); in gfs2_queue_delete_work()
1904 &gl->gl_delete, delay); in gfs2_queue_delete_work()
1906 set_bit(GLF_PENDING_DELETE, &gl->gl_flags); in gfs2_queue_delete_work()
1907 spin_unlock(&gl->gl_lockref.lock); in gfs2_queue_delete_work()
1911 void gfs2_cancel_delete_work(struct gfs2_glock *gl) in gfs2_cancel_delete_work() argument
1913 if (cancel_delayed_work(&gl->gl_delete)) { in gfs2_cancel_delete_work()
1914 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); in gfs2_cancel_delete_work()
1915 gfs2_glock_put(gl); in gfs2_cancel_delete_work()
1919 bool gfs2_delete_work_queued(const struct gfs2_glock *gl) in gfs2_delete_work_queued() argument
1921 return test_bit(GLF_PENDING_DELETE, &gl->gl_flags); in gfs2_delete_work_queued()
1924 static void flush_delete_work(struct gfs2_glock *gl) in flush_delete_work() argument
1926 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) { in flush_delete_work()
1927 if (cancel_delayed_work(&gl->gl_delete)) { in flush_delete_work()
1929 &gl->gl_delete, 0); in flush_delete_work()
1946 static void thaw_glock(struct gfs2_glock *gl) in thaw_glock() argument
1948 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) in thaw_glock()
1950 if (!lockref_get_not_dead(&gl->gl_lockref)) in thaw_glock()
1952 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in thaw_glock()
1953 gfs2_glock_queue_work(gl, 0); in thaw_glock()
1962 static void clear_glock(struct gfs2_glock *gl) in clear_glock() argument
1964 gfs2_glock_remove_from_lru(gl); in clear_glock()
1966 spin_lock(&gl->gl_lockref.lock); in clear_glock()
1967 if (!__lockref_is_dead(&gl->gl_lockref)) { in clear_glock()
1968 gl->gl_lockref.count++; in clear_glock()
1969 if (gl->gl_state != LM_ST_UNLOCKED) in clear_glock()
1970 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in clear_glock()
1971 __gfs2_glock_queue_work(gl, 0); in clear_glock()
1973 spin_unlock(&gl->gl_lockref.lock); in clear_glock()
1987 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) in dump_glock() argument
1989 spin_lock(&gl->gl_lockref.lock); in dump_glock()
1990 gfs2_dump_glock(seq, gl, fsid); in dump_glock()
1991 spin_unlock(&gl->gl_lockref.lock); in dump_glock()
1994 static void dump_glock_func(struct gfs2_glock *gl) in dump_glock_func() argument
1996 dump_glock(NULL, gl, true); in dump_glock_func()
2021 struct gfs2_glock *gl = ip->i_gl; in gfs2_glock_finish_truncate() local
2025 gfs2_glock_assert_withdraw(gl, ret == 0); in gfs2_glock_finish_truncate()
2027 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_finish_truncate()
2028 clear_bit(GLF_LOCK, &gl->gl_flags); in gfs2_glock_finish_truncate()
2029 run_queue(gl, 1); in gfs2_glock_finish_truncate()
2030 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_finish_truncate()
2104 static const char *gflags2str(char *buf, const struct gfs2_glock *gl) in gflags2str() argument
2106 const unsigned long *gflags = &gl->gl_flags; in gflags2str()
2129 if (!list_empty(&gl->gl_holders)) in gflags2str()
2133 if (gl->gl_object) in gflags2str()
2165 void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) in gfs2_dump_glock() argument
2167 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_dump_glock()
2171 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_dump_glock()
2175 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_dump_glock()
2176 struct address_space *mapping = gfs2_glock2aspace(gl); in gfs2_dump_glock()
2183 dtime = jiffies - gl->gl_demote_time; in gfs2_dump_glock()
2185 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_dump_glock()
2189 fs_id_buf, state2str(gl->gl_state), in gfs2_dump_glock()
2190 gl->gl_name.ln_type, in gfs2_dump_glock()
2191 (unsigned long long)gl->gl_name.ln_number, in gfs2_dump_glock()
2192 gflags2str(gflags_buf, gl), in gfs2_dump_glock()
2193 state2str(gl->gl_target), in gfs2_dump_glock()
2194 state2str(gl->gl_demote_state), dtime, in gfs2_dump_glock()
2195 atomic_read(&gl->gl_ail_count), in gfs2_dump_glock()
2196 atomic_read(&gl->gl_revokes), in gfs2_dump_glock()
2197 (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages); in gfs2_dump_glock()
2199 list_for_each_entry(gh, &gl->gl_holders, gh_list) in gfs2_dump_glock()
2202 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) in gfs2_dump_glock()
2203 glops->go_dump(seq, gl, fs_id_buf); in gfs2_dump_glock()
2208 struct gfs2_glock *gl = iter_ptr; in gfs2_glstats_seq_show() local
2211 gl->gl_name.ln_type, in gfs2_glstats_seq_show()
2212 (unsigned long long)gl->gl_name.ln_number, in gfs2_glstats_seq_show()
2213 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT], in gfs2_glstats_seq_show()
2214 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], in gfs2_glstats_seq_show()
2215 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], in gfs2_glstats_seq_show()
2216 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], in gfs2_glstats_seq_show()
2217 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT], in gfs2_glstats_seq_show()
2218 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], in gfs2_glstats_seq_show()
2219 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], in gfs2_glstats_seq_show()
2220 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); in gfs2_glstats_seq_show()
2325 struct gfs2_glock *gl = gi->gl; in gfs2_glock_iter_next() local
2327 if (gl) { in gfs2_glock_iter_next()
2330 if (!lockref_put_not_zero(&gl->gl_lockref)) in gfs2_glock_iter_next()
2331 gfs2_glock_queue_put(gl); in gfs2_glock_iter_next()
2334 gl = rhashtable_walk_next(&gi->hti); in gfs2_glock_iter_next()
2335 if (IS_ERR_OR_NULL(gl)) { in gfs2_glock_iter_next()
2336 if (gl == ERR_PTR(-EAGAIN)) { in gfs2_glock_iter_next()
2340 gl = NULL; in gfs2_glock_iter_next()
2343 if (gl->gl_name.ln_sbd != gi->sdp) in gfs2_glock_iter_next()
2346 if (!lockref_get_not_dead(&gl->gl_lockref)) in gfs2_glock_iter_next()
2350 if (__lockref_is_dead(&gl->gl_lockref)) in gfs2_glock_iter_next()
2355 gi->gl = gl; in gfs2_glock_iter_next()
2380 return gi->gl; in gfs2_glock_seq_start()
2391 return gi->gl; in gfs2_glock_seq_next()
2470 gi->gl = NULL; in __gfs2_glocks_open()
2486 if (gi->gl) in gfs2_glocks_release()
2487 gfs2_glock_put(gi->gl); in gfs2_glocks_release()