Lines matching refs: gl (fs/gfs2/glock.h)
128 void (*lm_put_lock) (struct gfs2_glock *gl);
129 int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
131 void (*lm_cancel) (struct gfs2_glock *gl);
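These three callbacks are the gl-facing half of struct lm_lockops, the interface a lock-module backend (lock_nolock, lock_dlm) implements. Below is a minimal sketch of such a backend; the mylm_* names are hypothetical, the elided continuation of line 129 is assumed to carry the usual unsigned int flags parameter, and only a subset of the ops table is filled in.

static void mylm_put_lock(struct gfs2_glock *gl)
{
	/* Last reference gone: release the backend lock and free gl. */
	gfs2_glock_free(gl);
}

static int mylm_lock(struct gfs2_glock *gl, unsigned int req_state,
		     unsigned int flags)
{
	/* Request req_state from the lock manager; the result is
	 * reported asynchronously via gfs2_glock_complete(). */
	return 0;
}

static void mylm_cancel(struct gfs2_glock *gl)
{
	/* Best-effort cancel of an in-flight request on gl. */
}

static const struct lm_lockops mylm_ops = {
	.lm_proto_name	= "lock_mylm",
	.lm_put_lock	= mylm_put_lock,
	.lm_lock	= mylm_lock,
	.lm_cancel	= mylm_cancel,
};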
136 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
142 spin_lock(&gl->gl_lockref.lock);
144 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
152 spin_unlock(&gl->gl_lockref.lock);
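gfs2_glock_is_locked_by_me() walks gl_holders under gl_lockref.lock and returns the current task's granted holder, or NULL. A hedged usage sketch, modeled on callers such as gfs2_permission() (ip, a struct gfs2_inode *, is assumed):

struct gfs2_holder i_gh;
struct gfs2_holder *gh;
int error = 0;

gh = gfs2_glock_is_locked_by_me(ip->i_gl);
if (gh == NULL) {
	/* Not already held by this task: queue a shared holder. */
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return error;
}
/* ... inspect the inode under the glock ... */
if (gh == NULL)
	gfs2_glock_dq_uninit(&i_gh);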
157 static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
159 return gl->gl_state == LM_ST_EXCLUSIVE;
162 static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
164 return gl->gl_state == LM_ST_DEFERRED;
167 static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
169 return gl->gl_state == LM_ST_SHARED;
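The three predicates test the glock's cached state (gl_state). They typically back assertions that a code path holds the lock in the mode it needs; a minimal sketch using the gfs2_glock_assert_warn() macro listed further down (the helper name is hypothetical):

static void my_modify_protected_state(struct gfs2_glock *gl)
{
	/* Writes require the exclusive state; warn and dump gl if not. */
	gfs2_glock_assert_warn(gl, gfs2_glock_is_held_excl(gl));
	/* ... mutate data covered by gl ... */
}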
172 static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
174 if (gl->gl_ops->go_flags & GLOF_ASPACE)
175 return (struct address_space *)(gl + 1);
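For glock types whose ops set GLOF_ASPACE, the address_space is co-allocated directly behind struct gfs2_glock, which is what the (gl + 1) pointer arithmetic recovers; for other types the function (in the lines not shown here) returns NULL. A hedged sketch of the usual invalidation pattern:

struct address_space *mapping = gfs2_glock2aspace(gl);

if (mapping)	/* NULL for glock types without a backing aspace */
	truncate_inode_pages(mapping, 0);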
182 extern void gfs2_glock_hold(struct gfs2_glock *gl);
183 extern void gfs2_glock_put(struct gfs2_glock *gl);
184 extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
185 extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
203 extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
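gfs2_glock_hold() and gfs2_glock_put() manage the gl_lockref reference count; gfs2_glock_queue_put() defers the final put to workqueue context for callers that must not perform it synchronously. A minimal sketch, assuming an asynchronous hand-off:

gfs2_glock_hold(gl);		/* pin gl across the hand-off */
/* ... publish gl to a context that finishes later ... */
gfs2_glock_queue_put(gl);	/* drop the ref from workqueue context */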
205 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { \
206 gfs2_dump_glock(NULL, gl, true); \
208 #define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) { \
209 gfs2_dump_glock(NULL, gl, true); \
210 gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
212 #define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) { \
213 gfs2_dump_glock(NULL, gl, true); \
214 gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
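All three macros dump the glock via gfs2_dump_glock() before acting: GLOCK_BUG_ON() fires when its condition is true, while the assert variants fire when theirs is false and then warn or withdraw. For example, glock.c guards reference taking like this:

/* Taking a reference on a dead glock is a bug (cf. gfs2_glock_hold()). */
GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));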
230 static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
236 gfs2_holder_init(gl, state, flags, gh);
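gfs2_glock_nq_init() is the convenience wrapper combining gfs2_holder_init() with gfs2_glock_nq(), uninitializing the holder on failure. That gives the canonical acquire/release pattern; a hedged sketch (ip is assumed):

struct gfs2_holder gh;
int error;

error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (error)
	return error;
/* ... the inode is locked exclusively here ... */
gfs2_glock_dq_uninit(&gh);	/* drop and release the holder */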
245 extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
246 extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
247 extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
248 extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
249 extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
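gfs2_glock_complete() is how a lock module reports the asynchronous result of an lm_lock request, and gfs2_glock_cb() delivers remote demote requests; the delete-work helpers let iopen glocks schedule, cancel, and query deferred inode deletion. A hedged sketch of the two backend callbacks (hypothetical mylm_* names; lock_dlm's gdlm_ast()/gdlm_bast() are the in-tree model):

static void mylm_ast(void *arg)
{
	struct gfs2_glock *gl = arg;

	gfs2_glock_complete(gl, 0);	/* the lm_lock request finished */
}

static void mylm_bast(void *arg, int mode)
{
	struct gfs2_glock *gl = arg;

	/* Another node wants the lock: ask glock.c to demote ours.
	 * A real backend maps mode to the matching LM_ST_* state. */
	gfs2_glock_cb(gl, LM_ST_UNLOCKED);
}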
254 extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
255 extern void gfs2_glock_free(struct gfs2_glock *gl);
287 static inline void glock_set_object(struct gfs2_glock *gl, void *object)
289 spin_lock(&gl->gl_lockref.lock);
290 if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
291 gfs2_dump_glock(NULL, gl, true);
292 gl->gl_object = object;
293 spin_unlock(&gl->gl_lockref.lock);
314 static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
316 spin_lock(&gl->gl_lockref.lock);
317 if (gl->gl_object == object)
318 gl->gl_object = NULL;
319 spin_unlock(&gl->gl_lockref.lock);
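glock_set_object() attaches the object a glock protects (typically a struct gfs2_inode), warning and dumping the glock if one is already set; glock_clear_object() detaches it only if it still matches, so teardown is safe against races. A hedged sketch of the pairing over an inode's lifetime:

/* At inode setup: let glock callbacks find their inode. */
glock_set_object(ip->i_gl, ip);

/* At eviction: detach; a non-matching object is left alone by design. */
glock_clear_object(ip->i_gl, ip);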
322 extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
323 extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
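These two record and query, per glock, the generation of the last inode known to be deallocated, letting nodes that race on remote deletion skip work another node already finished. A hedged sketch of the handshake; which glock carries this state and the gfs2_dinode_dealloc() call site are assumptions:

if (gfs2_inode_already_deleted(gl, ip->i_no_formal_ino))
	return 0;	/* some node already deallocated this generation */

error = gfs2_dinode_dealloc(ip);
if (!error)
	gfs2_inode_remember_delete(gl, ip->i_no_formal_ino);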