Lines matching references to gh (struct gfs2_holder *) in the GFS2 glock code:
62 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
188 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) in may_grant() argument
191 if ((gh->gh_state == LM_ST_EXCLUSIVE || in may_grant()
192 gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head) in may_grant()
194 if (gl->gl_state == gh->gh_state) in may_grant()
196 if (gh->gh_flags & GL_EXACT) in may_grant()
199 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED) in may_grant()
201 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED) in may_grant()
204 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) in may_grant()
209 static void gfs2_holder_wake(struct gfs2_holder *gh) in gfs2_holder_wake() argument
211 clear_bit(HIF_WAIT, &gh->gh_iflags); in gfs2_holder_wake()
213 wake_up_bit(&gh->gh_iflags, HIF_WAIT); in gfs2_holder_wake()
223 struct gfs2_holder *gh, *tmp; in do_error() local
225 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_error()
226 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in do_error()
229 gh->gh_error = -EIO; in do_error()
230 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) in do_error()
231 gh->gh_error = GLR_TRYFAILED; in do_error()
234 list_del_init(&gh->gh_list); in do_error()
235 trace_gfs2_glock_queue(gh, 0); in do_error()
236 gfs2_holder_wake(gh); in do_error()
253 struct gfs2_holder *gh, *tmp; in do_promote() local
257 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_promote()
258 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in do_promote()
260 if (may_grant(gl, gh)) { in do_promote()
261 if (gh->gh_list.prev == &gl->gl_holders && in do_promote()
265 ret = glops->go_lock(gh); in do_promote()
270 gh->gh_error = ret; in do_promote()
271 list_del_init(&gh->gh_list); in do_promote()
272 trace_gfs2_glock_queue(gh, 0); in do_promote()
273 gfs2_holder_wake(gh); in do_promote()
276 set_bit(HIF_HOLDER, &gh->gh_iflags); in do_promote()
277 trace_gfs2_promote(gh, 1); in do_promote()
278 gfs2_holder_wake(gh); in do_promote()
281 set_bit(HIF_HOLDER, &gh->gh_iflags); in do_promote()
282 trace_gfs2_promote(gh, 0); in do_promote()
283 gfs2_holder_wake(gh); in do_promote()
286 if (gh->gh_list.prev == &gl->gl_holders) in do_promote()
301 struct gfs2_holder *gh; in find_first_waiter() local
303 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in find_first_waiter()
304 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) in find_first_waiter()
305 return gh; in find_first_waiter()
360 struct gfs2_holder *gh; in finish_xmote() local
367 gh = find_first_waiter(gl); in finish_xmote()
376 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { in finish_xmote()
379 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) in finish_xmote()
380 list_move_tail(&gh->gh_list, &gl->gl_holders); in finish_xmote()
381 gh = find_first_waiter(gl); in finish_xmote()
382 gl->gl_target = gh->gh_state; in finish_xmote()
387 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { in finish_xmote()
397 do_xmote(gl, gh, gl->gl_target); in finish_xmote()
402 do_xmote(gl, gh, LM_ST_UNLOCKED); in finish_xmote()
418 rv = glops->go_xmote_bh(gl, gh); in finish_xmote()
443 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) in do_xmote() argument
449 unsigned int lck_flags = gh ? gh->gh_flags : 0; in do_xmote()
498 struct gfs2_holder *gh; in find_first_holder() local
501 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); in find_first_holder()
502 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in find_first_holder()
503 return gh; in find_first_holder()
519 struct gfs2_holder *gh = NULL; in run_queue() local
544 gh = find_first_waiter(gl); in run_queue()
545 gl->gl_target = gh->gh_state; in run_queue()
546 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) in run_queue()
549 do_xmote(gl, gh, gl->gl_target); in run_queue()
754 struct gfs2_holder *gh) in gfs2_holder_init() argument
756 INIT_LIST_HEAD(&gh->gh_list); in gfs2_holder_init()
757 gh->gh_gl = gl; in gfs2_holder_init()
758 gh->gh_ip = _RET_IP_; in gfs2_holder_init()
759 gh->gh_owner_pid = get_pid(task_pid(current)); in gfs2_holder_init()
760 gh->gh_state = state; in gfs2_holder_init()
761 gh->gh_flags = flags; in gfs2_holder_init()
762 gh->gh_error = 0; in gfs2_holder_init()
763 gh->gh_iflags = 0; in gfs2_holder_init()
777 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh) in gfs2_holder_reinit() argument
779 gh->gh_state = state; in gfs2_holder_reinit()
780 gh->gh_flags = flags; in gfs2_holder_reinit()
781 gh->gh_iflags = 0; in gfs2_holder_reinit()
782 gh->gh_ip = _RET_IP_; in gfs2_holder_reinit()
783 put_pid(gh->gh_owner_pid); in gfs2_holder_reinit()
784 gh->gh_owner_pid = get_pid(task_pid(current)); in gfs2_holder_reinit()
793 void gfs2_holder_uninit(struct gfs2_holder *gh) in gfs2_holder_uninit() argument
795 put_pid(gh->gh_owner_pid); in gfs2_holder_uninit()
796 gfs2_glock_put(gh->gh_gl); in gfs2_holder_uninit()
797 gh->gh_gl = NULL; in gfs2_holder_uninit()
798 gh->gh_ip = 0; in gfs2_holder_uninit()
808 int gfs2_glock_wait(struct gfs2_holder *gh) in gfs2_glock_wait() argument
813 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); in gfs2_glock_wait()
816 gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time + in gfs2_glock_wait()
819 return gh->gh_error; in gfs2_glock_wait()
878 static inline void add_to_queue(struct gfs2_holder *gh) in add_to_queue() argument
882 struct gfs2_glock *gl = gh->gh_gl; in add_to_queue()
888 BUG_ON(gh->gh_owner_pid == NULL); in add_to_queue()
889 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) in add_to_queue()
892 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { in add_to_queue()
894 try_futile = !may_grant(gl, gh); in add_to_queue()
900 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid && in add_to_queue()
901 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK))) in add_to_queue()
906 gh->gh_error = GLR_TRYFAILED; in add_to_queue()
907 gfs2_holder_wake(gh); in add_to_queue()
912 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) in add_to_queue()
916 trace_gfs2_glock_queue(gh, 1); in add_to_queue()
920 list_add_tail(&gh->gh_list, &gl->gl_holders); in add_to_queue()
921 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) in add_to_queue()
925 list_add_tail(&gh->gh_list, insert_pt); in add_to_queue()
927 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); in add_to_queue()
928 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { in add_to_queue()
941 pr_err("new: %pSR\n", (void *)gh->gh_ip); in add_to_queue()
942 pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid)); in add_to_queue()
944 gh->gh_gl->gl_name.ln_type, gh->gh_state); in add_to_queue()
958 int gfs2_glock_nq(struct gfs2_holder *gh) in gfs2_glock_nq() argument
960 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_nq()
971 add_to_queue(gh); in gfs2_glock_nq()
972 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && in gfs2_glock_nq()
982 if (!(gh->gh_flags & GL_ASYNC)) in gfs2_glock_nq()
983 error = gfs2_glock_wait(gh); in gfs2_glock_nq()
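The LM_FLAG_TRY handling above (do_error(), add_to_queue()) means a "try" request never blocks: when may_grant() refuses it, gh_error is set to GLR_TRYFAILED and the holder is woken at once, so gfs2_glock_nq() hands that value back to the caller. A minimal caller-side sketch, assuming the declarations from fs/gfs2/glock.h; example_try_shared() and the -EAGAIN back-off are illustrative, not code from this file:

static int example_try_shared(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        /* LM_FLAG_TRY: fail fast with GLR_TRYFAILED instead of waiting. */
        gfs2_holder_init(gl, LM_ST_SHARED, LM_FLAG_TRY, &gh);
        error = gfs2_glock_nq(&gh);
        if (error) {
                gfs2_holder_uninit(&gh);        /* drops the glock reference taken by holder_init */
                if (error == GLR_TRYFAILED)
                        return -EAGAIN;         /* illustrative: caller backs off and retries */
                return error;
        }

        /* ... HIF_HOLDER is set; access the object the glock protects ... */

        gfs2_glock_dq_uninit(&gh);
        return 0;
}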
995 int gfs2_glock_poll(struct gfs2_holder *gh) in gfs2_glock_poll() argument
997 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; in gfs2_glock_poll()
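Because gfs2_glock_poll() only tests HIF_WAIT, it pairs with GL_ASYNC requests, where gfs2_glock_nq() skips gfs2_glock_wait() and returns immediately. A hedged sketch of that pattern; example_async_exclusive() and the busy-wait loop are illustrative only (a real caller would overlap useful work, such as queueing further lock requests, before waiting):

static int example_async_exclusive(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;
        int error;

        gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
        error = gfs2_glock_nq(&gh);     /* queues the holder and returns without waiting */
        if (error)
                goto out_uninit;

        /* ... overlap other work with the lock acquisition ... */

        while (!gfs2_glock_poll(&gh))   /* 1 once gfs2_holder_wake() has cleared HIF_WAIT */
                cond_resched();         /* illustrative busy-wait only */

        error = gfs2_glock_wait(&gh);   /* no longer blocks; just returns gh_error */
        if (error)
                goto out_uninit;

        /* ... holder granted ... */

        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        return error;
}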
1006 void gfs2_glock_dq(struct gfs2_holder *gh) in gfs2_glock_dq() argument
1008 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq()
1014 if (gh->gh_flags & GL_NOCACHE) in gfs2_glock_dq()
1017 list_del_init(&gh->gh_list); in gfs2_glock_dq()
1022 glops->go_unlock(gh); in gfs2_glock_dq()
1035 trace_gfs2_glock_queue(gh, 0); in gfs2_glock_dq()
1049 void gfs2_glock_dq_wait(struct gfs2_holder *gh) in gfs2_glock_dq_wait() argument
1051 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq_wait()
1052 gfs2_glock_dq(gh); in gfs2_glock_dq_wait()
1063 void gfs2_glock_dq_uninit(struct gfs2_holder *gh) in gfs2_glock_dq_uninit() argument
1065 gfs2_glock_dq(gh); in gfs2_glock_dq_uninit()
1066 gfs2_holder_uninit(gh); in gfs2_glock_dq_uninit()
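Taken together, gfs2_holder_init(), gfs2_glock_nq(), gfs2_glock_dq() and gfs2_holder_uninit() (or the combined gfs2_glock_dq_uninit()) form the normal holder lifecycle. A minimal blocking sketch, assuming the declarations from fs/gfs2/glock.h; example_hold_inode_glock() and the work in the middle are illustrative:

static int example_hold_inode_glock(struct gfs2_inode *ip)
{
        struct gfs2_holder gh;
        int error;

        /* Prepare the holder and queue it; without GL_ASYNC, gfs2_glock_nq()
         * calls gfs2_glock_wait() itself, so this blocks until granted.
         * The gfs2_glock_nq_init() helper used by gfs2_glock_nq_num() wraps
         * these two calls. */
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);
        if (error) {
                gfs2_holder_uninit(&gh);
                return error;
        }

        /* ... read the inode under the shared glock ... */

        /* Dequeue and release the holder in one call. */
        gfs2_glock_dq_uninit(&gh);
        return 0;
}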
1083 unsigned int state, int flags, struct gfs2_holder *gh) in gfs2_glock_nq_num() argument
1090 error = gfs2_glock_nq_init(gl, state, flags, gh); in gfs2_glock_nq_num()
1240 const struct gfs2_holder *gh; in gfs2_should_freeze() local
1247 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_should_freeze()
1248 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) in gfs2_should_freeze()
1250 if (LM_FLAG_NOEXP & gh->gh_flags) in gfs2_should_freeze()
1578 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh) in dump_holder() argument
1584 if (gh->gh_owner_pid) in dump_holder()
1585 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); in dump_holder()
1587 state2str(gh->gh_state), in dump_holder()
1588 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), in dump_holder()
1589 gh->gh_error, in dump_holder()
1590 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, in dump_holder()
1592 (void *)gh->gh_ip); in dump_holder()
1654 const struct gfs2_holder *gh; in gfs2_dump_glock() local
1672 list_for_each_entry(gh, &gl->gl_holders, gh_list) in gfs2_dump_glock()
1673 dump_holder(seq, gh); in gfs2_dump_glock()