gl 1375 arch/sparc/include/asm/hypervisor.h unsigned char gl; /* Global register level */
gl 332 drivers/crypto/chelsio/chtls/chtls_main.c static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
gl 342 drivers/crypto/chelsio/chtls/chtls_main.c skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
gl 346 drivers/crypto/chelsio/chtls/chtls_main.c __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
gl 351 drivers/crypto/chelsio/chtls/chtls_main.c , gl->va + pktshift,
gl 352 drivers/crypto/chelsio/chtls/chtls_main.c gl->tot_len - pktshift);
gl 358 drivers/crypto/chelsio/chtls/chtls_main.c const struct pkt_gl *gl, const __be64 *rsp)
gl 364 drivers/crypto/chelsio/chtls/chtls_main.c skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
gl 432 drivers/crypto/chelsio/chtls/chtls_main.c const struct pkt_gl *gl)
gl 441 drivers/crypto/chelsio/chtls/chtls_main.c if (chtls_recv_packet(cdev, gl, rsp) < 0)
gl 446 drivers/crypto/chelsio/chtls/chtls_main.c if (!gl)
gl 450 drivers/crypto/chelsio/chtls/chtls_main.c skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
gl 1103 drivers/infiniband/hw/cxgb4/device.c static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
gl 1116 drivers/infiniband/hw/cxgb4/device.c skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
gl 1121 drivers/infiniband/hw/cxgb4/device.c __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
gl 1136 drivers/infiniband/hw/cxgb4/device.c gl->va + pktshift,
gl 1137 drivers/infiniband/hw/cxgb4/device.c gl->tot_len - pktshift);
gl 1141 drivers/infiniband/hw/cxgb4/device.c static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
gl 1150 drivers/infiniband/hw/cxgb4/device.c skb = copy_gl_to_skb_pkt(gl , rsp, dev->rdev.lldi.sge_pktshift);
gl 1166 drivers/infiniband/hw/cxgb4/device.c const struct pkt_gl *gl)
gl 1173 drivers/infiniband/hw/cxgb4/device.c if (gl == NULL) {
gl 1182 drivers/infiniband/hw/cxgb4/device.c } else if (gl == CXGB4_MSG_AN) {
gl 1188 drivers/infiniband/hw/cxgb4/device.c } else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
gl 1189 drivers/infiniband/hw/cxgb4/device.c if (recv_rx_pkt(dev, gl, rsp))
gl 1193 drivers/infiniband/hw/cxgb4/device.c pci_name(ctx->lldi.pdev), gl->va,
gl 1195 drivers/infiniband/hw/cxgb4/device.c be64_to_cpu(*(__force __be64 *)gl->va),
gl 1196 drivers/infiniband/hw/cxgb4/device.c gl->tot_len);
gl 1200 drivers/infiniband/hw/cxgb4/device.c skb = cxgb4_pktgl_to_skb(gl, 128, 128);
gl 663 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h const struct pkt_gl *gl);
gl 1412 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h const struct pkt_gl *gl);
gl 553 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c const struct pkt_gl *gl)
gl 104 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c const struct pkt_gl *gl)
gl 117 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c rsp, gl, &q->lro_mgr,
gl 121 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c rsp, gl);
gl 128 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c if (!gl)
gl 130 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c else if (gl == CXGB4_MSG_AN)
gl 378 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h const struct pkt_gl *gl);
gl 382 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h const struct pkt_gl *gl,
gl 412 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
gl 2615 drivers/net/ethernet/chelsio/cxgb4/sge.c const struct pkt_gl *gl, unsigned int offset)
gl 2620 drivers/net/ethernet/chelsio/cxgb4/sge.c __skb_fill_page_desc(skb, 0, gl->frags[0].page,
gl 2621 drivers/net/ethernet/chelsio/cxgb4/sge.c gl->frags[0].offset + offset,
gl 2622 drivers/net/ethernet/chelsio/cxgb4/sge.c gl->frags[0].size - offset);
gl 2623 drivers/net/ethernet/chelsio/cxgb4/sge.c skb_shinfo(skb)->nr_frags = gl->nfrags;
gl 2624 drivers/net/ethernet/chelsio/cxgb4/sge.c for (i = 1; i < gl->nfrags; i++)
gl 2625 drivers/net/ethernet/chelsio/cxgb4/sge.c __skb_fill_page_desc(skb, i, gl->frags[i].page,
gl 2626 drivers/net/ethernet/chelsio/cxgb4/sge.c gl->frags[i].offset,
gl 2627 drivers/net/ethernet/chelsio/cxgb4/sge.c gl->frags[i].size);
gl 2630 drivers/net/ethernet/chelsio/cxgb4/sge.c get_page(gl->frags[gl->nfrags - 1].page);
gl 2642 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
gl 2652 drivers/net/ethernet/chelsio/cxgb4/sge.c if (gl->tot_len <= RX_COPY_THRES) {
gl 2653 drivers/net/ethernet/chelsio/cxgb4/sge.c skb = dev_alloc_skb(gl->tot_len);
gl 2656 drivers/net/ethernet/chelsio/cxgb4/sge.c __skb_put(skb, gl->tot_len);
gl 2657 drivers/net/ethernet/chelsio/cxgb4/sge.c skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
gl 2663 drivers/net/ethernet/chelsio/cxgb4/sge.c skb_copy_to_linear_data(skb, gl->va, pull_len);
gl 2665 drivers/net/ethernet/chelsio/cxgb4/sge.c copy_frags(skb, gl, pull_len);
gl 2666 drivers/net/ethernet/chelsio/cxgb4/sge.c skb->len = gl->tot_len;
gl 2681 drivers/net/ethernet/chelsio/cxgb4/sge.c static void t4_pktgl_free(const struct pkt_gl *gl)
gl 2686 drivers/net/ethernet/chelsio/cxgb4/sge.c for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
gl 2695 drivers/net/ethernet/chelsio/cxgb4/sge.c const struct pkt_gl *gl)
gl 2699 drivers/net/ethernet/chelsio/cxgb4/sge.c skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
gl 2701 drivers/net/ethernet/chelsio/cxgb4/sge.c t4_pktgl_free(gl);
gl 2739 drivers/net/ethernet/chelsio/cxgb4/sge.c static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
gl 2750 drivers/net/ethernet/chelsio/cxgb4/sge.c t4_pktgl_free(gl);
gl 2755 drivers/net/ethernet/chelsio/cxgb4/sge.c copy_frags(skb, gl, s->pktshift);
gl 2758 drivers/net/ethernet/chelsio/cxgb4/sge.c skb->len = gl->tot_len - s->pktshift;
gl 2766 drivers/net/ethernet/chelsio/cxgb4/sge.c gl->sgetstamp);
gl 2887 drivers/net/ethernet/chelsio/cxgb4/sge.c const struct pkt_gl *gl)
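
The sge.c hits above are the heart of the cxgb4 zero-copy receive path: copy_frags() attaches the gather list's pages to an skb as page fragments. A minimal sketch of that step, reconstructed from the lines quoted above (it assumes only the pkt_gl fields shown there: frags[], nfrags, tot_len):

    /*
     * Sketch: attach a pkt_gl's page fragments to an skb, as copy_frags()
     * does above. Assumes the pkt_gl layout quoted in this index; not a
     * drop-in replacement for the driver code.
     */
    static void attach_gl_frags(struct sk_buff *skb, const struct pkt_gl *gl,
                                unsigned int offset)
    {
            int i;

            /* first fragment: skip the bytes already consumed from it */
            __skb_fill_page_desc(skb, 0, gl->frags[0].page,
                                 gl->frags[0].offset + offset,
                                 gl->frags[0].size - offset);
            skb_shinfo(skb)->nr_frags = gl->nfrags;
            for (i = 1; i < gl->nfrags; i++)
                    __skb_fill_page_desc(skb, i, gl->frags[i].page,
                                         gl->frags[i].offset,
                                         gl->frags[i].size);

            /* the last page still belongs to the free list; take a reference */
            get_page(gl->frags[gl->nfrags - 1].page);
    }
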
gl 490 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c const struct pkt_gl *gl)
gl 1473 drivers/net/ethernet/chelsio/cxgb4vf/sge.c const struct pkt_gl *gl,
gl 1479 drivers/net/ethernet/chelsio/cxgb4vf/sge.c __skb_fill_page_desc(skb, 0, gl->frags[0].page,
gl 1480 drivers/net/ethernet/chelsio/cxgb4vf/sge.c gl->frags[0].offset + offset,
gl 1481 drivers/net/ethernet/chelsio/cxgb4vf/sge.c gl->frags[0].size - offset);
gl 1482 drivers/net/ethernet/chelsio/cxgb4vf/sge.c skb_shinfo(skb)->nr_frags = gl->nfrags;
gl 1483 drivers/net/ethernet/chelsio/cxgb4vf/sge.c for (i = 1; i < gl->nfrags; i++)
gl 1484 drivers/net/ethernet/chelsio/cxgb4vf/sge.c __skb_fill_page_desc(skb, i, gl->frags[i].page,
gl 1485 drivers/net/ethernet/chelsio/cxgb4vf/sge.c gl->frags[i].offset,
gl 1486 drivers/net/ethernet/chelsio/cxgb4vf/sge.c gl->frags[i].size);
gl 1489 drivers/net/ethernet/chelsio/cxgb4vf/sge.c get_page(gl->frags[gl->nfrags - 1].page);
gl 1501 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
gl 1518 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (gl->tot_len <= RX_COPY_THRES) {
gl 1520 drivers/net/ethernet/chelsio/cxgb4vf/sge.c skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
gl 1523 drivers/net/ethernet/chelsio/cxgb4vf/sge.c __skb_put(skb, gl->tot_len);
gl 1524 drivers/net/ethernet/chelsio/cxgb4vf/sge.c skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
gl 1530 drivers/net/ethernet/chelsio/cxgb4vf/sge.c skb_copy_to_linear_data(skb, gl->va, pull_len);
gl 1532 drivers/net/ethernet/chelsio/cxgb4vf/sge.c copy_frags(skb, gl, pull_len);
gl 1533 drivers/net/ethernet/chelsio/cxgb4vf/sge.c skb->len = gl->tot_len;
gl 1549 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void t4vf_pktgl_free(const struct pkt_gl *gl)
gl 1553 drivers/net/ethernet/chelsio/cxgb4vf/sge.c frag = gl->nfrags - 1;
gl 1555 drivers/net/ethernet/chelsio/cxgb4vf/sge.c put_page(gl->frags[frag].page);
gl 1567 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
gl 1578 drivers/net/ethernet/chelsio/cxgb4vf/sge.c t4vf_pktgl_free(gl);
gl 1583 drivers/net/ethernet/chelsio/cxgb4vf/sge.c copy_frags(skb, gl, s->pktshift);
gl 1584 drivers/net/ethernet/chelsio/cxgb4vf/sge.c skb->len = gl->tot_len - s->pktshift;
gl 1615 drivers/net/ethernet/chelsio/cxgb4vf/sge.c const struct pkt_gl *gl)
gl 1633 drivers/net/ethernet/chelsio/cxgb4vf/sge.c do_gro(rxq, gl, pkt);
gl 1640 drivers/net/ethernet/chelsio/cxgb4vf/sge.c skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
gl 1642 drivers/net/ethernet/chelsio/cxgb4vf/sge.c t4vf_pktgl_free(gl);
gl 1711 drivers/net/ethernet/chelsio/cxgb4vf/sge.c static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
gl 1722 drivers/net/ethernet/chelsio/cxgb4vf/sge.c sdesc->page = gl->frags[frags].page;
gl 1780 drivers/net/ethernet/chelsio/cxgb4vf/sge.c struct pkt_gl gl;
gl 1802 drivers/net/ethernet/chelsio/cxgb4vf/sge.c gl.tot_len = len;
gl 1807 drivers/net/ethernet/chelsio/cxgb4vf/sge.c for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
gl 1820 drivers/net/ethernet/chelsio/cxgb4vf/sge.c gl.nfrags = frag+1;
gl 1830 drivers/net/ethernet/chelsio/cxgb4vf/sge.c gl.va = (page_address(gl.frags[0].page) +
gl 1831 drivers/net/ethernet/chelsio/cxgb4vf/sge.c gl.frags[0].offset);
gl 1832 drivers/net/ethernet/chelsio/cxgb4vf/sge.c prefetch(gl.va);
gl 1838 drivers/net/ethernet/chelsio/cxgb4vf/sge.c ret = rspq->handler(rspq, rspq->cur_desc, &gl);
gl 1842 drivers/net/ethernet/chelsio/cxgb4vf/sge.c restore_rx_bufs(&gl, &rxq->fl, frag);
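
cxgb4_pktgl_to_skb() and t4vf_pktgl_to_skb() above apply the same copy-vs-pull policy: small packets are copied whole into the skb head, larger ones copy only the header bytes and keep the payload as page fragments. A hedged sketch of that policy, reusing the attach_gl_frags() helper sketched earlier (RX_COPY_THRES and the skb accounting fields are as in the quoted driver lines; error handling trimmed):

    static struct sk_buff *gl_to_skb(const struct pkt_gl *gl,
                                     unsigned int skb_len, unsigned int pull_len)
    {
            struct sk_buff *skb;

            if (gl->tot_len <= RX_COPY_THRES) {
                    /* small packet: copy everything into the linear area */
                    skb = dev_alloc_skb(gl->tot_len);
                    if (unlikely(!skb))
                            return NULL;
                    __skb_put(skb, gl->tot_len);
                    skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
            } else {
                    /* large packet: pull headers, attach payload pages */
                    skb = dev_alloc_skb(skb_len);
                    if (unlikely(!skb))
                            return NULL;
                    __skb_put(skb, pull_len);
                    skb_copy_to_linear_data(skb, gl->va, pull_len);
                    attach_gl_frags(skb, gl, pull_len);
                    skb->len = gl->tot_len;
                    skb->data_len = gl->tot_len - pull_len;
                    skb->truesize += skb->data_len;
            }
            return skb;
    }
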
gl 2800 drivers/scsi/qedf/qedf_main.c struct global_queue **gl = qedf->global_queues;
gl 2803 drivers/scsi/qedf/qedf_main.c if (!gl[i])
gl 2806 drivers/scsi/qedf/qedf_main.c if (gl[i]->cq)
gl 2808 drivers/scsi/qedf/qedf_main.c gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
gl 2809 drivers/scsi/qedf/qedf_main.c if (gl[i]->cq_pbl)
gl 2810 drivers/scsi/qedf/qedf_main.c dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
gl 2811 drivers/scsi/qedf/qedf_main.c gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
gl 2813 drivers/scsi/qedf/qedf_main.c kfree(gl[i]);
gl 1448 drivers/scsi/qedi/qedi_main.c struct global_queue **gl = qedi->global_queues;
gl 1451 drivers/scsi/qedi/qedi_main.c if (!gl[i])
gl 1454 drivers/scsi/qedi/qedi_main.c if (gl[i]->cq)
gl 1455 drivers/scsi/qedi/qedi_main.c dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
gl 1456 drivers/scsi/qedi/qedi_main.c gl[i]->cq, gl[i]->cq_dma);
gl 1457 drivers/scsi/qedi/qedi_main.c if (gl[i]->cq_pbl)
gl 1458 drivers/scsi/qedi/qedi_main.c dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
gl 1459 drivers/scsi/qedi/qedi_main.c gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
gl 1461 drivers/scsi/qedi/qedi_main.c kfree(gl[i]);
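
qedf and qedi free their global completion queues with the same idempotent loop: each CQ and its page buffer list (PBL) DMA region is released only if it was allocated, then the tracking struct itself is freed. A sketch of the shared pattern (struct global_queue fields as quoted; num_queues is a hypothetical parameter):

    static void free_global_queues(struct pci_dev *pdev,
                                   struct global_queue **gl, int num_queues)
    {
            int i;

            for (i = 0; i < num_queues; i++) {
                    if (!gl[i])
                            continue;
                    if (gl[i]->cq)
                            dma_free_coherent(&pdev->dev, gl[i]->cq_mem_size,
                                              gl[i]->cq, gl[i]->cq_dma);
                    if (gl[i]->cq_pbl)
                            dma_free_coherent(&pdev->dev, gl[i]->cq_pbl_size,
                                              gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
                    kfree(gl[i]);
            }
    }
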
gl 212 drivers/target/iscsi/cxgbit/cxgbit_main.c cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
gl 219 drivers/target/iscsi/cxgbit/cxgbit_main.c __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
gl 220 drivers/target/iscsi/cxgbit/cxgbit_main.c gl->frags[0].offset + offset,
gl 221 drivers/target/iscsi/cxgbit/cxgbit_main.c gl->frags[0].size - offset);
gl 222 drivers/target/iscsi/cxgbit/cxgbit_main.c for (i = 1; i < gl->nfrags; i++)
gl 224 drivers/target/iscsi/cxgbit/cxgbit_main.c gl->frags[i].page,
gl 225 drivers/target/iscsi/cxgbit/cxgbit_main.c gl->frags[i].offset,
gl 226 drivers/target/iscsi/cxgbit/cxgbit_main.c gl->frags[i].size);
gl 228 drivers/target/iscsi/cxgbit/cxgbit_main.c skb_shinfo(skb)->nr_frags += gl->nfrags;
gl 231 drivers/target/iscsi/cxgbit/cxgbit_main.c get_page(gl->frags[gl->nfrags - 1].page);
gl 235 drivers/target/iscsi/cxgbit/cxgbit_main.c cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
gl 243 drivers/target/iscsi/cxgbit/cxgbit_main.c struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;
gl 249 drivers/target/iscsi/cxgbit/cxgbit_main.c pdu_cb->hdr = gl->va + offset;
gl 253 drivers/target/iscsi/cxgbit/cxgbit_main.c if (unlikely(gl->nfrags > 1))
gl 258 drivers/target/iscsi/cxgbit/cxgbit_main.c struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;
gl 265 drivers/target/iscsi/cxgbit/cxgbit_main.c pdu_cb->nr_dfrags = gl->nfrags;
gl 271 drivers/target/iscsi/cxgbit/cxgbit_main.c cpl = (struct cpl_rx_iscsi_cmp *)gl->va;
gl 275 drivers/target/iscsi/cxgbit/cxgbit_main.c pdu_cb->hdr = gl->va + offset;
gl 281 drivers/target/iscsi/cxgbit/cxgbit_main.c if (unlikely(gl->nfrags > 1))
gl 299 drivers/target/iscsi/cxgbit/cxgbit_main.c cxgbit_copy_frags(skb, gl, offset);
gl 301 drivers/target/iscsi/cxgbit/cxgbit_main.c pdu_cb->frags += gl->nfrags;
gl 309 drivers/target/iscsi/cxgbit/cxgbit_main.c cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
gl 373 drivers/target/iscsi/cxgbit/cxgbit_main.c const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
gl 393 drivers/target/iscsi/cxgbit/cxgbit_main.c skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
gl 406 drivers/target/iscsi/cxgbit/cxgbit_main.c if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
gl 413 drivers/target/iscsi/cxgbit/cxgbit_main.c if (gl)
gl 414 drivers/target/iscsi/cxgbit/cxgbit_main.c cxgbit_lro_add_packet_gl(skb, op, gl);
gl 428 drivers/target/iscsi/cxgbit/cxgbit_main.c const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
gl 455 drivers/target/iscsi/cxgbit/cxgbit_main.c rpl = gl ? (struct cpl_tx_data *)gl->va :
gl 467 drivers/target/iscsi/cxgbit/cxgbit_main.c if (!gl) {
gl 483 drivers/target/iscsi/cxgbit/cxgbit_main.c if (unlikely(op != *(u8 *)gl->va)) {
gl 485 drivers/target/iscsi/cxgbit/cxgbit_main.c gl->va, be64_to_cpu(*rsp),
gl 486 drivers/target/iscsi/cxgbit/cxgbit_main.c get_unaligned_be64(gl->va),
gl 487 drivers/target/iscsi/cxgbit/cxgbit_main.c gl->tot_len);
gl 493 drivers/target/iscsi/cxgbit/cxgbit_main.c if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
gl 499 drivers/target/iscsi/cxgbit/cxgbit_main.c skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
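
Across chtls, iw_cxgb4 and cxgbit, the ULD rx handlers share one dispatch convention on the gl argument, visible in the hits above. A sketch of that convention (the three handle_* helpers are hypothetical stand-ins for the per-driver work):

    /*
     * gl == NULL: the 64-bit response words carry the message inline.
     * gl == CXGB4_MSG_AN: an asynchronous notification, no payload.
     * otherwise: gl describes a real packet gather list.
     */
    static int uld_rx_dispatch(void *handle, const __be64 *rsp,
                               const struct pkt_gl *gl)
    {
            struct sk_buff *skb;

            if (!gl)
                    return handle_inline_msg(handle, rsp);      /* hypothetical */
            if (gl == CXGB4_MSG_AN)
                    return handle_async_notif(handle, rsp);     /* hypothetical */

            skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
            if (unlikely(!skb))
                    return -ENOMEM;
            return handle_skb(handle, skb);                     /* hypothetical */
    }
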
gl 299 fs/gfs2/bmap.c static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
gl 309 fs/gfs2/bmap.c rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
gl 610 fs/gfs2/bmap.c struct gfs2_glock *gl, unsigned int i,
gl 618 fs/gfs2/bmap.c mp->mp_bh[i] = gfs2_meta_new(gl, bn);
gl 619 fs/gfs2/bmap.c gfs2_trans_add_meta(gl, mp->mp_bh[i]);
gl 1484 fs/gfs2/dir.c struct gfs2_glock *gl = ip->i_gl;
gl 1504 fs/gfs2/dir.c bh = gfs2_getbuf(gl, blocknr, 1);
gl 1218 fs/gfs2/file.c struct gfs2_glock *gl;
gl 1241 fs/gfs2/file.c &gfs2_flock_glops, CREATE, &gl);
gl 1244 fs/gfs2/file.c gfs2_holder_init(gl, state, flags, fl_gh);
gl 1245 fs/gfs2/file.c gfs2_glock_put(gl);
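
From here the index moves into fs/gfs2, where gl is a glock rather than a gather list. The file.c hits above already show the canonical glock life cycle; a sketch of the same acquire/release sequence, built from the glock.c and glock.h lines quoted further down (gfs2_glock_dq_uninit() is the assumed release counterpart, not among the lines quoted in this index):

    static int with_shared_glock(struct gfs2_sbd *sdp, u64 number,
                                 const struct gfs2_glock_operations *glops)
    {
            struct gfs2_glock *gl;
            struct gfs2_holder gh;
            int error;

            /* look up or create the glock; returns with a reference held */
            error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
            if (error)
                    return error;
            /* initialize a holder and enqueue it */
            error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
            gfs2_glock_put(gl);     /* the holder keeps its own reference */
            if (error)
                    return error;
            /* ... access data protected by the shared lock ... */
            gfs2_glock_dq_uninit(&gh);      /* assumed counterpart, not quoted */
            return 0;
    }
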
gl 54 fs/gfs2/glock.c struct gfs2_glock *gl; /* current glock struct */
gl 58 fs/gfs2/glock.c typedef void (*glock_examiner) (struct gfs2_glock * gl);
gl 60 fs/gfs2/glock.c static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
gl 116 fs/gfs2/glock.c static void wake_up_glock(struct gfs2_glock *gl)
gl 118 fs/gfs2/glock.c wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
gl 121 fs/gfs2/glock.c __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
gl 126 fs/gfs2/glock.c struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
gl 128 fs/gfs2/glock.c if (gl->gl_ops->go_flags & GLOF_ASPACE) {
gl 129 fs/gfs2/glock.c kmem_cache_free(gfs2_glock_aspace_cachep, gl);
gl 131 fs/gfs2/glock.c kfree(gl->gl_lksb.sb_lvbptr);
gl 132 fs/gfs2/glock.c kmem_cache_free(gfs2_glock_cachep, gl);
gl 136 fs/gfs2/glock.c void gfs2_glock_free(struct gfs2_glock *gl)
gl 138 fs/gfs2/glock.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 140 fs/gfs2/glock.c BUG_ON(atomic_read(&gl->gl_revokes));
gl 141 fs/gfs2/glock.c rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
gl 143 fs/gfs2/glock.c wake_up_glock(gl);
gl 144 fs/gfs2/glock.c call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
gl 155 fs/gfs2/glock.c void gfs2_glock_hold(struct gfs2_glock *gl)
gl 157 fs/gfs2/glock.c GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
gl 158 fs/gfs2/glock.c lockref_get(&gl->gl_lockref);
gl 168 fs/gfs2/glock.c static int demote_ok(const struct gfs2_glock *gl)
gl 170 fs/gfs2/glock.c const struct gfs2_glock_operations *glops = gl->gl_ops;
gl 172 fs/gfs2/glock.c if (gl->gl_state == LM_ST_UNLOCKED)
gl 174 fs/gfs2/glock.c if (!list_empty(&gl->gl_holders))
gl 177 fs/gfs2/glock.c return glops->go_demote_ok(gl);
gl 182 fs/gfs2/glock.c void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
gl 184 fs/gfs2/glock.c if (!(gl->gl_ops->go_flags & GLOF_LRU))
gl 189 fs/gfs2/glock.c list_del(&gl->gl_lru);
gl 190 fs/gfs2/glock.c list_add_tail(&gl->gl_lru, &lru_list);
gl 192 fs/gfs2/glock.c if (!test_bit(GLF_LRU, &gl->gl_flags)) {
gl 193 fs/gfs2/glock.c set_bit(GLF_LRU, &gl->gl_flags);
gl 200 fs/gfs2/glock.c static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
gl 202 fs/gfs2/glock.c if (!(gl->gl_ops->go_flags & GLOF_LRU))
gl 206 fs/gfs2/glock.c if (test_bit(GLF_LRU, &gl->gl_flags)) {
gl 207 fs/gfs2/glock.c list_del_init(&gl->gl_lru);
gl 209 fs/gfs2/glock.c clear_bit(GLF_LRU, &gl->gl_flags);
gl 218 fs/gfs2/glock.c static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
gl 219 fs/gfs2/glock.c if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
gl 226 fs/gfs2/glock.c GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
gl 227 fs/gfs2/glock.c gl->gl_lockref.count--;
gl 231 fs/gfs2/glock.c static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
gl 232 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 233 fs/gfs2/glock.c __gfs2_glock_queue_work(gl, delay);
gl 234 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 237 fs/gfs2/glock.c static void __gfs2_glock_put(struct gfs2_glock *gl)
gl 239 fs/gfs2/glock.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 240 fs/gfs2/glock.c struct address_space *mapping = gfs2_glock2aspace(gl);
gl 242 fs/gfs2/glock.c lockref_mark_dead(&gl->gl_lockref);
gl 244 fs/gfs2/glock.c gfs2_glock_remove_from_lru(gl);
gl 245 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 246 fs/gfs2/glock.c GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
gl 247 fs/gfs2/glock.c GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
gl 248 fs/gfs2/glock.c trace_gfs2_glock_put(gl);
gl 249 fs/gfs2/glock.c sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
gl 255 fs/gfs2/glock.c void gfs2_glock_queue_put(struct gfs2_glock *gl)
gl 257 fs/gfs2/glock.c gfs2_glock_queue_work(gl, 0);
gl 266 fs/gfs2/glock.c void gfs2_glock_put(struct gfs2_glock *gl)
gl 268 fs/gfs2/glock.c if (lockref_put_or_lock(&gl->gl_lockref))
gl 271 fs/gfs2/glock.c __gfs2_glock_put(gl);
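
The queue-work and put entries just above encode one invariant: a queued work item owns a glock reference. A sketch of the rule (essentially the quoted __gfs2_glock_queue_work(); the caller must hold gl->gl_lockref.lock, as the quoted lines require):

    static void queue_glock_work_locked(struct gfs2_glock *gl, unsigned long delay)
    {
            if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
                    /* already queued: that instance still holds its reference,
                     * so drop the one the caller intended to transfer */
                    GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
                    gl->gl_lockref.count--;
            }
    }
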
gl 282 fs/gfs2/glock.c static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
gl 284 fs/gfs2/glock.c const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
gl 288 fs/gfs2/glock.c if (gl->gl_state == gh->gh_state)
gl 292 fs/gfs2/glock.c if (gl->gl_state == LM_ST_EXCLUSIVE) {
gl 298 fs/gfs2/glock.c if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
gl 320 fs/gfs2/glock.c static void do_error(struct gfs2_glock *gl, const int ret)
gl 324 fs/gfs2/glock.c list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
gl 347 fs/gfs2/glock.c static int do_promote(struct gfs2_glock *gl)
gl 348 fs/gfs2/glock.c __releases(&gl->gl_lockref.lock)
gl 349 fs/gfs2/glock.c __acquires(&gl->gl_lockref.lock)
gl 351 fs/gfs2/glock.c const struct gfs2_glock_operations *glops = gl->gl_ops;
gl 356 fs/gfs2/glock.c list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
gl 359 fs/gfs2/glock.c if (may_grant(gl, gh)) {
gl 360 fs/gfs2/glock.c if (gh->gh_list.prev == &gl->gl_holders &&
gl 362 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 365 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 385 fs/gfs2/glock.c if (gh->gh_list.prev == &gl->gl_holders)
gl 387 fs/gfs2/glock.c do_error(gl, 0);
gl 398 fs/gfs2/glock.c static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
gl 402 fs/gfs2/glock.c list_for_each_entry(gh, &gl->gl_holders, gh_list) {
gl 416 fs/gfs2/glock.c static void state_change(struct gfs2_glock *gl, unsigned int new_state)
gl 420 fs/gfs2/glock.c held1 = (gl->gl_state != LM_ST_UNLOCKED);
gl 424 fs/gfs2/glock.c GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
gl 426 fs/gfs2/glock.c gl->gl_lockref.count++;
gl 428 fs/gfs2/glock.c gl->gl_lockref.count--;
gl 430 fs/gfs2/glock.c if (held1 && held2 && list_empty(&gl->gl_holders))
gl 431 fs/gfs2/glock.c clear_bit(GLF_QUEUED, &gl->gl_flags);
gl 433 fs/gfs2/glock.c if (new_state != gl->gl_target)
gl 435 fs/gfs2/glock.c gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
gl 437 fs/gfs2/glock.c gl->gl_state = new_state;
gl 438 fs/gfs2/glock.c gl->gl_tchange = jiffies;
gl 441 fs/gfs2/glock.c static void gfs2_demote_wake(struct gfs2_glock *gl)
gl 443 fs/gfs2/glock.c gl->gl_demote_state = LM_ST_EXCLUSIVE;
gl 444 fs/gfs2/glock.c clear_bit(GLF_DEMOTE, &gl->gl_flags);
gl 446 fs/gfs2/glock.c wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
gl 456 fs/gfs2/glock.c static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
gl 458 fs/gfs2/glock.c const struct gfs2_glock_operations *glops = gl->gl_ops;
gl 463 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 464 fs/gfs2/glock.c trace_gfs2_glock_state_change(gl, state);
gl 465 fs/gfs2/glock.c state_change(gl, state);
gl 466 fs/gfs2/glock.c gh = find_first_waiter(gl);
gl 469 fs/gfs2/glock.c if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
gl 470 fs/gfs2/glock.c state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
gl 471 fs/gfs2/glock.c gl->gl_target = LM_ST_UNLOCKED;
gl 474 fs/gfs2/glock.c if (unlikely(state != gl->gl_target)) {
gl 475 fs/gfs2/glock.c if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
gl 479 fs/gfs2/glock.c list_move_tail(&gh->gh_list, &gl->gl_holders);
gl 480 fs/gfs2/glock.c gh = find_first_waiter(gl);
gl 481 fs/gfs2/glock.c gl->gl_target = gh->gh_state;
gl 487 fs/gfs2/glock.c gl->gl_target = gl->gl_state;
gl 488 fs/gfs2/glock.c do_error(gl, ret);
gl 496 fs/gfs2/glock.c do_xmote(gl, gh, gl->gl_target);
gl 501 fs/gfs2/glock.c do_xmote(gl, gh, LM_ST_UNLOCKED);
gl 504 fs/gfs2/glock.c fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
gl 505 fs/gfs2/glock.c gl->gl_target, state);
gl 506 fs/gfs2/glock.c GLOCK_BUG_ON(gl, 1);
gl 508 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 513 fs/gfs2/glock.c if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
gl 514 fs/gfs2/glock.c gfs2_demote_wake(gl);
gl 517 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 518 fs/gfs2/glock.c rv = glops->go_xmote_bh(gl, gh);
gl 519 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 521 fs/gfs2/glock.c do_error(gl, rv);
gl 525 fs/gfs2/glock.c rv = do_promote(gl);
gl 530 fs/gfs2/glock.c clear_bit(GLF_LOCK, &gl->gl_flags);
gl 532 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 543 fs/gfs2/glock.c static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
gl 544 fs/gfs2/glock.c __releases(&gl->gl_lockref.lock)
gl 545 fs/gfs2/glock.c __acquires(&gl->gl_lockref.lock)
gl 547 fs/gfs2/glock.c const struct gfs2_glock_operations *glops = gl->gl_ops;
gl 548 fs/gfs2/glock.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 557 fs/gfs2/glock.c GLOCK_BUG_ON(gl, gl->gl_state == target);
gl 558 fs/gfs2/glock.c GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
gl 561 fs/gfs2/glock.c set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
gl 562 fs/gfs2/glock.c do_error(gl, 0); /* Fail queued try locks */
gl 564 fs/gfs2/glock.c gl->gl_req = target;
gl 565 fs/gfs2/glock.c set_bit(GLF_BLOCKING, &gl->gl_flags);
gl 566 fs/gfs2/glock.c if ((gl->gl_req == LM_ST_UNLOCKED) ||
gl 567 fs/gfs2/glock.c (gl->gl_state == LM_ST_EXCLUSIVE) ||
gl 569 fs/gfs2/glock.c clear_bit(GLF_BLOCKING, &gl->gl_flags);
gl 570 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 572 fs/gfs2/glock.c glops->go_sync(gl);
gl 573 fs/gfs2/glock.c if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
gl 574 fs/gfs2/glock.c glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
gl 575 fs/gfs2/glock.c clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
gl 577 fs/gfs2/glock.c gfs2_glock_hold(gl);
gl 580 fs/gfs2/glock.c ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
gl 581 fs/gfs2/glock.c if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
gl 584 fs/gfs2/glock.c finish_xmote(gl, target);
gl 585 fs/gfs2/glock.c gfs2_glock_queue_work(gl, 0);
gl 589 fs/gfs2/glock.c GLOCK_BUG_ON(gl, !test_bit(SDF_WITHDRAWN,
gl 593 fs/gfs2/glock.c finish_xmote(gl, target);
gl 594 fs/gfs2/glock.c gfs2_glock_queue_work(gl, 0);
gl 597 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 605 fs/gfs2/glock.c static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
gl 609 fs/gfs2/glock.c if (!list_empty(&gl->gl_holders)) {
gl 610 fs/gfs2/glock.c gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
gl 624 fs/gfs2/glock.c static void run_queue(struct gfs2_glock *gl, const int nonblock)
gl 625 fs/gfs2/glock.c __releases(&gl->gl_lockref.lock)
gl 626 fs/gfs2/glock.c __acquires(&gl->gl_lockref.lock)
gl 631 fs/gfs2/glock.c if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
gl 634 fs/gfs2/glock.c GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
gl 636 fs/gfs2/glock.c if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
gl 637 fs/gfs2/glock.c gl->gl_demote_state != gl->gl_state) {
gl 638 fs/gfs2/glock.c if (find_first_holder(gl))
gl 642 fs/gfs2/glock.c set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
gl 643 fs/gfs2/glock.c GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
gl 644 fs/gfs2/glock.c gl->gl_target = gl->gl_demote_state;
gl 646 fs/gfs2/glock.c if (test_bit(GLF_DEMOTE, &gl->gl_flags))
gl 647 fs/gfs2/glock.c gfs2_demote_wake(gl);
gl 648 fs/gfs2/glock.c ret = do_promote(gl);
gl 653 fs/gfs2/glock.c gh = find_first_waiter(gl);
gl 654 fs/gfs2/glock.c gl->gl_target = gh->gh_state;
gl 656 fs/gfs2/glock.c do_error(gl, 0); /* Fail queued try locks */
gl 658 fs/gfs2/glock.c do_xmote(gl, gh, gl->gl_target);
gl 663 fs/gfs2/glock.c clear_bit(GLF_LOCK, &gl->gl_flags);
gl 665 fs/gfs2/glock.c gl->gl_lockref.count++;
gl 666 fs/gfs2/glock.c __gfs2_glock_queue_work(gl, 0);
gl 670 fs/gfs2/glock.c clear_bit(GLF_LOCK, &gl->gl_flags);
gl 677 fs/gfs2/glock.c struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
gl 678 fs/gfs2/glock.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 680 fs/gfs2/glock.c u64 no_addr = gl->gl_name.ln_number;
gl 685 fs/gfs2/glock.c if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
gl 694 fs/gfs2/glock.c gfs2_glock_put(gl);
gl 700 fs/gfs2/glock.c struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
gl 703 fs/gfs2/glock.c if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
gl 704 fs/gfs2/glock.c finish_xmote(gl, gl->gl_reply);
gl 707 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 708 fs/gfs2/glock.c if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl 709 fs/gfs2/glock.c gl->gl_state != LM_ST_UNLOCKED &&
gl 710 fs/gfs2/glock.c gl->gl_demote_state != LM_ST_EXCLUSIVE) {
gl 713 fs/gfs2/glock.c holdtime = gl->gl_tchange + gl->gl_hold_time;
gl 718 fs/gfs2/glock.c clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
gl 719 fs/gfs2/glock.c set_bit(GLF_DEMOTE, &gl->gl_flags);
gl 722 fs/gfs2/glock.c run_queue(gl, 0);
gl 726 fs/gfs2/glock.c if (gl->gl_name.ln_type != LM_TYPE_INODE)
gl 728 fs/gfs2/glock.c __gfs2_glock_queue_work(gl, delay);
gl 736 fs/gfs2/glock.c gl->gl_lockref.count -= drop_refs;
gl 737 fs/gfs2/glock.c if (!gl->gl_lockref.count) {
gl 738 fs/gfs2/glock.c __gfs2_glock_put(gl);
gl 741 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 749 fs/gfs2/glock.c struct gfs2_glock *gl;
gl 759 fs/gfs2/glock.c gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
gl 761 fs/gfs2/glock.c if (IS_ERR(gl))
gl 764 fs/gfs2/glock.c gl = rhashtable_lookup_fast(&gl_hash_table,
gl 767 fs/gfs2/glock.c if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
gl 775 fs/gfs2/glock.c return gl;
gl 799 fs/gfs2/glock.c struct gfs2_glock *gl, *tmp;
gl 804 fs/gfs2/glock.c gl = find_insert_glock(&name, NULL);
gl 805 fs/gfs2/glock.c if (gl) {
gl 806 fs/gfs2/glock.c *glp = gl;
gl 816 fs/gfs2/glock.c gl = kmem_cache_alloc(cachep, GFP_NOFS);
gl 817 fs/gfs2/glock.c if (!gl)
gl 820 fs/gfs2/glock.c memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
gl 823 fs/gfs2/glock.c gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
gl 824 fs/gfs2/glock.c if (!gl->gl_lksb.sb_lvbptr) {
gl 825 fs/gfs2/glock.c kmem_cache_free(cachep, gl);
gl 831 fs/gfs2/glock.c gl->gl_node.next = NULL;
gl 832 fs/gfs2/glock.c gl->gl_flags = 0;
gl 833 fs/gfs2/glock.c gl->gl_name = name;
gl 834 fs/gfs2/glock.c gl->gl_lockref.count = 1;
gl 835 fs/gfs2/glock.c gl->gl_state = LM_ST_UNLOCKED;
gl 836 fs/gfs2/glock.c gl->gl_target = LM_ST_UNLOCKED;
gl 837 fs/gfs2/glock.c gl->gl_demote_state = LM_ST_EXCLUSIVE;
gl 838 fs/gfs2/glock.c gl->gl_ops = glops;
gl 839 fs/gfs2/glock.c gl->gl_dstamp = 0;
gl 842 fs/gfs2/glock.c gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
gl 844 fs/gfs2/glock.c gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
gl 845 fs/gfs2/glock.c gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
gl 846 fs/gfs2/glock.c gl->gl_tchange = jiffies;
gl 847 fs/gfs2/glock.c gl->gl_object = NULL;
gl 848 fs/gfs2/glock.c gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
gl 849 fs/gfs2/glock.c INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
gl 850 fs/gfs2/glock.c INIT_WORK(&gl->gl_delete, delete_work_func);
gl 852 fs/gfs2/glock.c mapping = gfs2_glock2aspace(gl);
gl 862 fs/gfs2/glock.c tmp = find_insert_glock(&name, gl);
gl 864 fs/gfs2/glock.c *glp = gl;
gl 874 fs/gfs2/glock.c kfree(gl->gl_lksb.sb_lvbptr);
gl 875 fs/gfs2/glock.c kmem_cache_free(cachep, gl);
gl 891 fs/gfs2/glock.c void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
gl 895 fs/gfs2/glock.c gh->gh_gl = gl;
gl 902 fs/gfs2/glock.c gfs2_glock_hold(gl);
gl 939 fs/gfs2/glock.c static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
gl 945 fs/gfs2/glock.c gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
gl 1061 fs/gfs2/glock.c static void handle_callback(struct gfs2_glock *gl, unsigned int state,
gl 1066 fs/gfs2/glock.c set_bit(bit, &gl->gl_flags);
gl 1067 fs/gfs2/glock.c if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
gl 1068 fs/gfs2/glock.c gl->gl_demote_state = state;
gl 1069 fs/gfs2/glock.c gl->gl_demote_time = jiffies;
gl 1070 fs/gfs2/glock.c } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
gl 1071 fs/gfs2/glock.c gl->gl_demote_state != state) {
gl 1072 fs/gfs2/glock.c gl->gl_demote_state = LM_ST_UNLOCKED;
gl 1074 fs/gfs2/glock.c if (gl->gl_ops->go_callback)
gl 1075 fs/gfs2/glock.c gl->gl_ops->go_callback(gl, remote);
gl 1076 fs/gfs2/glock.c trace_gfs2_demote_rq(gl, remote);
gl 1109 fs/gfs2/glock.c __releases(&gl->gl_lockref.lock)
gl 1110 fs/gfs2/glock.c __acquires(&gl->gl_lockref.lock)
gl 1112 fs/gfs2/glock.c struct gfs2_glock *gl = gh->gh_gl;
gl 1113 fs/gfs2/glock.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 1118 fs/gfs2/glock.c GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
gl 1120 fs/gfs2/glock.c GLOCK_BUG_ON(gl, true);
gl 1123 fs/gfs2/glock.c if (test_bit(GLF_LOCK, &gl->gl_flags))
gl 1124 fs/gfs2/glock.c try_futile = !may_grant(gl, gh);
gl 1125 fs/gfs2/glock.c if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
gl 1129 fs/gfs2/glock.c list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
gl 1145 fs/gfs2/glock.c set_bit(GLF_QUEUED, &gl->gl_flags);
gl 1147 fs/gfs2/glock.c gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
gl 1148 fs/gfs2/glock.c gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
gl 1150 fs/gfs2/glock.c list_add_tail(&gh->gh_list, &gl->gl_holders);
gl 1157 fs/gfs2/glock.c gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
gl 1159 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 1161 fs/gfs2/glock.c sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
gl 1162 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 1175 fs/gfs2/glock.c gfs2_dump_glock(NULL, gl, true);
gl 1190 fs/gfs2/glock.c struct gfs2_glock *gl = gh->gh_gl;
gl 1191 fs/gfs2/glock.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 1197 fs/gfs2/glock.c if (test_bit(GLF_LRU, &gl->gl_flags))
gl 1198 fs/gfs2/glock.c gfs2_glock_remove_from_lru(gl);
gl 1200 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 1203 fs/gfs2/glock.c test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
gl 1204 fs/gfs2/glock.c set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
gl 1205 fs/gfs2/glock.c gl->gl_lockref.count++;
gl 1206 fs/gfs2/glock.c __gfs2_glock_queue_work(gl, 0);
gl 1208 fs/gfs2/glock.c run_queue(gl, 1);
gl 1209 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 1237 fs/gfs2/glock.c struct gfs2_glock *gl = gh->gh_gl;
gl 1238 fs/gfs2/glock.c const struct gfs2_glock_operations *glops = gl->gl_ops;
gl 1242 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 1244 fs/gfs2/glock.c handle_callback(gl, LM_ST_UNLOCKED, 0, false);
gl 1248 fs/gfs2/glock.c if (find_first_holder(gl) == NULL) {
gl 1250 fs/gfs2/glock.c GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
gl 1251 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 1253 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 1254 fs/gfs2/glock.c clear_bit(GLF_LOCK, &gl->gl_flags);
gl 1256 fs/gfs2/glock.c if (list_empty(&gl->gl_holders) &&
gl 1257 fs/gfs2/glock.c !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl 1258 fs/gfs2/glock.c !test_bit(GLF_DEMOTE, &gl->gl_flags))
gl 1261 fs/gfs2/glock.c if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
gl 1262 fs/gfs2/glock.c gfs2_glock_add_to_lru(gl);
gl 1266 fs/gfs2/glock.c gl->gl_lockref.count++;
gl 1267 fs/gfs2/glock.c if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl 1268 fs/gfs2/glock.c !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
gl 1269 fs/gfs2/glock.c gl->gl_name.ln_type == LM_TYPE_INODE)
gl 1270 fs/gfs2/glock.c delay = gl->gl_hold_time;
gl 1271 fs/gfs2/glock.c __gfs2_glock_queue_work(gl, delay);
gl 1273 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 1278 fs/gfs2/glock.c struct gfs2_glock *gl = gh->gh_gl;
gl 1281 fs/gfs2/glock.c wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
gl 1312 fs/gfs2/glock.c struct gfs2_glock *gl;
gl 1315 fs/gfs2/glock.c error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
gl 1317 fs/gfs2/glock.c error = gfs2_glock_nq_init(gl, state, flags, gh);
gl 1318 fs/gfs2/glock.c gfs2_glock_put(gl);
gl 1432 fs/gfs2/glock.c void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
gl 1438 fs/gfs2/glock.c gfs2_glock_hold(gl);
gl 1439 fs/gfs2/glock.c holdtime = gl->gl_tchange + gl->gl_hold_time;
gl 1440 fs/gfs2/glock.c if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
gl 1441 fs/gfs2/glock.c gl->gl_name.ln_type == LM_TYPE_INODE) {
gl 1444 fs/gfs2/glock.c if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
gl 1445 fs/gfs2/glock.c delay = gl->gl_hold_time;
gl 1448 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 1449 fs/gfs2/glock.c handle_callback(gl, state, delay, true);
gl 1450 fs/gfs2/glock.c __gfs2_glock_queue_work(gl, delay);
gl 1451 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 1465 fs/gfs2/glock.c static int gfs2_should_freeze(const struct gfs2_glock *gl)
gl 1469 fs/gfs2/glock.c if (gl->gl_reply & ~LM_OUT_ST_MASK)
gl 1471 fs/gfs2/glock.c if (gl->gl_target == LM_ST_UNLOCKED)
gl 1474 fs/gfs2/glock.c list_for_each_entry(gh, &gl->gl_holders, gh_list) {
gl 1493 fs/gfs2/glock.c void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
gl 1495 fs/gfs2/glock.c struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
gl 1497 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 1498 fs/gfs2/glock.c gl->gl_reply = ret;
gl 1501 fs/gfs2/glock.c if (gfs2_should_freeze(gl)) {
gl 1502 fs/gfs2/glock.c set_bit(GLF_FROZEN, &gl->gl_flags);
gl 1503 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 1508 fs/gfs2/glock.c gl->gl_lockref.count++;
gl 1509 fs/gfs2/glock.c set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
gl 1510 fs/gfs2/glock.c __gfs2_glock_queue_work(gl, 0);
gl 1511 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 1547 fs/gfs2/glock.c struct gfs2_glock *gl;
gl 1552 fs/gfs2/glock.c gl = list_entry(list->next, struct gfs2_glock, gl_lru);
gl 1553 fs/gfs2/glock.c list_del_init(&gl->gl_lru);
gl 1554 fs/gfs2/glock.c if (!spin_trylock(&gl->gl_lockref.lock)) {
gl 1556 fs/gfs2/glock.c list_add(&gl->gl_lru, &lru_list);
gl 1557 fs/gfs2/glock.c set_bit(GLF_LRU, &gl->gl_flags);
gl 1561 fs/gfs2/glock.c if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
gl 1562 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 1565 fs/gfs2/glock.c gl->gl_lockref.count++;
gl 1566 fs/gfs2/glock.c if (demote_ok(gl))
gl 1567 fs/gfs2/glock.c handle_callback(gl, LM_ST_UNLOCKED, 0, false);
gl 1568 fs/gfs2/glock.c WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
gl 1569 fs/gfs2/glock.c __gfs2_glock_queue_work(gl, 0);
gl 1570 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 1586 fs/gfs2/glock.c struct gfs2_glock *gl;
gl 1593 fs/gfs2/glock.c gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
gl 1596 fs/gfs2/glock.c if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
gl 1597 fs/gfs2/glock.c list_move(&gl->gl_lru, &dispose);
gl 1599 fs/gfs2/glock.c clear_bit(GLF_LRU, &gl->gl_flags);
gl 1604 fs/gfs2/glock.c list_move(&gl->gl_lru, &skipped);
gl 1647 fs/gfs2/glock.c struct gfs2_glock *gl;
gl 1655 fs/gfs2/glock.c while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
gl 1656 fs/gfs2/glock.c if (gl->gl_name.ln_sbd == sdp &&
gl 1657 fs/gfs2/glock.c lockref_get_not_dead(&gl->gl_lockref))
gl 1658 fs/gfs2/glock.c examiner(gl);
gl 1661 fs/gfs2/glock.c } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
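
The hash-walk entries ending here iterate every glock in the global rhashtable, taking a reference only on live glocks of the target superblock. A sketch of that walk, following the quoted loop (glock_examiner is the typedef quoted at glock.c:58; the rhashtable walk API is from <linux/rhashtable.h>):

    static void walk_glocks(glock_examiner examiner, struct gfs2_sbd *sdp)
    {
            struct rhashtable_iter iter;
            struct gfs2_glock *gl;

            rhashtable_walk_enter(&gl_hash_table, &iter);
            do {
                    rhashtable_walk_start(&iter);
                    while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
                            if (gl->gl_name.ln_sbd == sdp &&
                                lockref_get_not_dead(&gl->gl_lockref))
                                    examiner(gl);
                    rhashtable_walk_stop(&iter);
                    /* the walk can return -EAGAIN on a table resize: restart */
            } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
            rhashtable_walk_exit(&iter);
    }
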
gl 1672 fs/gfs2/glock.c static void thaw_glock(struct gfs2_glock *gl)
gl 1674 fs/gfs2/glock.c if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
gl 1675 fs/gfs2/glock.c gfs2_glock_put(gl);
gl 1678 fs/gfs2/glock.c set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
gl 1679 fs/gfs2/glock.c gfs2_glock_queue_work(gl, 0);
gl 1688 fs/gfs2/glock.c static void clear_glock(struct gfs2_glock *gl)
gl 1690 fs/gfs2/glock.c gfs2_glock_remove_from_lru(gl);
gl 1692 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 1693 fs/gfs2/glock.c if (gl->gl_state != LM_ST_UNLOCKED)
gl 1694 fs/gfs2/glock.c handle_callback(gl, LM_ST_UNLOCKED, 0, false);
gl 1695 fs/gfs2/glock.c __gfs2_glock_queue_work(gl, 0);
gl 1696 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 1710 fs/gfs2/glock.c static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
gl 1712 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 1713 fs/gfs2/glock.c gfs2_dump_glock(seq, gl, fsid);
gl 1714 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 1717 fs/gfs2/glock.c static void dump_glock_func(struct gfs2_glock *gl)
gl 1719 fs/gfs2/glock.c dump_glock(NULL, gl, true);
gl 1744 fs/gfs2/glock.c struct gfs2_glock *gl = ip->i_gl;
gl 1748 fs/gfs2/glock.c gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
gl 1750 fs/gfs2/glock.c spin_lock(&gl->gl_lockref.lock);
gl 1751 fs/gfs2/glock.c clear_bit(GLF_LOCK, &gl->gl_flags);
gl 1752 fs/gfs2/glock.c run_queue(gl, 1);
gl 1753 fs/gfs2/glock.c spin_unlock(&gl->gl_lockref.lock);
gl 1827 fs/gfs2/glock.c static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
gl 1829 fs/gfs2/glock.c const unsigned long *gflags = &gl->gl_flags;
gl 1856 fs/gfs2/glock.c if (gl->gl_object)
gl 1882 fs/gfs2/glock.c void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
gl 1884 fs/gfs2/glock.c const struct gfs2_glock_operations *glops = gl->gl_ops;
gl 1888 fs/gfs2/glock.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 1894 fs/gfs2/glock.c dtime = jiffies - gl->gl_demote_time;
gl 1896 fs/gfs2/glock.c if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
gl 1899 fs/gfs2/glock.c "v:%d r:%d m:%ld\n", fs_id_buf, state2str(gl->gl_state),
gl 1900 fs/gfs2/glock.c gl->gl_name.ln_type,
gl 1901 fs/gfs2/glock.c (unsigned long long)gl->gl_name.ln_number,
gl 1902 fs/gfs2/glock.c gflags2str(gflags_buf, gl),
gl 1903 fs/gfs2/glock.c state2str(gl->gl_target),
gl 1904 fs/gfs2/glock.c state2str(gl->gl_demote_state), dtime,
gl 1905 fs/gfs2/glock.c atomic_read(&gl->gl_ail_count),
gl 1906 fs/gfs2/glock.c atomic_read(&gl->gl_revokes),
gl 1907 fs/gfs2/glock.c (int)gl->gl_lockref.count, gl->gl_hold_time);
gl 1909 fs/gfs2/glock.c list_for_each_entry(gh, &gl->gl_holders, gh_list)
gl 1912 fs/gfs2/glock.c if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
gl 1913 fs/gfs2/glock.c glops->go_dump(seq, gl, fs_id_buf);
gl 1918 fs/gfs2/glock.c struct gfs2_glock *gl = iter_ptr;
gl 1921 fs/gfs2/glock.c gl->gl_name.ln_type,
gl 1922 fs/gfs2/glock.c (unsigned long long)gl->gl_name.ln_number,
gl 1923 fs/gfs2/glock.c (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
gl 1924 fs/gfs2/glock.c (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
gl 1925 fs/gfs2/glock.c (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
gl 1926 fs/gfs2/glock.c (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
gl 1927 fs/gfs2/glock.c (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
gl 1928 fs/gfs2/glock.c (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
gl 1929 fs/gfs2/glock.c (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
gl 1930 fs/gfs2/glock.c (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
gl 2035 fs/gfs2/glock.c struct gfs2_glock *gl = gi->gl;
gl 2037 fs/gfs2/glock.c if (gl) {
gl 2040 fs/gfs2/glock.c if (!lockref_put_not_zero(&gl->gl_lockref))
gl 2041 fs/gfs2/glock.c gfs2_glock_queue_put(gl);
gl 2044 fs/gfs2/glock.c gl = rhashtable_walk_next(&gi->hti);
gl 2045 fs/gfs2/glock.c if (IS_ERR_OR_NULL(gl)) {
gl 2046 fs/gfs2/glock.c if (gl == ERR_PTR(-EAGAIN)) {
gl 2050 fs/gfs2/glock.c gl = NULL;
gl 2053 fs/gfs2/glock.c if (gl->gl_name.ln_sbd != gi->sdp)
gl 2056 fs/gfs2/glock.c if (!lockref_get_not_dead(&gl->gl_lockref))
gl 2060 fs/gfs2/glock.c if (__lockref_is_dead(&gl->gl_lockref))
gl 2065 fs/gfs2/glock.c gi->gl = gl;
gl 2090 fs/gfs2/glock.c return gi->gl;
gl 2101 fs/gfs2/glock.c return gi->gl;
gl 2180 fs/gfs2/glock.c gi->gl = NULL;
gl 2196 fs/gfs2/glock.c if (gi->gl)
gl 2197 fs/gfs2/glock.c gfs2_glock_put(gi->gl);
gl 128 fs/gfs2/glock.h void (*lm_put_lock) (struct gfs2_glock *gl);
gl 129 fs/gfs2/glock.h int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
gl 131 fs/gfs2/glock.h void (*lm_cancel) (struct gfs2_glock *gl);
gl 136 fs/gfs2/glock.h static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
gl 142 fs/gfs2/glock.h spin_lock(&gl->gl_lockref.lock);
gl 144 fs/gfs2/glock.h list_for_each_entry(gh, &gl->gl_holders, gh_list) {
gl 152 fs/gfs2/glock.h spin_unlock(&gl->gl_lockref.lock);
gl 157 fs/gfs2/glock.h static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
gl 159 fs/gfs2/glock.h return gl->gl_state == LM_ST_EXCLUSIVE;
gl 162 fs/gfs2/glock.h static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
gl 164 fs/gfs2/glock.h return gl->gl_state == LM_ST_DEFERRED;
gl 167 fs/gfs2/glock.h static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
gl 169 fs/gfs2/glock.h return gl->gl_state == LM_ST_SHARED;
gl 172 fs/gfs2/glock.h static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
gl 174 fs/gfs2/glock.h if (gl->gl_ops->go_flags & GLOF_ASPACE)
gl 175 fs/gfs2/glock.h return (struct address_space *)(gl + 1);
gl 182 fs/gfs2/glock.h extern void gfs2_glock_hold(struct gfs2_glock *gl);
gl 183 fs/gfs2/glock.h extern void gfs2_glock_put(struct gfs2_glock *gl);
gl 184 fs/gfs2/glock.h extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
gl 185 fs/gfs2/glock.h extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
gl 203 fs/gfs2/glock.h extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
gl 205 fs/gfs2/glock.h #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { \
gl 206 fs/gfs2/glock.h gfs2_dump_glock(NULL, gl, true); \
gl 221 fs/gfs2/glock.h static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
gl 227 fs/gfs2/glock.h gfs2_holder_init(gl, state, flags, gh);
gl 236 fs/gfs2/glock.h extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
gl 237 fs/gfs2/glock.h extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
gl 241 fs/gfs2/glock.h extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
gl 242 fs/gfs2/glock.h extern void gfs2_glock_free(struct gfs2_glock *gl);
gl 274 fs/gfs2/glock.h static inline void glock_set_object(struct gfs2_glock *gl, void *object)
gl 276 fs/gfs2/glock.h spin_lock(&gl->gl_lockref.lock);
gl 277 fs/gfs2/glock.h if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
gl 278 fs/gfs2/glock.h gfs2_dump_glock(NULL, gl, true);
gl 279 fs/gfs2/glock.h gl->gl_object = object;
gl 280 fs/gfs2/glock.h spin_unlock(&gl->gl_lockref.lock);
gl 301 fs/gfs2/glock.h static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
gl 303 fs/gfs2/glock.h spin_lock(&gl->gl_lockref.lock);
gl 304 fs/gfs2/glock.h if (gl->gl_object == object)
gl 305 fs/gfs2/glock.h gl->gl_object = NULL;
gl 306 fs/gfs2/glock.h spin_unlock(&gl->gl_lockref.lock);
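
glock.h above also provides the "already locked by me" test used by callers that may or may not hold a glock. A sketch of the lock-unless-held idiom built on it (compare the rgrp.c:1044 hit later in this listing):

    static int lock_unless_held(struct gfs2_glock *gl, struct gfs2_holder *gh,
                                int *unlock)
    {
            int error = 0;

            *unlock = 0;
            if (!gfs2_glock_is_locked_by_me(gl)) {
                    error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, gh);
                    if (!error)
                            *unlock = 1;    /* caller must release gh later */
            }
            return error;
    }
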
gl 32 fs/gfs2/glops.c static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
gl 34 fs/gfs2/glops.c fs_err(gl->gl_name.ln_sbd,
gl 39 fs/gfs2/glops.c fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
gl 40 fs/gfs2/glops.c gl->gl_name.ln_type, gl->gl_name.ln_number,
gl 41 fs/gfs2/glops.c gfs2_glock2aspace(gl));
gl 42 fs/gfs2/glops.c gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
gl 53 fs/gfs2/glops.c static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
gl 56 fs/gfs2/glops.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 57 fs/gfs2/glops.c struct list_head *head = &gl->gl_ail_list;
gl 71 fs/gfs2/glops.c gfs2_ail_error(gl, bh);
gl 76 fs/gfs2/glops.c GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
gl 82 fs/gfs2/glops.c static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
gl 84 fs/gfs2/glops.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 90 fs/gfs2/glops.c tr.tr_revokes = atomic_read(&gl->gl_ail_count);
gl 129 fs/gfs2/glops.c __gfs2_ail_flush(gl, 0, tr.tr_revokes);
gl 137 fs/gfs2/glops.c void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
gl 139 fs/gfs2/glops.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 140 fs/gfs2/glops.c unsigned int revokes = atomic_read(&gl->gl_ail_count);
gl 153 fs/gfs2/glops.c __gfs2_ail_flush(gl, fsync, max_revokes);
gl 168 fs/gfs2/glops.c static void rgrp_go_sync(struct gfs2_glock *gl)
gl 170 fs/gfs2/glops.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 175 fs/gfs2/glops.c spin_lock(&gl->gl_lockref.lock);
gl 176 fs/gfs2/glops.c rgd = gl->gl_object;
gl 179 fs/gfs2/glops.c spin_unlock(&gl->gl_lockref.lock);
gl 181 fs/gfs2/glops.c if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
gl 183 fs/gfs2/glops.c GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
gl 185 fs/gfs2/glops.c gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
gl 187 fs/gfs2/glops.c filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
gl 188 fs/gfs2/glops.c error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
gl 190 fs/gfs2/glops.c gfs2_ail_empty_gl(gl);
gl 192 fs/gfs2/glops.c spin_lock(&gl->gl_lockref.lock);
gl 193 fs/gfs2/glops.c rgd = gl->gl_object;
gl 196 fs/gfs2/glops.c spin_unlock(&gl->gl_lockref.lock);
gl 209 fs/gfs2/glops.c static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
gl 211 fs/gfs2/glops.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 213 fs/gfs2/glops.c struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
gl 219 fs/gfs2/glops.c gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
gl 220 fs/gfs2/glops.c truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
gl 226 fs/gfs2/glops.c static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
gl 230 fs/gfs2/glops.c spin_lock(&gl->gl_lockref.lock);
gl 231 fs/gfs2/glops.c ip = gl->gl_object;
gl 234 fs/gfs2/glops.c spin_unlock(&gl->gl_lockref.lock);
gl 238 fs/gfs2/glops.c struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
gl 242 fs/gfs2/glops.c spin_lock(&gl->gl_lockref.lock);
gl 243 fs/gfs2/glops.c rgd = gl->gl_object;
gl 244 fs/gfs2/glops.c spin_unlock(&gl->gl_lockref.lock);
gl 264 fs/gfs2/glops.c static void inode_go_sync(struct gfs2_glock *gl)
gl 266 fs/gfs2/glops.c struct gfs2_inode *ip = gfs2_glock2inode(gl);
gl 268 fs/gfs2/glops.c struct address_space *metamapping = gfs2_glock2aspace(gl);
gl 276 fs/gfs2/glops.c if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
gl 279 fs/gfs2/glops.c GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
gl 281 fs/gfs2/glops.c gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
gl 292 fs/gfs2/glops.c gfs2_ail_empty_gl(gl);
gl 298 fs/gfs2/glops.c clear_bit(GLF_DIRTY, &gl->gl_flags);
gl 315 fs/gfs2/glops.c static void inode_go_inval(struct gfs2_glock *gl, int flags)
gl 317 fs/gfs2/glops.c struct gfs2_inode *ip = gfs2_glock2inode(gl);
gl 319 fs/gfs2/glops.c gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
gl 322 fs/gfs2/glops.c struct address_space *mapping = gfs2_glock2aspace(gl);
gl 332 fs/gfs2/glops.c if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
gl 333 fs/gfs2/glops.c gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
gl 336 fs/gfs2/glops.c gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
gl 351 fs/gfs2/glops.c static int inode_go_demote_ok(const struct gfs2_glock *gl)
gl 353 fs/gfs2/glops.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 355 fs/gfs2/glops.c if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
gl 454 fs/gfs2/glops.c struct gfs2_glock *gl = gh->gh_gl;
gl 455 fs/gfs2/glops.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 456 fs/gfs2/glops.c struct gfs2_inode *ip = gl->gl_object;
gl 472 fs/gfs2/glops.c (gl->gl_state == LM_ST_EXCLUSIVE) &&
gl 493 fs/gfs2/glops.c static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
gl 496 fs/gfs2/glops.c struct gfs2_inode *ip = gl->gl_object;
gl 524 fs/gfs2/glops.c static void freeze_go_sync(struct gfs2_glock *gl)
gl 527 fs/gfs2/glops.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 529 fs/gfs2/glops.c if (gl->gl_state == LM_ST_SHARED &&
gl 550 fs/gfs2/glops.c static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
gl 552 fs/gfs2/glops.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 583 fs/gfs2/glops.c static int freeze_go_demote_ok(const struct gfs2_glock *gl)
gl 594 fs/gfs2/glops.c static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
gl 596 fs/gfs2/glops.c struct gfs2_inode *ip = gl->gl_object;
gl 597 fs/gfs2/glops.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 602 fs/gfs2/glops.c if (gl->gl_demote_state == LM_ST_UNLOCKED &&
gl 603 fs/gfs2/glops.c gl->gl_state == LM_ST_SHARED && ip) {
gl 604 fs/gfs2/glops.c gl->gl_lockref.count++;
gl 605 fs/gfs2/glops.c if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
gl 606 fs/gfs2/glops.c gl->gl_lockref.count--;
gl 25 fs/gfs2/glops.h extern void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
gl 37 fs/gfs2/incore.h typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
gl 237 fs/gfs2/incore.h void (*go_sync) (struct gfs2_glock *gl);
gl 238 fs/gfs2/incore.h int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
gl 239 fs/gfs2/incore.h void (*go_inval) (struct gfs2_glock *gl, int flags);
gl 240 fs/gfs2/incore.h int (*go_demote_ok) (const struct gfs2_glock *gl);
gl 243 fs/gfs2/incore.h void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl,
gl 245 fs/gfs2/incore.h void (*go_callback)(struct gfs2_glock *gl, bool remote);
gl 857 fs/gfs2/incore.h static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
gl 859 fs/gfs2/incore.h gl->gl_stats.stats[which]++;
gl 862 fs/gfs2/incore.h static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
gl 864 fs/gfs2/incore.h const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 866 fs/gfs2/incore.h this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
gl 870 fs/gfs2/incore.h extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
gl 841 fs/gfs2/inode.c struct gfs2_glock *gl;
gl 852 fs/gfs2/inode.c gl = GFS2_I(inode)->i_gl;
gl 853 fs/gfs2/inode.c error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
gl 70 fs/gfs2/lock_dlm.c static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
gl 73 fs/gfs2/lock_dlm.c const unsigned gltype = gl->gl_name.ln_type;
gl 74 fs/gfs2/lock_dlm.c unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
gl 79 fs/gfs2/lock_dlm.c rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
gl 80 fs/gfs2/lock_dlm.c lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
gl 81 fs/gfs2/lock_dlm.c gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */
gl 85 fs/gfs2/lock_dlm.c trace_gfs2_glock_lock_time(gl, rtt);
gl 97 fs/gfs2/lock_dlm.c static inline void gfs2_update_request_times(struct gfs2_glock *gl)
gl 100 fs/gfs2/lock_dlm.c const unsigned gltype = gl->gl_name.ln_type;
gl 105 fs/gfs2/lock_dlm.c dstamp = gl->gl_dstamp;
gl 106 fs/gfs2/lock_dlm.c gl->gl_dstamp = ktime_get_real();
gl 107 fs/gfs2/lock_dlm.c irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
gl 108 fs/gfs2/lock_dlm.c lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
gl 109 fs/gfs2/lock_dlm.c gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt); /* Local */
gl 116 fs/gfs2/lock_dlm.c struct gfs2_glock *gl = arg;
gl 117 fs/gfs2/lock_dlm.c unsigned ret = gl->gl_state;
gl 119 fs/gfs2/lock_dlm.c gfs2_update_reply_times(gl);
gl 120 fs/gfs2/lock_dlm.c BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
gl 122 fs/gfs2/lock_dlm.c if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
gl 123 fs/gfs2/lock_dlm.c memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
gl 125 fs/gfs2/lock_dlm.c switch (gl->gl_lksb.sb_status) {
gl 127 fs/gfs2/lock_dlm.c gfs2_glock_free(gl);
gl 144 fs/gfs2/lock_dlm.c ret = gl->gl_req;
gl 145 fs/gfs2/lock_dlm.c if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
gl 146 fs/gfs2/lock_dlm.c if (gl->gl_req == LM_ST_SHARED)
gl 148 fs/gfs2/lock_dlm.c else if (gl->gl_req == LM_ST_DEFERRED)
gl 154 fs/gfs2/lock_dlm.c set_bit(GLF_INITIAL, &gl->gl_flags);
gl 155 fs/gfs2/lock_dlm.c gfs2_glock_complete(gl, ret);
gl 158 fs/gfs2/lock_dlm.c if (!test_bit(GLF_INITIAL, &gl->gl_flags))
gl 159 fs/gfs2/lock_dlm.c gl->gl_lksb.sb_lkid = 0;
gl 160 fs/gfs2/lock_dlm.c gfs2_glock_complete(gl, ret);
gl 165 fs/gfs2/lock_dlm.c struct gfs2_glock *gl = arg;
gl 169 fs/gfs2/lock_dlm.c gfs2_glock_cb(gl, LM_ST_UNLOCKED);
gl 172 fs/gfs2/lock_dlm.c gfs2_glock_cb(gl, LM_ST_DEFERRED);
gl 175 fs/gfs2/lock_dlm.c gfs2_glock_cb(gl, LM_ST_SHARED);
gl 178 fs/gfs2/lock_dlm.c fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
gl 202 fs/gfs2/lock_dlm.c static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
gl 207 fs/gfs2/lock_dlm.c if (gl->gl_lksb.sb_lvbptr)
gl 232 fs/gfs2/lock_dlm.c if (gl->gl_lksb.sb_lkid != 0) {
gl 234 fs/gfs2/lock_dlm.c if (test_bit(GLF_BLOCKING, &gl->gl_flags))
gl 250 fs/gfs2/lock_dlm.c static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
gl 253 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
gl 258 fs/gfs2/lock_dlm.c req = make_mode(gl->gl_name.ln_sbd, req_state);
gl 259 fs/gfs2/lock_dlm.c lkf = make_flags(gl, flags, req);
gl 260 fs/gfs2/lock_dlm.c gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
gl 261 fs/gfs2/lock_dlm.c gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
gl 262 fs/gfs2/lock_dlm.c if (gl->gl_lksb.sb_lkid) {
gl 263 fs/gfs2/lock_dlm.c gfs2_update_request_times(gl);
gl 267 fs/gfs2/lock_dlm.c gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
gl 268 fs/gfs2/lock_dlm.c gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
gl 269 fs/gfs2/lock_dlm.c gl->gl_dstamp = ktime_get_real();
gl 275 fs/gfs2/lock_dlm.c return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
gl 276 fs/gfs2/lock_dlm.c GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
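
gdlm_lock() above encodes the glock name into a fixed-width DLM resource string and submits the request with gdlm_ast/gdlm_bast as completion and blocking callbacks. A condensed sketch assembled from the quoted lines (the existing-lkid stats path at lock_dlm.c:262-263 is omitted):

    static int issue_dlm_lock(struct gfs2_glock *gl, unsigned int req_state,
                              unsigned int flags)
    {
            struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
            char strname[GDLM_STRNAME_BYTES] = "";
            int req;
            u32 lkf;

            req = make_mode(gl->gl_name.ln_sbd, req_state);
            lkf = make_flags(gl, flags, req);
            gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
            gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
            /* encode type and number as reversed hex into the resource name */
            gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
            gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
            gl->gl_dstamp = ktime_get_real();

            return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
                            GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
    }
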
gl 279 fs/gfs2/lock_dlm.c static void gdlm_put_lock(struct gfs2_glock *gl)
gl 281 fs/gfs2/lock_dlm.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 286 fs/gfs2/lock_dlm.c if (gl->gl_lksb.sb_lkid == 0) {
gl 287 fs/gfs2/lock_dlm.c gfs2_glock_free(gl);
gl 291 fs/gfs2/lock_dlm.c clear_bit(GLF_BLOCKING, &gl->gl_flags);
gl 292 fs/gfs2/lock_dlm.c gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
gl 293 fs/gfs2/lock_dlm.c gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
gl 294 fs/gfs2/lock_dlm.c gfs2_update_request_times(gl);
gl 298 fs/gfs2/lock_dlm.c if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
gl 303 fs/gfs2/lock_dlm.c gfs2_glock_free(gl);
gl 307 fs/gfs2/lock_dlm.c error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
gl 308 fs/gfs2/lock_dlm.c NULL, gl);
gl 311 fs/gfs2/lock_dlm.c gl->gl_name.ln_type,
gl 312 fs/gfs2/lock_dlm.c (unsigned long long)gl->gl_name.ln_number, error);
gl 317 fs/gfs2/lock_dlm.c static void gdlm_cancel(struct gfs2_glock *gl)
gl 319 fs/gfs2/lock_dlm.c struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
gl 320 fs/gfs2/lock_dlm.c dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
gl 97 fs/gfs2/log.c struct gfs2_glock *gl = NULL;
gl 120 fs/gfs2/log.c if (gl == bd->bd_gl)
gl 122 fs/gfs2/log.c gl = bd->bd_gl;
gl 599 fs/gfs2/log.c struct gfs2_glock *gl = bd->bd_gl;
gl 602 fs/gfs2/log.c if (atomic_inc_return(&gl->gl_revokes) == 1)
gl 603 fs/gfs2/log.c gfs2_glock_hold(gl);
gl 608 fs/gfs2/log.c set_bit(GLF_LFLUSH, &gl->gl_flags);
gl 612 fs/gfs2/log.c void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
gl 614 fs/gfs2/log.c if (atomic_dec_return(&gl->gl_revokes) == 0) {
gl 615 fs/gfs2/log.c clear_bit(GLF_LFLUSH, &gl->gl_flags);
gl 616 fs/gfs2/log.c gfs2_glock_queue_put(gl);
gl 792 fs/gfs2/log.c void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
gl 800 fs/gfs2/log.c if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
gl 72 fs/gfs2/log.h extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
gl 81 fs/gfs2/log.h extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
gl 72 fs/gfs2/lops.c struct gfs2_glock *gl = bd->bd_gl;
gl 73 fs/gfs2/lops.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 74 fs/gfs2/lops.c struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
gl 75 fs/gfs2/lops.c unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
gl 118 fs/gfs2/lops.c struct gfs2_glock *gl = bd->bd_gl;
gl 119 fs/gfs2/lops.c list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
gl 120 fs/gfs2/lops.c atomic_inc(&gl->gl_ail_count);
gl 754 fs/gfs2/lops.c struct gfs2_glock *gl = ip->i_gl;
gl 777 fs/gfs2/lops.c bh_ip = gfs2_meta_new(gl, blkno);
gl 821 fs/gfs2/lops.c static void gfs2_meta_sync(struct gfs2_glock *gl)
gl 823 fs/gfs2/lops.c struct address_space *mapping = gfs2_glock2aspace(gl);
gl 824 fs/gfs2/lops.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 834 fs/gfs2/lops.c gfs2_io_error(gl->gl_name.ln_sbd);
gl 899 fs/gfs2/lops.c struct gfs2_glock *gl;
gl 904 fs/gfs2/lops.c gl = bd->bd_gl;
gl 905 fs/gfs2/lops.c gfs2_glock_remove_revoke(gl);
gl 1007 fs/gfs2/lops.c struct gfs2_glock *gl = ip->i_gl;
gl 1031 fs/gfs2/lops.c bh_ip = gfs2_meta_new(gl, blkno);
gl 52 fs/gfs2/main.c struct gfs2_glock *gl = foo;
gl 54 fs/gfs2/main.c spin_lock_init(&gl->gl_lockref.lock);
gl 55 fs/gfs2/main.c INIT_LIST_HEAD(&gl->gl_holders);
gl 56 fs/gfs2/main.c INIT_LIST_HEAD(&gl->gl_lru);
gl 57 fs/gfs2/main.c INIT_LIST_HEAD(&gl->gl_ail_list);
gl 58 fs/gfs2/main.c atomic_set(&gl->gl_ail_count, 0);
gl 59 fs/gfs2/main.c atomic_set(&gl->gl_revokes, 0);
gl 110 fs/gfs2/meta_io.c struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
gl 112 fs/gfs2/meta_io.c struct address_space *mapping = gfs2_glock2aspace(gl);
gl 113 fs/gfs2/meta_io.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 178 fs/gfs2/meta_io.c struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
gl 181 fs/gfs2/meta_io.c bh = gfs2_getbuf(gl, blkno, CREATE);
gl 247 fs/gfs2/meta_io.c int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
gl 250 fs/gfs2/meta_io.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 259 fs/gfs2/meta_io.c *bhp = bh = gfs2_getbuf(gl, blkno, CREATE);
gl 272 fs/gfs2/meta_io.c bh = gfs2_getbuf(gl, blkno + 1, CREATE);
gl 406 fs/gfs2/meta_io.c struct gfs2_glock *gl = ip->i_gl;
gl 415 fs/gfs2/meta_io.c ret = gfs2_meta_read(gl, num, DIO_WAIT, rahead, &bh);
gl 434 fs/gfs2/meta_io.c struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
gl 436 fs/gfs2/meta_io.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 448 fs/gfs2/meta_io.c first_bh = gfs2_getbuf(gl, dblock, CREATE);
gl 459 fs/gfs2/meta_io.c bh = gfs2_getbuf(gl, dblock, CREATE);
gl 51 fs/gfs2/meta_io.h extern struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
gl 52 fs/gfs2/meta_io.h extern int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
gl 55 fs/gfs2/meta_io.h extern struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
gl 73 fs/gfs2/meta_io.h struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
gl 36 fs/gfs2/recovery.c struct gfs2_glock *gl = ip->i_gl;
gl 50 fs/gfs2/recovery.c *bh = gfs2_meta_ra(gl, dblock, extlen);
gl 724 fs/gfs2/rgrp.c struct gfs2_glock *gl;
gl 728 fs/gfs2/rgrp.c gl = rgd->rd_gl;
gl 732 fs/gfs2/rgrp.c if (gl) {
gl 733 fs/gfs2/rgrp.c glock_clear_object(gl, rgd);
gl 735 fs/gfs2/rgrp.c gfs2_glock_put(gl);
gl 1037 fs/gfs2/rgrp.c struct gfs2_glock *gl = ip->i_gl;
gl 1044 fs/gfs2/rgrp.c if (!gfs2_glock_is_locked_by_me(gl)) {
gl 1045 fs/gfs2/rgrp.c error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
gl 1185 fs/gfs2/rgrp.c struct gfs2_glock *gl = rgd->rd_gl;
gl 1196 fs/gfs2/rgrp.c error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
gl 1838 fs/gfs2/rgrp.c struct gfs2_glock *gl;
gl 1863 fs/gfs2/rgrp.c error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
gl 1874 fs/gfs2/rgrp.c ip = gl->gl_object;
gl 1876 fs/gfs2/rgrp.c if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
gl 1877 fs/gfs2/rgrp.c gfs2_glock_put(gl);
gl 1919 fs/gfs2/rgrp.c const struct gfs2_glock *gl = rgd->rd_gl;
gl 1920 fs/gfs2/rgrp.c const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 1942 fs/gfs2/rgrp.c gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
gl 1945 fs/gfs2/rgrp.c l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
gl 1946 fs/gfs2/rgrp.c l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
gl 2012 fs/gfs2/rgrp.c struct gfs2_glock *gl = rgd->rd_gl;
gl 2014 fs/gfs2/rgrp.c if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
gl 2015 fs/gfs2/rgrp.c !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
gl 2016 fs/gfs2/rgrp.c !test_bit(GLF_DEMOTE, &gl->gl_flags))
gl 2255 fs/gfs2/rgrp.c void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl,
gl 2258 fs/gfs2/rgrp.c struct gfs2_rgrpd *rgd = gl->gl_object;
gl 72 fs/gfs2/rgrp.h extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl,
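
The fs/gfs2/meta_io.c lines above (gl 247-272) show the lookup-or-create shape of gfs2_meta_read(): fetch the wanted buffer from the glock's address space, start I/O if it is not up to date, and opportunistically kick off a read of blkno + 1 when readahead is requested. A hedged userspace sketch of that shape follows; cache_get(), start_read(), and wait_read() are hypothetical stand-ins for gfs2_getbuf() and the buffer_head I/O helpers, not real APIs.

/* Sketch only: read block blkno through a cache, with optional readahead. */
#include <stdbool.h>
#include <stdint.h>

struct block;					/* opaque cached block          */
struct block *cache_get(uint64_t blkno);	/* lookup-or-create, cf. CREATE */
bool block_uptodate(const struct block *b);
void start_read(struct block *b);		/* asynchronous read submission */
int wait_read(struct block *b);			/* 0 on success                 */

int meta_read(uint64_t blkno, bool rahead, struct block **out)
{
	struct block *b = cache_get(blkno);

	if (!block_uptodate(b)) {
		start_read(b);
		if (rahead)			/* speculative read of the next block */
			start_read(cache_get(blkno + 1));
	}
	*out = b;
	return wait_read(b);			/* DIO_WAIT-style synchronous wait */
}
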
gl 999 fs/gfs2/super.c struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
gl 1000 fs/gfs2/super.c if (test_bit(GLF_DEMOTE, &gl->gl_flags))
gl 1012 fs/gfs2/super.c struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
gl 1014 fs/gfs2/super.c gfs2_glock_hold(gl);
gl 1015 fs/gfs2/super.c if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
gl 1016 fs/gfs2/super.c gfs2_glock_queue_put(gl);
gl 1143 fs/gfs2/super.c struct gfs2_glock *gl = ip->i_gl;
gl 1148 fs/gfs2/super.c if (atomic_read(&gl->gl_revokes) == 0) {
gl 1149 fs/gfs2/super.c clear_bit(GLF_LFLUSH, &gl->gl_flags);
gl 1150 fs/gfs2/super.c clear_bit(GLF_DIRTY, &gl->gl_flags);
gl 1211 fs/gfs2/super.c static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
gl 1214 fs/gfs2/super.c gfs2_glock_queue_put(gl);
gl 1216 fs/gfs2/super.c gfs2_glock_put(gl);
gl 1380 fs/gfs2/super.c struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
gl 1382 fs/gfs2/super.c glock_clear_object(gl, ip);
gl 1384 fs/gfs2/super.c gfs2_glock_hold(gl);
gl 1386 fs/gfs2/super.c gfs2_glock_put_eventually(gl);
gl 228 fs/gfs2/sys.c struct gfs2_glock *gl;
gl 263 fs/gfs2/sys.c rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
gl 266 fs/gfs2/sys.c gfs2_glock_cb(gl, glmode);
gl 267 fs/gfs2/sys.c gfs2_glock_put(gl);
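
gfs2_glock_put_eventually() above (gl 1211-1216) chooses between dropping a glock reference directly and queueing the put for a worker. The listing does not show the condition it tests, so the may_block flag below is an assumed simplification, and all names are hypothetical stand-ins rather than the kernel API.

/* Sketch only: defer the final put when the caller must not block. */
#include <stdbool.h>

struct obj { int refcount; };		/* toy object, not struct gfs2_glock */

void obj_put(struct obj *o);		/* may free; may need to sleep       */
void queue_put_work(struct obj *o);	/* hand the put to a worker thread   */

void obj_put_eventually(struct obj *o, bool may_block)
{
	if (may_block)
		obj_put(o);		/* safe to drop the reference here   */
	else
		queue_put_work(o);	/* otherwise let a worker do it      */
}
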
gl 93 fs/gfs2/trace_gfs2.h TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),
gl 95 fs/gfs2/trace_gfs2.h TP_ARGS(gl, new_state),
gl 109 fs/gfs2/trace_gfs2.h __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
gl 110 fs/gfs2/trace_gfs2.h __entry->glnum = gl->gl_name.ln_number;
gl 111 fs/gfs2/trace_gfs2.h __entry->gltype = gl->gl_name.ln_type;
gl 112 fs/gfs2/trace_gfs2.h __entry->cur_state = glock_trace_state(gl->gl_state);
gl 114 fs/gfs2/trace_gfs2.h __entry->tgt_state = glock_trace_state(gl->gl_target);
gl 115 fs/gfs2/trace_gfs2.h __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
gl 116 fs/gfs2/trace_gfs2.h __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
gl 132 fs/gfs2/trace_gfs2.h TP_PROTO(const struct gfs2_glock *gl),
gl 134 fs/gfs2/trace_gfs2.h TP_ARGS(gl),
gl 145 fs/gfs2/trace_gfs2.h __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
gl 146 fs/gfs2/trace_gfs2.h __entry->gltype = gl->gl_name.ln_type;
gl 147 fs/gfs2/trace_gfs2.h __entry->glnum = gl->gl_name.ln_number;
gl 148 fs/gfs2/trace_gfs2.h __entry->cur_state = glock_trace_state(gl->gl_state);
gl 149 fs/gfs2/trace_gfs2.h __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
gl 164 fs/gfs2/trace_gfs2.h TP_PROTO(const struct gfs2_glock *gl, bool remote),
gl 166 fs/gfs2/trace_gfs2.h TP_ARGS(gl, remote),
gl 179 fs/gfs2/trace_gfs2.h __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
gl 180 fs/gfs2/trace_gfs2.h __entry->gltype = gl->gl_name.ln_type;
gl 181 fs/gfs2/trace_gfs2.h __entry->glnum = gl->gl_name.ln_number;
gl 182 fs/gfs2/trace_gfs2.h __entry->cur_state = glock_trace_state(gl->gl_state);
gl 183 fs/gfs2/trace_gfs2.h __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
gl 184 fs/gfs2/trace_gfs2.h __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
gl 261 fs/gfs2/trace_gfs2.h TP_PROTO(const struct gfs2_glock *gl, s64 tdiff),
gl 263 fs/gfs2/trace_gfs2.h TP_ARGS(gl, tdiff),
gl 283 fs/gfs2/trace_gfs2.h __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
gl 284 fs/gfs2/trace_gfs2.h __entry->glnum = gl->gl_name.ln_number;
gl 285 fs/gfs2/trace_gfs2.h __entry->gltype = gl->gl_name.ln_type;
gl 286 fs/gfs2/trace_gfs2.h __entry->status = gl->gl_lksb.sb_status;
gl 287 fs/gfs2/trace_gfs2.h __entry->flags = gl->gl_lksb.sb_flags;
gl 289 fs/gfs2/trace_gfs2.h __entry->srtt = gl->gl_stats.stats[GFS2_LKS_SRTT];
gl 290 fs/gfs2/trace_gfs2.h __entry->srttvar = gl->gl_stats.stats[GFS2_LKS_SRTTVAR];
gl 291 fs/gfs2/trace_gfs2.h __entry->srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
gl 292 fs/gfs2/trace_gfs2.h __entry->srttvarb = gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
gl 293 fs/gfs2/trace_gfs2.h __entry->sirt = gl->gl_stats.stats[GFS2_LKS_SIRT];
gl 294 fs/gfs2/trace_gfs2.h __entry->sirtvar = gl->gl_stats.stats[GFS2_LKS_SIRTVAR];
gl 295 fs/gfs2/trace_gfs2.h __entry->dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
gl 296 fs/gfs2/trace_gfs2.h __entry->qcount = gl->gl_stats.stats[GFS2_LKS_QCOUNT];
gl 123 fs/gfs2/trans.c static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
gl 130 fs/gfs2/trans.c bd->bd_gl = gl;
gl 150 fs/gfs2/trans.c void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
gl 153 fs/gfs2/trans.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 167 fs/gfs2/trans.c bd = gfs2_alloc_bufdata(gl, bh);
gl 173 fs/gfs2/trans.c gfs2_assert(sdp, bd->bd_gl == gl);
gl 187 fs/gfs2/trans.c void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
gl 190 fs/gfs2/trans.c struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
gl 208 fs/gfs2/trans.c bd = gfs2_alloc_bufdata(gl, bh);
gl 215 fs/gfs2/trans.c gfs2_assert(sdp, bd->bd_gl == gl);
gl 41 fs/gfs2/trans.h extern void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
gl 42 fs/gfs2/trans.h extern void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
gl 1054 tools/perf/util/probe-finder.c static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data)
gl 1058 tools/perf/util/probe-finder.c if (dwarf_offdie(dbg, gl->die_offset, param->sp_die)) {
gl 1063 tools/perf/util/probe-finder.c if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die))
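
The fs/gfs2/trans.c lines above (gl 123-215) show attach-once bookkeeping: gfs2_trans_add_data()/gfs2_trans_add_meta() allocate a gfs2_bufdata for a buffer on first use, record the protecting glock in bd_gl, and assert on every later add that the binding has not changed. A userspace sketch of that check, with struct lock, struct buf, and attach_bufdata() as hypothetical stand-ins:

/* Sketch only: bind a buffer to the lock it was first dirtied under. */
#include <assert.h>
#include <stdlib.h>

struct lock;				/* stand-in for struct gfs2_glock */

struct bufdata {
	struct lock *bd_lock;		/* cf. bd->bd_gl                  */
};

struct buf {
	struct bufdata *priv;		/* cf. bh->b_private              */
};

void attach_bufdata(struct lock *l, struct buf *b)
{
	struct bufdata *bd = b->priv;

	if (!bd) {			/* first add: allocate and bind   */
		bd = calloc(1, sizeof(*bd));
		if (!bd)
			abort();	/* allocation failure handling elided */
		bd->bd_lock = l;
		b->priv = bd;
		return;
	}
	/* already attached: the protecting lock must not have changed */
	assert(bd->bd_lock == l);
}
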