Lines matching refs: ci (the struct ceph_inode_info pointer; all matches below are from fs/ceph/caps.c, the Ceph client's capability-management code)
Each entry is: <source line number>, the matching source line, and the enclosing function; "argument" / "local" mark the line where the identifier is declared.
310 static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds) in __get_cap_for_mds() argument
313 struct rb_node *n = ci->i_caps.rb_node; in __get_cap_for_mds()
327 struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds) in ceph_get_cap_for_mds() argument
331 spin_lock(&ci->i_ceph_lock); in ceph_get_cap_for_mds()
332 cap = __get_cap_for_mds(ci, mds); in ceph_get_cap_for_mds()
333 spin_unlock(&ci->i_ceph_lock); in ceph_get_cap_for_mds()
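
The per-inode caps live in an rb-tree (ci->i_caps, rooted at line 313) keyed by MDS rank; __get_cap_for_mds walks it, and ceph_get_cap_for_mds is just the i_ceph_lock wrapper shown above. A sketch of the elided walk, following the standard kernel rb-tree idiom (a reconstruction, not a verbatim quote):

    static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
    {
        struct ceph_cap *cap;
        struct rb_node *n = ci->i_caps.rb_node;

        while (n) {
            cap = rb_entry(n, struct ceph_cap, ci_node);
            if (mds < cap->mds)
                n = n->rb_left;
            else if (mds > cap->mds)
                n = n->rb_right;
            else
                return cap;     /* the cap issued by this MDS */
        }
        return NULL;            /* no cap from this MDS */
    }

As everywhere in this file, the double-underscore variant assumes the caller already holds ci->i_ceph_lock.
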
340 static int __ceph_get_cap_mds(struct ceph_inode_info *ci) in __ceph_get_cap_mds() argument
347 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_get_cap_mds()
360 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_get_cap_mds() local
362 spin_lock(&ci->i_ceph_lock); in ceph_get_cap_mds()
364 spin_unlock(&ci->i_ceph_lock); in ceph_get_cap_mds()
371 static void __insert_cap_node(struct ceph_inode_info *ci, in __insert_cap_node() argument
374 struct rb_node **p = &ci->i_caps.rb_node; in __insert_cap_node()
390 rb_insert_color(&new->ci_node, &ci->i_caps); in __insert_cap_node()
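
__insert_cap_node is the matching insert: descend to a leaf, then rb_link_node()/rb_insert_color() (line 390). The elided descent presumably mirrors the lookup; two caps keyed to the same MDS would corrupt the tree, hence a BUG() on the impossible branch:

    struct rb_node **p = &ci->i_caps.rb_node;
    struct rb_node *parent = NULL;
    struct ceph_cap *cap = NULL;

    while (*p) {
        parent = *p;
        cap = rb_entry(parent, struct ceph_cap, ci_node);
        if (new->mds < cap->mds)
            p = &(*p)->rb_left;
        else if (new->mds > cap->mds)
            p = &(*p)->rb_right;
        else
            BUG();              /* one cap per MDS, always */
    }
    rb_link_node(&new->ci_node, parent, p);
    rb_insert_color(&new->ci_node, &ci->i_caps);
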
398 struct ceph_inode_info *ci) in __cap_set_timeouts() argument
402 ci->i_hold_caps_min = round_jiffies(jiffies + in __cap_set_timeouts()
404 ci->i_hold_caps_max = round_jiffies(jiffies + in __cap_set_timeouts()
406 dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode, in __cap_set_timeouts()
407 ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies); in __cap_set_timeouts()
419 struct ceph_inode_info *ci) in __cap_delay_requeue() argument
421 __cap_set_timeouts(mdsc, ci); in __cap_delay_requeue()
422 dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode, in __cap_delay_requeue()
423 ci->i_ceph_flags, ci->i_hold_caps_max); in __cap_delay_requeue()
426 if (!list_empty(&ci->i_cap_delay_list)) { in __cap_delay_requeue()
427 if (ci->i_ceph_flags & CEPH_I_FLUSH) in __cap_delay_requeue()
429 list_del_init(&ci->i_cap_delay_list); in __cap_delay_requeue()
431 list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list); in __cap_delay_requeue()
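
__cap_delay_requeue re-arms the hold timeouts and moves the inode to the tail of the global mdsc->cap_delay_list, with one exception: an entry already flagged CEPH_I_FLUSH keeps its earlier, more urgent position. The list manipulation is serialized by a per-mdsc spinlock (cap_delay_lock in the source) that the matches above elide; the control flow presumably reads:

    spin_lock(&mdsc->cap_delay_lock);
    if (!list_empty(&ci->i_cap_delay_list)) {
        if (ci->i_ceph_flags & CEPH_I_FLUSH)
            goto no_change;     /* keep the urgent position */
        list_del_init(&ci->i_cap_delay_list);
    }
    list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
    spin_unlock(&mdsc->cap_delay_lock);
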
443 struct ceph_inode_info *ci) in __cap_delay_requeue_front() argument
445 dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode); in __cap_delay_requeue_front()
447 ci->i_ceph_flags |= CEPH_I_FLUSH; in __cap_delay_requeue_front()
448 if (!list_empty(&ci->i_cap_delay_list)) in __cap_delay_requeue_front()
449 list_del_init(&ci->i_cap_delay_list); in __cap_delay_requeue_front()
450 list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list); in __cap_delay_requeue_front()
460 struct ceph_inode_info *ci) in __cap_delay_cancel() argument
462 dout("__cap_delay_cancel %p\n", &ci->vfs_inode); in __cap_delay_cancel()
463 if (list_empty(&ci->i_cap_delay_list)) in __cap_delay_cancel()
466 list_del_init(&ci->i_cap_delay_list); in __cap_delay_cancel()
473 static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap, in __check_cap_issue() argument
476 unsigned had = __ceph_caps_issued(ci, NULL); in __check_cap_issue()
484 ci->i_rdcache_gen++; in __check_cap_issue()
494 ci->i_shared_gen++; in __check_cap_issue()
495 if (S_ISDIR(ci->vfs_inode.i_mode)) { in __check_cap_issue()
496 dout(" marking %p NOT complete\n", &ci->vfs_inode); in __check_cap_issue()
497 __ceph_dir_clear_complete(ci); in __check_cap_issue()
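
__check_cap_issue reacts to newly issued bits by invalidating whatever they make stale: a fresh FILE_CACHE grant means previously cached pages can no longer be trusted (bump i_rdcache_gen, line 484), and a fresh FILE_SHARED grant means cached directory contents and dentry leases are suspect (bump i_shared_gen and clear the dir-complete flag, lines 494-497). The elided guards presumably look like:

    if ((issued & CEPH_CAP_FILE_CACHE) &&
        (had & CEPH_CAP_FILE_CACHE) == 0)
        ci->i_rdcache_gen++;

    if ((issued & CEPH_CAP_FILE_SHARED) &&
        (had & CEPH_CAP_FILE_SHARED) == 0) {
        ci->i_shared_gen++;
        if (S_ISDIR(ci->vfs_inode.i_mode))
            __ceph_dir_clear_complete(ci);
    }
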
518 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_add_cap() local
533 cap = __get_cap_for_mds(ci, mds); in ceph_add_cap()
544 cap->ci = ci; in ceph_add_cap()
545 __insert_cap_node(ci, cap); in ceph_add_cap()
564 WARN_ON(cap != ci->i_auth_cap); in ceph_add_cap()
573 if (!ci->i_snap_realm) { in ceph_add_cap()
581 ci->i_snap_realm = realm; in ceph_add_cap()
582 list_add(&ci->i_snap_realm_item, in ceph_add_cap()
592 __check_cap_issue(ci, cap, issued); in ceph_add_cap()
599 actual_wanted = __ceph_caps_wanted(ci); in ceph_add_cap()
605 __cap_delay_requeue(mdsc, ci); in ceph_add_cap()
609 if (ci->i_auth_cap == NULL || in ceph_add_cap()
610 ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) { in ceph_add_cap()
611 ci->i_auth_cap = cap; in ceph_add_cap()
615 WARN_ON(ci->i_auth_cap == cap); in ceph_add_cap()
634 __ceph_get_fmode(ci, fmode); in ceph_add_cap()
654 "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode, in __cap_is_valid()
667 int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented) in __ceph_caps_issued() argument
669 int have = ci->i_snap_caps; in __ceph_caps_issued()
675 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_issued()
680 &ci->vfs_inode, cap, ceph_cap_string(cap->issued)); in __ceph_caps_issued()
690 if (ci->i_auth_cap) { in __ceph_caps_issued()
691 cap = ci->i_auth_cap; in __ceph_caps_issued()
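
__ceph_caps_issued folds the whole cap tree into a single bitmask: start from the snap caps, OR in every valid cap's issued (and optionally implemented) bits, then mask off bits the auth MDS has implemented but is currently revoking (the i_auth_cap block at lines 690-691). A sketch:

    int have = ci->i_snap_caps;

    if (implemented)
        *implemented = 0;
    for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
        cap = rb_entry(p, struct ceph_cap, ci_node);
        if (!__cap_is_valid(cap))
            continue;           /* stale session generation */
        have |= cap->issued;
        if (implemented)
            *implemented |= cap->implemented;
    }
    /* drop bits a non-auth MDS granted but the auth MDS is revoking */
    if (ci->i_auth_cap) {
        cap = ci->i_auth_cap;
        have &= ~cap->implemented | cap->issued;
    }
    return have;
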
700 int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap) in __ceph_caps_issued_other() argument
702 int have = ci->i_snap_caps; in __ceph_caps_issued_other()
706 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_issued_other()
727 dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap, in __touch_cap()
732 &cap->ci->vfs_inode, cap, s->s_mds); in __touch_cap()
742 int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) in __ceph_caps_issued_mask() argument
746 int have = ci->i_snap_caps; in __ceph_caps_issued_mask()
750 " (mask %s)\n", &ci->vfs_inode, in __ceph_caps_issued_mask()
756 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_issued_mask()
762 " (mask %s)\n", &ci->vfs_inode, cap, in __ceph_caps_issued_mask()
774 " (mask %s)\n", &ci->vfs_inode, in __ceph_caps_issued_mask()
782 for (q = rb_first(&ci->i_caps); q != p; in __ceph_caps_issued_mask()
801 int __ceph_caps_revoking_other(struct ceph_inode_info *ci, in __ceph_caps_revoking_other() argument
807 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_revoking_other()
816 int ceph_caps_revoking(struct ceph_inode_info *ci, int mask) in ceph_caps_revoking() argument
818 struct inode *inode = &ci->vfs_inode; in ceph_caps_revoking()
821 spin_lock(&ci->i_ceph_lock); in ceph_caps_revoking()
822 ret = __ceph_caps_revoking_other(ci, NULL, mask); in ceph_caps_revoking()
823 spin_unlock(&ci->i_ceph_lock); in ceph_caps_revoking()
829 int __ceph_caps_used(struct ceph_inode_info *ci) in __ceph_caps_used() argument
832 if (ci->i_pin_ref) in __ceph_caps_used()
834 if (ci->i_rd_ref) in __ceph_caps_used()
836 if (ci->i_rdcache_ref || in __ceph_caps_used()
837 (!S_ISDIR(ci->vfs_inode.i_mode) && /* ignore readdir cache */ in __ceph_caps_used()
838 ci->vfs_inode.i_data.nrpages)) in __ceph_caps_used()
840 if (ci->i_wr_ref) in __ceph_caps_used()
842 if (ci->i_wb_ref || ci->i_wrbuffer_ref) in __ceph_caps_used()
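
__ceph_caps_used maps the in-use reference counters back to a cap mask; the matches show the conditions but not the bits they contribute. Filling those in (the pairings follow directly from the counter names):

    int used = 0;

    if (ci->i_pin_ref)
        used |= CEPH_CAP_PIN;
    if (ci->i_rd_ref)
        used |= CEPH_CAP_FILE_RD;
    if (ci->i_rdcache_ref ||
        (!S_ISDIR(ci->vfs_inode.i_mode) &&      /* ignore readdir cache */
         ci->vfs_inode.i_data.nrpages))
        used |= CEPH_CAP_FILE_CACHE;
    if (ci->i_wr_ref)
        used |= CEPH_CAP_FILE_WR;
    if (ci->i_wb_ref || ci->i_wrbuffer_ref)
        used |= CEPH_CAP_FILE_BUFFER;
    return used;
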
850 int __ceph_caps_file_wanted(struct ceph_inode_info *ci) in __ceph_caps_file_wanted() argument
855 if (ci->i_nr_by_mode[mode]) in __ceph_caps_file_wanted()
863 int __ceph_caps_mds_wanted(struct ceph_inode_info *ci) in __ceph_caps_mds_wanted() argument
869 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_mds_wanted()
873 if (cap == ci->i_auth_cap) in __ceph_caps_mds_wanted()
884 static int __ceph_is_any_caps(struct ceph_inode_info *ci) in __ceph_is_any_caps() argument
886 return !RB_EMPTY_ROOT(&ci->i_caps); in __ceph_is_any_caps()
891 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_is_any_caps() local
894 spin_lock(&ci->i_ceph_lock); in ceph_is_any_caps()
895 ret = __ceph_is_any_caps(ci); in ceph_is_any_caps()
896 spin_unlock(&ci->i_ceph_lock); in ceph_is_any_caps()
901 static void drop_inode_snap_realm(struct ceph_inode_info *ci) in drop_inode_snap_realm() argument
903 struct ceph_snap_realm *realm = ci->i_snap_realm; in drop_inode_snap_realm()
905 list_del_init(&ci->i_snap_realm_item); in drop_inode_snap_realm()
906 ci->i_snap_realm_counter++; in drop_inode_snap_realm()
907 ci->i_snap_realm = NULL; in drop_inode_snap_realm()
909 ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc, in drop_inode_snap_realm()
922 struct ceph_inode_info *ci = cap->ci; in __ceph_remove_cap() local
924 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; in __ceph_remove_cap()
927 dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); in __ceph_remove_cap()
942 cap->ci = NULL; in __ceph_remove_cap()
960 cap->cap_ino = ci->i_vino.ino; in __ceph_remove_cap()
965 rb_erase(&cap->ci_node, &ci->i_caps); in __ceph_remove_cap()
966 if (ci->i_auth_cap == cap) in __ceph_remove_cap()
967 ci->i_auth_cap = NULL; in __ceph_remove_cap()
976 if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm) in __ceph_remove_cap()
977 drop_inode_snap_realm(ci); in __ceph_remove_cap()
979 if (!__ceph_is_any_real_caps(ci)) in __ceph_remove_cap()
980 __cap_delay_cancel(mdsc, ci); in __ceph_remove_cap()
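
__ceph_remove_cap undoes ceph_add_cap: detach the cap from its MDS session, erase it from the inode's tree (line 965), clear i_auth_cap if this was it, and, once the inode holds no caps at all, drop the snap realm reference and leave the delayed-check list (lines 976-980). A sketch of the session detach; the s_cap_lock/s_cap_iterator handling is an assumption about fields the matches above do not show:

    spin_lock(&session->s_cap_lock);
    if (session->s_cap_iterator == cap) {
        /* a session-cap walker holds this cap; defer actual removal */
    } else {
        list_del_init(&cap->session_caps);
        session->s_nr_caps--;
        cap->session = NULL;
        removed = 1;
    }
    cap->ci = NULL;             /* line 942: sever the backpointer */
    /* (queueing of the cap release to the MDS elided; see line 960) */
    spin_unlock(&session->s_cap_lock);

    rb_erase(&cap->ci_node, &ci->i_caps);
    if (ci->i_auth_cap == cap)
        ci->i_auth_cap = NULL;
    if (removed)
        ceph_put_cap(mdsc, cap);
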
1080 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_queue_caps_release() local
1083 p = rb_first(&ci->i_caps); in ceph_queue_caps_release()
1111 __releases(cap->ci->i_ceph_lock) in __send_cap()
1113 struct ceph_inode_info *ci = cap->ci; in __send_cap() local
1114 struct inode *inode = &ci->vfs_inode; in __send_cap()
1145 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 && in __send_cap()
1146 time_before(jiffies, ci->i_hold_caps_min)) { in __send_cap()
1156 ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH); in __send_cap()
1171 follows = flushing ? ci->i_head_snapc->seq : 0; in __send_cap()
1178 ci->i_reported_size = size; in __send_cap()
1179 max_size = ci->i_wanted_max_size; in __send_cap()
1180 ci->i_requested_max_size = max_size; in __send_cap()
1183 time_warp_seq = ci->i_time_warp_seq; in __send_cap()
1189 __ceph_build_xattrs_blob(ci); in __send_cap()
1190 xattr_blob = ci->i_xattrs.blob; in __send_cap()
1191 xattr_version = ci->i_xattrs.version; in __send_cap()
1194 inline_data = ci->i_inline_version != CEPH_INLINE_NONE; in __send_cap()
1196 spin_unlock(&ci->i_ceph_lock); in __send_cap()
1210 wake_up_all(&ci->i_cap_wq); in __send_cap()
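
__send_cap is the template for a pattern this file uses throughout: it is entered with i_ceph_lock held (note the __releases annotation at line 1111), copies everything the outgoing cap message needs into locals, drops the lock, and only then builds and sends, because message allocation can sleep. In outline, with the argument list and most locals abridged (send_cap_msg is the real helper, but its full parameter list is long):

    /* entered holding ci->i_ceph_lock */
    u64 size = inode->i_size;
    u64 max_size = ci->i_wanted_max_size;       /* line 1179 */
    u64 xattr_version = ci->i_xattrs.version;   /* line 1191 */

    ci->i_requested_max_size = max_size;        /* line 1180 */
    spin_unlock(&ci->i_ceph_lock);              /* line 1196 */

    /* may sleep: build and send the cap message lock-free */
    ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id, op,
                       keep, want, flushing, seq /* , ... */);

    if (wake)
        wake_up_all(&ci->i_cap_wq);             /* line 1210 */
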
1227 void __ceph_flush_snaps(struct ceph_inode_info *ci, in __ceph_flush_snaps() argument
1230 __releases(ci->i_ceph_lock) in __ceph_flush_snaps()
1231 __acquires(ci->i_ceph_lock) in __ceph_flush_snaps()
1233 struct inode *inode = &ci->vfs_inode; in __ceph_flush_snaps()
1249 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { in __ceph_flush_snaps()
1264 if (ci->i_auth_cap == NULL) { in __ceph_flush_snaps()
1275 mds = ci->i_auth_cap->session->s_mds; in __ceph_flush_snaps()
1276 mseq = ci->i_auth_cap->mseq; in __ceph_flush_snaps()
1288 spin_unlock(&ci->i_ceph_lock); in __ceph_flush_snaps()
1302 spin_lock(&ci->i_ceph_lock); in __ceph_flush_snaps()
1314 spin_unlock(&ci->i_ceph_lock); in __ceph_flush_snaps()
1331 spin_lock(&ci->i_ceph_lock); in __ceph_flush_snaps()
1337 list_del_init(&ci->i_snap_flush_item); in __ceph_flush_snaps()
1349 static void ceph_flush_snaps(struct ceph_inode_info *ci) in ceph_flush_snaps() argument
1351 spin_lock(&ci->i_ceph_lock); in ceph_flush_snaps()
1352 __ceph_flush_snaps(ci, NULL, 0); in ceph_flush_snaps()
1353 spin_unlock(&ci->i_ceph_lock); in ceph_flush_snaps()
1361 int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, in __ceph_mark_dirty_caps() argument
1365 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; in __ceph_mark_dirty_caps()
1366 struct inode *inode = &ci->vfs_inode; in __ceph_mark_dirty_caps()
1367 int was = ci->i_dirty_caps; in __ceph_mark_dirty_caps()
1370 if (!ci->i_auth_cap) { in __ceph_mark_dirty_caps()
1377 dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode, in __ceph_mark_dirty_caps()
1380 ci->i_dirty_caps |= mask; in __ceph_mark_dirty_caps()
1382 WARN_ON_ONCE(ci->i_prealloc_cap_flush); in __ceph_mark_dirty_caps()
1383 swap(ci->i_prealloc_cap_flush, *pcf); in __ceph_mark_dirty_caps()
1385 if (!ci->i_head_snapc) { in __ceph_mark_dirty_caps()
1387 ci->i_head_snapc = ceph_get_snap_context( in __ceph_mark_dirty_caps()
1388 ci->i_snap_realm->cached_context); in __ceph_mark_dirty_caps()
1391 &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap); in __ceph_mark_dirty_caps()
1392 BUG_ON(!list_empty(&ci->i_dirty_item)); in __ceph_mark_dirty_caps()
1394 list_add(&ci->i_dirty_item, &mdsc->cap_dirty); in __ceph_mark_dirty_caps()
1396 if (ci->i_flushing_caps == 0) { in __ceph_mark_dirty_caps()
1401 WARN_ON_ONCE(!ci->i_prealloc_cap_flush); in __ceph_mark_dirty_caps()
1403 BUG_ON(list_empty(&ci->i_dirty_item)); in __ceph_mark_dirty_caps()
1404 if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) && in __ceph_mark_dirty_caps()
1407 __cap_delay_requeue(mdsc, ci); in __ceph_mark_dirty_caps()
1411 static void __add_cap_flushing_to_inode(struct ceph_inode_info *ci, in __add_cap_flushing_to_inode() argument
1414 struct rb_node **p = &ci->i_cap_flush_tree.rb_node; in __add_cap_flushing_to_inode()
1431 rb_insert_color(&cf->i_node, &ci->i_cap_flush_tree); in __add_cap_flushing_to_inode()
1490 struct ceph_inode_info *ci = ceph_inode(inode); in __mark_caps_flushing() local
1494 BUG_ON(ci->i_dirty_caps == 0); in __mark_caps_flushing()
1495 BUG_ON(list_empty(&ci->i_dirty_item)); in __mark_caps_flushing()
1496 BUG_ON(!ci->i_prealloc_cap_flush); in __mark_caps_flushing()
1498 flushing = ci->i_dirty_caps; in __mark_caps_flushing()
1501 ceph_cap_string(ci->i_flushing_caps), in __mark_caps_flushing()
1502 ceph_cap_string(ci->i_flushing_caps | flushing)); in __mark_caps_flushing()
1503 ci->i_flushing_caps |= flushing; in __mark_caps_flushing()
1504 ci->i_dirty_caps = 0; in __mark_caps_flushing()
1507 swap(cf, ci->i_prealloc_cap_flush); in __mark_caps_flushing()
1511 list_del_init(&ci->i_dirty_item); in __mark_caps_flushing()
1517 if (list_empty(&ci->i_flushing_item)) { in __mark_caps_flushing()
1518 list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing); in __mark_caps_flushing()
1522 list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing); in __mark_caps_flushing()
1528 __add_cap_flushing_to_inode(ci, cf); in __mark_caps_flushing()
1539 struct ceph_inode_info *ci = ceph_inode(inode); in try_nonblocking_invalidate() local
1540 u32 invalidating_gen = ci->i_rdcache_gen; in try_nonblocking_invalidate()
1542 spin_unlock(&ci->i_ceph_lock); in try_nonblocking_invalidate()
1544 spin_lock(&ci->i_ceph_lock); in try_nonblocking_invalidate()
1547 invalidating_gen == ci->i_rdcache_gen) { in try_nonblocking_invalidate()
1551 ci->i_rdcache_revoking = ci->i_rdcache_gen - 1; in try_nonblocking_invalidate()
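
try_nonblocking_invalidate captures the generation-check idiom behind i_rdcache_gen: record the generation, drop i_ceph_lock around the blocking page-cache invalidation, retake it, and only claim success if no concurrent FILE_CACHE grant bumped the generation in the meantime. Reconstructed around the matched lines (close to the original, but still a sketch):

    static int try_nonblocking_invalidate(struct inode *inode)
    {
        struct ceph_inode_info *ci = ceph_inode(inode);
        u32 invalidating_gen = ci->i_rdcache_gen;

        spin_unlock(&ci->i_ceph_lock);
        invalidate_mapping_pages(&inode->i_data, 0, -1);
        spin_lock(&ci->i_ceph_lock);

        if (inode->i_data.nrpages == 0 &&
            invalidating_gen == ci->i_rdcache_gen) {
            /* success: spare any racing async invalidate the work */
            ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
            return 0;
        }
        return -1;
    }
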
1569 void ceph_check_caps(struct ceph_inode_info *ci, int flags, in ceph_check_caps() argument
1572 struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode); in ceph_check_caps()
1574 struct inode *inode = &ci->vfs_inode; in ceph_check_caps()
1592 spin_lock(&ci->i_ceph_lock); in ceph_check_caps()
1594 if (ci->i_ceph_flags & CEPH_I_FLUSH) in ceph_check_caps()
1598 if (!list_empty(&ci->i_cap_snaps)) in ceph_check_caps()
1599 __ceph_flush_snaps(ci, &session, 0); in ceph_check_caps()
1602 spin_lock(&ci->i_ceph_lock); in ceph_check_caps()
1604 file_wanted = __ceph_caps_file_wanted(ci); in ceph_check_caps()
1605 used = __ceph_caps_used(ci); in ceph_check_caps()
1606 issued = __ceph_caps_issued(ci, &implemented); in ceph_check_caps()
1616 __ceph_dir_is_complete(ci)) { in ceph_check_caps()
1633 if (ci->i_max_size == 0) in ceph_check_caps()
1641 ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps), in ceph_check_caps()
1642 ceph_cap_string(ci->i_flushing_caps), in ceph_check_caps()
1656 ci->i_wrbuffer_ref == 0 && /* no dirty pages... */ in ceph_check_caps()
1667 ci->i_rdcache_revoking = ci->i_rdcache_gen; in ceph_check_caps()
1673 __cap_set_timeouts(mdsc, ci); in ceph_check_caps()
1681 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in ceph_check_caps()
1687 ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap)) in ceph_check_caps()
1693 if (ci->i_auth_cap && cap != ci->i_auth_cap) in ceph_check_caps()
1694 cap_used &= ~ci->i_auth_cap->issued; in ceph_check_caps()
1703 if (cap == ci->i_auth_cap && in ceph_check_caps()
1706 if (ci->i_wanted_max_size > ci->i_max_size && in ceph_check_caps()
1707 ci->i_wanted_max_size > ci->i_requested_max_size) { in ceph_check_caps()
1713 if ((inode->i_size << 1) >= ci->i_max_size && in ceph_check_caps()
1714 (ci->i_reported_size << 1) < ci->i_max_size) { in ceph_check_caps()
1720 if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) && in ceph_check_caps()
1721 ci->i_dirty_caps) { in ceph_check_caps()
1746 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 && in ceph_check_caps()
1747 time_before(jiffies, ci->i_hold_caps_max)) { in ceph_check_caps()
1758 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) { in ceph_check_caps()
1773 spin_unlock(&ci->i_ceph_lock); in ceph_check_caps()
1787 spin_unlock(&ci->i_ceph_lock); in ceph_check_caps()
1795 if (cap == ci->i_auth_cap && ci->i_dirty_caps) { in ceph_check_caps()
1824 __cap_delay_cancel(mdsc, ci); in ceph_check_caps()
1826 __cap_delay_requeue(mdsc, ci); in ceph_check_caps()
1828 spin_unlock(&ci->i_ceph_lock); in ceph_check_caps()
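
ceph_check_caps is the decision engine of the file: compute file_wanted/used/issued under i_ceph_lock, then walk every cap and decide, per MDS, whether to request a larger max_size, report a size change, flush dirty metadata, or release bits no longer wanted. Because __send_cap drops i_ceph_lock, the walk cannot simply continue after sending; the function retakes the lock and restarts. A heavily abridged skeleton of that goto-retry shape:

    retry:
        spin_lock(&ci->i_ceph_lock);
    retry_locked:
        /* recompute file_wanted/used/issued/revoking here */
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
            cap = rb_entry(p, struct ceph_cap, ci_node);
            if ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap)
                continue;                       /* line 1687 */
            /* decide whether this MDS needs a message ... */
            /* __send_cap drops i_ceph_lock (line 1773) */
            delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, ...);
            goto retry;         /* the tree may have changed */
        }
        spin_unlock(&ci->i_ceph_lock);
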
1845 struct ceph_inode_info *ci = ceph_inode(inode); in try_flush_caps() local
1851 spin_lock(&ci->i_ceph_lock); in try_flush_caps()
1852 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) { in try_flush_caps()
1856 if (ci->i_dirty_caps && ci->i_auth_cap) { in try_flush_caps()
1857 struct ceph_cap *cap = ci->i_auth_cap; in try_flush_caps()
1858 int used = __ceph_caps_used(ci); in try_flush_caps()
1859 int want = __ceph_caps_wanted(ci); in try_flush_caps()
1863 spin_unlock(&ci->i_ceph_lock); in try_flush_caps()
1882 spin_lock(&ci->i_ceph_lock); in try_flush_caps()
1883 __cap_delay_requeue(mdsc, ci); in try_flush_caps()
1884 spin_unlock(&ci->i_ceph_lock); in try_flush_caps()
1887 struct rb_node *n = rb_last(&ci->i_cap_flush_tree); in try_flush_caps()
1893 flushing = ci->i_flushing_caps; in try_flush_caps()
1894 spin_unlock(&ci->i_ceph_lock); in try_flush_caps()
1909 struct ceph_inode_info *ci = ceph_inode(inode); in caps_are_flushed() local
1914 spin_lock(&ci->i_ceph_lock); in caps_are_flushed()
1915 n = rb_first(&ci->i_cap_flush_tree); in caps_are_flushed()
1921 spin_unlock(&ci->i_ceph_lock); in caps_are_flushed()
1933 struct ceph_inode_info *ci = ceph_inode(inode); in sync_write_wait() local
1934 struct list_head *head = &ci->i_unsafe_writes; in sync_write_wait()
1941 spin_lock(&ci->i_unsafe_lock); in sync_write_wait()
1952 spin_unlock(&ci->i_unsafe_lock); in sync_write_wait()
1956 spin_lock(&ci->i_unsafe_lock); in sync_write_wait()
1969 spin_unlock(&ci->i_unsafe_lock); in sync_write_wait()
1977 struct ceph_inode_info *ci = ceph_inode(inode); in unsafe_request_wait() local
1981 spin_lock(&ci->i_unsafe_lock); in unsafe_request_wait()
1982 if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) { in unsafe_request_wait()
1983 req1 = list_last_entry(&ci->i_unsafe_dirops, in unsafe_request_wait()
1988 if (!list_empty(&ci->i_unsafe_iops)) { in unsafe_request_wait()
1989 req2 = list_last_entry(&ci->i_unsafe_iops, in unsafe_request_wait()
1994 spin_unlock(&ci->i_unsafe_lock); in unsafe_request_wait()
2018 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_fsync() local
2046 ret = wait_event_interruptible(ci->i_cap_wq, in ceph_fsync()
2063 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_write_inode() local
2073 err = wait_event_interruptible(ci->i_cap_wq, in ceph_write_inode()
2079 spin_lock(&ci->i_ceph_lock); in ceph_write_inode()
2080 if (__ceph_caps_dirty(ci)) in ceph_write_inode()
2081 __cap_delay_requeue_front(mdsc, ci); in ceph_write_inode()
2082 spin_unlock(&ci->i_ceph_lock); in ceph_write_inode()
2101 struct ceph_inode_info *ci = capsnap->ci; in kick_flushing_capsnaps() local
2102 struct inode *inode = &ci->vfs_inode; in kick_flushing_capsnaps()
2105 spin_lock(&ci->i_ceph_lock); in kick_flushing_capsnaps()
2106 cap = ci->i_auth_cap; in kick_flushing_capsnaps()
2110 __ceph_flush_snaps(ci, &session, 1); in kick_flushing_capsnaps()
2115 spin_unlock(&ci->i_ceph_lock); in kick_flushing_capsnaps()
2121 struct ceph_inode_info *ci) in __kick_flushing_caps() argument
2123 struct inode *inode = &ci->vfs_inode; in __kick_flushing_caps()
2136 spin_lock(&ci->i_ceph_lock); in __kick_flushing_caps()
2137 cap = ci->i_auth_cap; in __kick_flushing_caps()
2141 spin_unlock(&ci->i_ceph_lock); in __kick_flushing_caps()
2145 for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) { in __kick_flushing_caps()
2151 spin_unlock(&ci->i_ceph_lock); in __kick_flushing_caps()
2162 __ceph_caps_used(ci), in __kick_flushing_caps()
2163 __ceph_caps_wanted(ci), in __kick_flushing_caps()
2173 struct ceph_inode_info *ci; in ceph_early_kick_flushing_caps() local
2177 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { in ceph_early_kick_flushing_caps()
2178 spin_lock(&ci->i_ceph_lock); in ceph_early_kick_flushing_caps()
2179 cap = ci->i_auth_cap; in ceph_early_kick_flushing_caps()
2182 &ci->vfs_inode, cap, session->s_mds); in ceph_early_kick_flushing_caps()
2183 spin_unlock(&ci->i_ceph_lock); in ceph_early_kick_flushing_caps()
2194 if ((cap->issued & ci->i_flushing_caps) != in ceph_early_kick_flushing_caps()
2195 ci->i_flushing_caps) { in ceph_early_kick_flushing_caps()
2196 spin_unlock(&ci->i_ceph_lock); in ceph_early_kick_flushing_caps()
2197 if (!__kick_flushing_caps(mdsc, session, ci)) in ceph_early_kick_flushing_caps()
2199 spin_lock(&ci->i_ceph_lock); in ceph_early_kick_flushing_caps()
2202 spin_unlock(&ci->i_ceph_lock); in ceph_early_kick_flushing_caps()
2209 struct ceph_inode_info *ci; in ceph_kick_flushing_caps() local
2214 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { in ceph_kick_flushing_caps()
2215 int delayed = __kick_flushing_caps(mdsc, session, ci); in ceph_kick_flushing_caps()
2217 spin_lock(&ci->i_ceph_lock); in ceph_kick_flushing_caps()
2218 __cap_delay_requeue(mdsc, ci); in ceph_kick_flushing_caps()
2219 spin_unlock(&ci->i_ceph_lock); in ceph_kick_flushing_caps()
2228 struct ceph_inode_info *ci = ceph_inode(inode); in kick_flushing_inode_caps() local
2231 spin_lock(&ci->i_ceph_lock); in kick_flushing_inode_caps()
2232 cap = ci->i_auth_cap; in kick_flushing_inode_caps()
2234 ceph_cap_string(ci->i_flushing_caps)); in kick_flushing_inode_caps()
2236 __ceph_flush_snaps(ci, &session, 1); in kick_flushing_inode_caps()
2238 if (ci->i_flushing_caps) { in kick_flushing_inode_caps()
2242 list_move_tail(&ci->i_flushing_item, in kick_flushing_inode_caps()
2246 spin_unlock(&ci->i_ceph_lock); in kick_flushing_inode_caps()
2248 delayed = __kick_flushing_caps(mdsc, session, ci); in kick_flushing_inode_caps()
2250 spin_lock(&ci->i_ceph_lock); in kick_flushing_inode_caps()
2251 __cap_delay_requeue(mdsc, ci); in kick_flushing_inode_caps()
2252 spin_unlock(&ci->i_ceph_lock); in kick_flushing_inode_caps()
2255 spin_unlock(&ci->i_ceph_lock); in kick_flushing_inode_caps()
2266 static void __take_cap_refs(struct ceph_inode_info *ci, int got, in __take_cap_refs() argument
2270 ci->i_pin_ref++; in __take_cap_refs()
2272 ci->i_rd_ref++; in __take_cap_refs()
2274 ci->i_rdcache_ref++; in __take_cap_refs()
2276 if (ci->i_wr_ref == 0 && !ci->i_head_snapc) { in __take_cap_refs()
2278 ci->i_head_snapc = ceph_get_snap_context( in __take_cap_refs()
2279 ci->i_snap_realm->cached_context); in __take_cap_refs()
2281 ci->i_wr_ref++; in __take_cap_refs()
2284 if (ci->i_wb_ref == 0) in __take_cap_refs()
2285 ihold(&ci->vfs_inode); in __take_cap_refs()
2286 ci->i_wb_ref++; in __take_cap_refs()
2288 &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref); in __take_cap_refs()
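
__take_cap_refs is the inverse of __ceph_caps_used: each granted bit bumps its counter. Two first-reference side effects are visible above: the first write ref pins the current snap context into i_head_snapc (lines 2276-2279), and the first buffered-write ref takes an inode reference so dirty data keeps the inode alive (lines 2284-2286). Sketch:

    if (got & CEPH_CAP_PIN)
        ci->i_pin_ref++;
    if (got & CEPH_CAP_FILE_RD)
        ci->i_rd_ref++;
    if (got & CEPH_CAP_FILE_CACHE)
        ci->i_rdcache_ref++;
    if (got & CEPH_CAP_FILE_WR) {
        if (ci->i_wr_ref == 0 && !ci->i_head_snapc) {
            BUG_ON(!snap_rwsem_locked);
            ci->i_head_snapc = ceph_get_snap_context(
                    ci->i_snap_realm->cached_context);
        }
        ci->i_wr_ref++;
    }
    if (got & CEPH_CAP_FILE_BUFFER) {
        if (ci->i_wb_ref == 0)
            ihold(&ci->vfs_inode);
        ci->i_wb_ref++;
    }
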
2299 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, in try_get_cap_refs() argument
2302 struct inode *inode = &ci->vfs_inode; in try_get_cap_refs()
2313 spin_lock(&ci->i_ceph_lock); in try_get_cap_refs()
2316 file_wanted = __ceph_caps_file_wanted(ci); in try_get_cap_refs()
2326 while (ci->i_truncate_pending) { in try_get_cap_refs()
2327 spin_unlock(&ci->i_ceph_lock); in try_get_cap_refs()
2333 spin_lock(&ci->i_ceph_lock); in try_get_cap_refs()
2336 have = __ceph_caps_issued(ci, &implemented); in try_get_cap_refs()
2339 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) { in try_get_cap_refs()
2341 inode, endoff, ci->i_max_size); in try_get_cap_refs()
2342 if (endoff > ci->i_requested_max_size) { in try_get_cap_refs()
2352 if (__ceph_have_pending_cap_snap(ci)) { in try_get_cap_refs()
2372 !ci->i_head_snapc && in try_get_cap_refs()
2385 spin_unlock(&ci->i_ceph_lock); in try_get_cap_refs()
2393 __take_cap_refs(ci, *got, true); in try_get_cap_refs()
2398 if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) { in try_get_cap_refs()
2399 struct ceph_mds_session *s = ci->i_auth_cap->session; in try_get_cap_refs()
2406 inode, ceph_cap_string(need), ci->i_auth_cap->mds); in try_get_cap_refs()
2412 if (!__ceph_is_any_caps(ci) && in try_get_cap_refs()
2424 spin_unlock(&ci->i_ceph_lock); in try_get_cap_refs()
2440 struct ceph_inode_info *ci = ceph_inode(inode); in check_max_size() local
2444 spin_lock(&ci->i_ceph_lock); in check_max_size()
2445 if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) { in check_max_size()
2448 ci->i_wanted_max_size = endoff; in check_max_size()
2451 if (ci->i_auth_cap && in check_max_size()
2452 (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) && in check_max_size()
2453 ci->i_wanted_max_size > ci->i_max_size && in check_max_size()
2454 ci->i_wanted_max_size > ci->i_requested_max_size) in check_max_size()
2456 spin_unlock(&ci->i_ceph_lock); in check_max_size()
2458 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); in check_max_size()
2466 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, in ceph_get_caps() argument
2471 ret = ceph_pool_perm_check(ci, need); in ceph_get_caps()
2477 check_max_size(&ci->vfs_inode, endoff); in ceph_get_caps()
2481 ret = try_get_cap_refs(ci, need, want, endoff, in ceph_get_caps()
2489 ret = wait_event_interruptible(ci->i_cap_wq, in ceph_get_caps()
2490 try_get_cap_refs(ci, need, want, endoff, in ceph_get_caps()
2500 if (ci->i_inline_version != CEPH_INLINE_NONE && in ceph_get_caps()
2502 i_size_read(&ci->vfs_inode) > 0) { in ceph_get_caps()
2504 find_get_page(ci->vfs_inode.i_mapping, 0); in ceph_get_caps()
2516 ceph_put_cap_refs(ci, _got); in ceph_get_caps()
2523 ret = __ceph_do_getattr(&ci->vfs_inode, NULL, in ceph_get_caps()
2541 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps) in ceph_get_cap_refs() argument
2543 spin_lock(&ci->i_ceph_lock); in ceph_get_cap_refs()
2544 __take_cap_refs(ci, caps, false); in ceph_get_cap_refs()
2545 spin_unlock(&ci->i_ceph_lock); in ceph_get_cap_refs()
2578 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) in ceph_put_cap_refs() argument
2580 struct inode *inode = &ci->vfs_inode; in ceph_put_cap_refs()
2583 spin_lock(&ci->i_ceph_lock); in ceph_put_cap_refs()
2585 --ci->i_pin_ref; in ceph_put_cap_refs()
2587 if (--ci->i_rd_ref == 0) in ceph_put_cap_refs()
2590 if (--ci->i_rdcache_ref == 0) in ceph_put_cap_refs()
2593 if (--ci->i_wb_ref == 0) { in ceph_put_cap_refs()
2598 inode, ci->i_wb_ref+1, ci->i_wb_ref); in ceph_put_cap_refs()
2601 if (--ci->i_wr_ref == 0) { in ceph_put_cap_refs()
2603 if (__ceph_have_pending_cap_snap(ci)) { in ceph_put_cap_refs()
2605 list_last_entry(&ci->i_cap_snaps, in ceph_put_cap_refs()
2611 else if (__ceph_finish_cap_snap(ci, capsnap)) in ceph_put_cap_refs()
2615 if (ci->i_wrbuffer_ref_head == 0 && in ceph_put_cap_refs()
2616 ci->i_dirty_caps == 0 && in ceph_put_cap_refs()
2617 ci->i_flushing_caps == 0) { in ceph_put_cap_refs()
2618 BUG_ON(!ci->i_head_snapc); in ceph_put_cap_refs()
2619 ceph_put_snap_context(ci->i_head_snapc); in ceph_put_cap_refs()
2620 ci->i_head_snapc = NULL; in ceph_put_cap_refs()
2623 if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) in ceph_put_cap_refs()
2624 drop_inode_snap_realm(ci); in ceph_put_cap_refs()
2626 spin_unlock(&ci->i_ceph_lock); in ceph_put_cap_refs()
2632 ceph_check_caps(ci, 0, NULL); in ceph_put_cap_refs()
2634 ceph_flush_snaps(ci); in ceph_put_cap_refs()
2636 wake_up_all(&ci->i_cap_wq); in ceph_put_cap_refs()
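
ceph_put_cap_refs shows the put-side discipline: decrement counters under i_ceph_lock, but only record what dropped to zero (in local last/flushsnaps/wake/put flags); everything with side effects runs after the unlock, so nothing sleeps or re-enters cap code while holding the spinlock. The tail presumably reads:

    spin_unlock(&ci->i_ceph_lock);              /* line 2626 */

    if (last && !flushsnaps)
        ceph_check_caps(ci, 0, NULL);           /* line 2632 */
    else if (flushsnaps)
        ceph_flush_snaps(ci);                   /* line 2634 */
    if (wake)
        wake_up_all(&ci->i_cap_wq);             /* line 2636 */
    while (put-- > 0)
        iput(inode);
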
2648 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, in ceph_put_wrbuffer_cap_refs() argument
2651 struct inode *inode = &ci->vfs_inode; in ceph_put_wrbuffer_cap_refs()
2658 spin_lock(&ci->i_ceph_lock); in ceph_put_wrbuffer_cap_refs()
2659 ci->i_wrbuffer_ref -= nr; in ceph_put_wrbuffer_cap_refs()
2660 last = !ci->i_wrbuffer_ref; in ceph_put_wrbuffer_cap_refs()
2662 if (ci->i_head_snapc == snapc) { in ceph_put_wrbuffer_cap_refs()
2663 ci->i_wrbuffer_ref_head -= nr; in ceph_put_wrbuffer_cap_refs()
2664 if (ci->i_wrbuffer_ref_head == 0 && in ceph_put_wrbuffer_cap_refs()
2665 ci->i_wr_ref == 0 && in ceph_put_wrbuffer_cap_refs()
2666 ci->i_dirty_caps == 0 && in ceph_put_wrbuffer_cap_refs()
2667 ci->i_flushing_caps == 0) { in ceph_put_wrbuffer_cap_refs()
2668 BUG_ON(!ci->i_head_snapc); in ceph_put_wrbuffer_cap_refs()
2669 ceph_put_snap_context(ci->i_head_snapc); in ceph_put_wrbuffer_cap_refs()
2670 ci->i_head_snapc = NULL; in ceph_put_wrbuffer_cap_refs()
2674 ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr, in ceph_put_wrbuffer_cap_refs()
2675 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, in ceph_put_wrbuffer_cap_refs()
2678 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { in ceph_put_wrbuffer_cap_refs()
2693 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, in ceph_put_wrbuffer_cap_refs()
2694 ci->i_wrbuffer_ref, capsnap->dirty_pages, in ceph_put_wrbuffer_cap_refs()
2699 spin_unlock(&ci->i_ceph_lock); in ceph_put_wrbuffer_cap_refs()
2702 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); in ceph_put_wrbuffer_cap_refs()
2705 ceph_flush_snaps(ci); in ceph_put_wrbuffer_cap_refs()
2706 wake_up_all(&ci->i_cap_wq); in ceph_put_wrbuffer_cap_refs()
2757 __releases(ci->i_ceph_lock) in handle_cap_grant()
2760 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_grant() local
2792 WARN_ON(cap != ci->i_auth_cap); in handle_cap_grant()
2806 !ci->i_wrbuffer_ref) { in handle_cap_grant()
2810 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { in handle_cap_grant()
2812 ci->i_rdcache_revoking = ci->i_rdcache_gen; in handle_cap_grant()
2823 __check_cap_issue(ci, cap, newcaps); in handle_cap_grant()
2847 if (version > ci->i_xattrs.version) { in handle_cap_grant()
2850 if (ci->i_xattrs.blob) in handle_cap_grant()
2851 ceph_buffer_put(ci->i_xattrs.blob); in handle_cap_grant()
2852 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf); in handle_cap_grant()
2853 ci->i_xattrs.version = version; in handle_cap_grant()
2860 if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1) in handle_cap_grant()
2875 ci->i_layout = grant->layout; in handle_cap_grant()
2882 if (ci->i_auth_cap == cap && max_size != ci->i_max_size) { in handle_cap_grant()
2884 ci->i_max_size, max_size); in handle_cap_grant()
2885 ci->i_max_size = max_size; in handle_cap_grant()
2886 if (max_size >= ci->i_wanted_max_size) { in handle_cap_grant()
2887 ci->i_wanted_max_size = 0; /* reset */ in handle_cap_grant()
2888 ci->i_requested_max_size = 0; in handle_cap_grant()
2895 wanted = __ceph_caps_wanted(ci); in handle_cap_grant()
2896 used = __ceph_caps_used(ci); in handle_cap_grant()
2897 dirty = __ceph_caps_dirty(ci); in handle_cap_grant()
2925 else if (cap == ci->i_auth_cap) in handle_cap_grant()
2938 if (cap == ci->i_auth_cap && in handle_cap_grant()
2939 __ceph_caps_revoking_other(ci, cap, newcaps)) in handle_cap_grant()
2950 if (inline_version > 0 && inline_version >= ci->i_inline_version) { in handle_cap_grant()
2951 ci->i_inline_version = inline_version; in handle_cap_grant()
2952 if (ci->i_inline_version != CEPH_INLINE_NONE && in handle_cap_grant()
2957 spin_unlock(&ci->i_ceph_lock); in handle_cap_grant()
2987 wake_up_all(&ci->i_cap_wq); in handle_cap_grant()
2990 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY, in handle_cap_grant()
2993 ceph_check_caps(ci, CHECK_CAPS_NODELAY, session); in handle_cap_grant()
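
The core split inside handle_cap_grant is grant versus revoke: if the MDS is taking bits away (cap->issued & ~newcaps), the client may first have to write back dirty buffers (losing FILE_BUFFER) or invalidate the page cache (losing FILE_CACHE) before it can ack, whereas a pure grant just widens what is held. Schematically, with several branches dropped:

    if (cap->issued & ~newcaps) {
        int revoking = cap->issued & ~newcaps;

        if (revoking & used & CEPH_CAP_FILE_BUFFER)
            writeback = true;   /* flush dirty pages before acking */
        else if (cap == ci->i_auth_cap)
            check_caps = 1;     /* re-examine the auth cap */
        else
            check_caps = 2;     /* re-examine all caps */
        cap->issued = newcaps;
        cap->implemented |= newcaps;    /* "implemented" until acked */
    } else if (cap->issued == newcaps) {
        /* nothing changed */
    } else {
        /* pure grant: widen what we hold */
        cap->issued = newcaps;
        cap->implemented |= newcaps;
    }

The check_caps value drives the two ceph_check_caps calls at lines 2990-2993 once i_ceph_lock has been dropped.
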
3006 __releases(ci->i_ceph_lock) in handle_cap_flush_ack()
3008 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_flush_ack() local
3018 n = rb_first(&ci->i_cap_flush_tree); in handle_cap_flush_ack()
3025 rb_erase(&cf->i_node, &ci->i_cap_flush_tree); in handle_cap_flush_ack()
3037 ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps), in handle_cap_flush_ack()
3038 ceph_cap_string(ci->i_flushing_caps & ~cleaned)); in handle_cap_flush_ack()
3043 ci->i_flushing_caps &= ~cleaned; in handle_cap_flush_ack()
3057 if (ci->i_flushing_caps == 0) { in handle_cap_flush_ack()
3058 list_del_init(&ci->i_flushing_item); in handle_cap_flush_ack()
3068 if (ci->i_dirty_caps == 0) { in handle_cap_flush_ack()
3070 BUG_ON(!list_empty(&ci->i_dirty_item)); in handle_cap_flush_ack()
3072 if (ci->i_wr_ref == 0 && in handle_cap_flush_ack()
3073 ci->i_wrbuffer_ref_head == 0) { in handle_cap_flush_ack()
3074 BUG_ON(!ci->i_head_snapc); in handle_cap_flush_ack()
3075 ceph_put_snap_context(ci->i_head_snapc); in handle_cap_flush_ack()
3076 ci->i_head_snapc = NULL; in handle_cap_flush_ack()
3079 BUG_ON(list_empty(&ci->i_dirty_item)); in handle_cap_flush_ack()
3083 wake_up_all(&ci->i_cap_wq); in handle_cap_flush_ack()
3086 spin_unlock(&ci->i_ceph_lock); in handle_cap_flush_ack()
3108 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_flushsnap_ack() local
3115 inode, ci, session->s_mds, follows); in handle_cap_flushsnap_ack()
3117 spin_lock(&ci->i_ceph_lock); in handle_cap_flushsnap_ack()
3118 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { in handle_cap_flushsnap_ack()
3141 spin_unlock(&ci->i_ceph_lock); in handle_cap_flushsnap_ack()
3154 __releases(ci->i_ceph_lock) in handle_cap_trunc()
3156 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_trunc() local
3163 int dirty = __ceph_caps_dirty(ci); in handle_cap_trunc()
3173 spin_unlock(&ci->i_ceph_lock); in handle_cap_trunc()
3196 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_export() local
3214 inode, ci, mds, mseq, target); in handle_cap_export()
3216 spin_lock(&ci->i_ceph_lock); in handle_cap_export()
3217 cap = __get_cap_for_mds(ci, mds); in handle_cap_export()
3234 tcap = __get_cap_for_mds(ci, target); in handle_cap_export()
3246 if (cap == ci->i_auth_cap) in handle_cap_export()
3247 ci->i_auth_cap = tcap; in handle_cap_export()
3248 if (ci->i_flushing_caps && ci->i_auth_cap == tcap) { in handle_cap_export()
3250 list_move_tail(&ci->i_flushing_item, in handle_cap_export()
3259 int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0; in handle_cap_export()
3267 spin_unlock(&ci->i_ceph_lock); in handle_cap_export()
3291 spin_unlock(&ci->i_ceph_lock); in handle_cap_export()
3311 __acquires(ci->i_ceph_lock) in handle_cap_import()
3313 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_import() local
3335 inode, ci, mds, mseq, peer); in handle_cap_import()
3338 spin_lock(&ci->i_ceph_lock); in handle_cap_import()
3339 cap = __get_cap_for_mds(ci, mds); in handle_cap_import()
3342 spin_unlock(&ci->i_ceph_lock); in handle_cap_import()
3354 __ceph_caps_issued(ci, &issued); in handle_cap_import()
3355 issued |= __ceph_caps_dirty(ci); in handle_cap_import()
3360 ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL; in handle_cap_import()
3378 ci->i_wanted_max_size = 0; in handle_cap_import()
3379 ci->i_requested_max_size = 0; in handle_cap_import()
3397 struct ceph_inode_info *ci; in ceph_handle_caps() local
3468 ci = ceph_inode(inode); in ceph_handle_caps()
3528 spin_lock(&ci->i_ceph_lock); in ceph_handle_caps()
3533 spin_unlock(&ci->i_ceph_lock); in ceph_handle_caps()
3541 __ceph_caps_issued(ci, &issued); in ceph_handle_caps()
3542 issued |= __ceph_caps_dirty(ci); in ceph_handle_caps()
3557 spin_unlock(&ci->i_ceph_lock); in ceph_handle_caps()
3589 struct ceph_inode_info *ci; in ceph_check_delayed_caps() local
3597 ci = list_first_entry(&mdsc->cap_delay_list, in ceph_check_delayed_caps()
3600 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 && in ceph_check_delayed_caps()
3601 time_before(jiffies, ci->i_hold_caps_max)) in ceph_check_delayed_caps()
3603 list_del_init(&ci->i_cap_delay_list); in ceph_check_delayed_caps()
3605 dout("check_delayed_caps on %p\n", &ci->vfs_inode); in ceph_check_delayed_caps()
3606 ceph_check_caps(ci, flags, NULL); in ceph_check_delayed_caps()
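
ceph_check_delayed_caps drains mdsc->cap_delay_list front to back. Since __cap_delay_requeue appends in timeout order, the walk can stop at the first entry whose i_hold_caps_max is still in the future, unless CEPH_I_FLUSH forces it through; the list lock is dropped across each ceph_check_caps call. The loop around the matched lines presumably reads:

    spin_lock(&mdsc->cap_delay_lock);
    while (!list_empty(&mdsc->cap_delay_list)) {
        ci = list_first_entry(&mdsc->cap_delay_list,
                              struct ceph_inode_info,
                              i_cap_delay_list);
        if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
            time_before(jiffies, ci->i_hold_caps_max))
            break;              /* everything later is newer still */
        list_del_init(&ci->i_cap_delay_list);
        spin_unlock(&mdsc->cap_delay_lock);
        ceph_check_caps(ci, flags, NULL);       /* line 3606 */
        spin_lock(&mdsc->cap_delay_lock);
    }
    spin_unlock(&mdsc->cap_delay_lock);
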
3616 struct ceph_inode_info *ci; in ceph_flush_dirty_caps() local
3622 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info, in ceph_flush_dirty_caps()
3624 inode = &ci->vfs_inode; in ceph_flush_dirty_caps()
3628 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL); in ceph_flush_dirty_caps()
3641 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode) in ceph_put_fmode() argument
3643 struct inode *inode = &ci->vfs_inode; in ceph_put_fmode()
3646 spin_lock(&ci->i_ceph_lock); in ceph_put_fmode()
3648 ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1); in ceph_put_fmode()
3649 BUG_ON(ci->i_nr_by_mode[fmode] == 0); in ceph_put_fmode()
3650 if (--ci->i_nr_by_mode[fmode] == 0) in ceph_put_fmode()
3652 spin_unlock(&ci->i_ceph_lock); in ceph_put_fmode()
3654 if (last && ci->i_vino.snap == CEPH_NOSNAP) in ceph_put_fmode()
3655 ceph_check_caps(ci, 0, NULL); in ceph_put_fmode()
3669 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_encode_inode_release() local
3675 spin_lock(&ci->i_ceph_lock); in ceph_encode_inode_release()
3676 used = __ceph_caps_used(ci); in ceph_encode_inode_release()
3677 dirty = __ceph_caps_dirty(ci); in ceph_encode_inode_release()
3686 cap = __get_cap_for_mds(ci, mds); in ceph_encode_inode_release()
3693 int wanted = __ceph_caps_wanted(ci); in ceph_encode_inode_release()
3694 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0) in ceph_encode_inode_release()
3728 spin_unlock(&ci->i_ceph_lock); in ceph_encode_inode_release()