delayed_free 5191 fs/cifs/connect.c         call_rcu(&cifs_sb->rcu, delayed_free);
delayed_free  739 fs/fat/inode.c            call_rcu(&sbi->rcu, delayed_free);
delayed_free  335 kernel/locking/lockdep.c  } delayed_free;
delayed_free  971 kernel/locking/lockdep.c  for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
delayed_free 1086 kernel/locking/lockdep.c  init_rcu_head(&delayed_free.rcu_head);
delayed_free 1095 kernel/locking/lockdep.c  INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
delayed_free 1096 kernel/locking/lockdep.c  INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
delayed_free 4913 kernel/locking/lockdep.c  return delayed_free.pf + delayed_free.index;
delayed_free 4929 kernel/locking/lockdep.c  if (delayed_free.scheduled)
delayed_free 4932 kernel/locking/lockdep.c  delayed_free.scheduled = true;
delayed_free 4934 kernel/locking/lockdep.c  WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
delayed_free 4935 kernel/locking/lockdep.c  delayed_free.index ^= 1;
delayed_free 4937 kernel/locking/lockdep.c  call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
delayed_free 4964 kernel/locking/lockdep.c  if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
delayed_free 4972 kernel/locking/lockdep.c  pf = delayed_free.pf + (delayed_free.index ^ 1);
delayed_free 4974 kernel/locking/lockdep.c  delayed_free.scheduled = false;
delayed_free 4979 kernel/locking/lockdep.c  call_rcu_zapped(delayed_free.pf + delayed_free.index);
delayed_free 5049 kernel/locking/lockdep.c  struct pending_free *pf = delayed_free.pf;
delayed_free 5150 kernel/locking/lockdep.c  struct pending_free *pf = delayed_free.pf;
delayed_free 5228 kernel/locking/lockdep.c  sizeof(delayed_free)
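The fs/cifs and fs/fat hits above are instances of the standard call_rcu() deferred-free idiom: the object embeds a struct rcu_head, and instead of freeing it immediately the owner hands it to RCU, which invokes the callback only after every pre-existing reader has finished. A minimal sketch of that idiom follows; struct my_sb_info and put_sb_info() are hypothetical stand-ins for illustration (the real callers pass struct cifs_sb_info and struct msdos_sb_info), not the kernel's actual code.

/*
 * Sketch of the call_rcu() deferred-free pattern.
 * my_sb_info and put_sb_info() are made-up names for this example.
 */
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct my_sb_info {
	/* ... filesystem-private state ... */
	struct rcu_head rcu;	/* links the object into the RCU callback list */
};

/* RCU callback: runs after a grace period, once no reader can still see the object. */
static void delayed_free(struct rcu_head *p)
{
	struct my_sb_info *sbi = container_of(p, struct my_sb_info, rcu);

	kfree(sbi);
}

static void put_sb_info(struct my_sb_info *sbi)
{
	/*
	 * Do not kfree() directly: lockless readers may still hold a
	 * reference. Defer the free until after the next grace period.
	 */
	call_rcu(&sbi->rcu, delayed_free);
}

The kernel/locking/lockdep.c hits build on the same primitive but, as the quoted lines show, double-buffer the work: delayed_free holds two struct pending_free lists, delayed_free.index selects the buffer currently collecting zapped entries, at most one RCU callback is in flight at a time (delayed_free.scheduled), and free_zapped_rcu() drains the previously scheduled buffer at delayed_free.index ^ 1 before re-arming via call_rcu_zapped() if the other buffer has accumulated more work.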