Lines matching refs: rnp

Identifier cross-reference: each entry below gives a source line number, the matching code, and (after "in") the enclosing function; "argument" and "local" note whether rnp, a struct rcu_node pointer, is a parameter or a local variable at that site.
157 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
192 unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp) in rcu_rnp_online_cpus() argument
194 return ACCESS_ONCE(rnp->qsmaskinitnext); in rcu_rnp_online_cpus()
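The ACCESS_ONCE() wrapper used here (and throughout the listing) was defined in include/linux/compiler.h of this era essentially as in the stand-alone sketch below; the volatile cast forces the compiler to emit a single, untorn load or store. The qsmaskinitnext variable is just a stand-in for the rcu_node field:

#include <stdio.h>

/* Classic kernel definition: the volatile-qualified access keeps the
 * compiler from tearing, fusing, or refetching the load/store. */
#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned long qsmaskinitnext; /* stand-in for rnp->qsmaskinitnext */

int main(void)
{
	ACCESS_ONCE(qsmaskinitnext) = 0xf;
	printf("online-CPU snapshot: 0x%lx\n", ACCESS_ONCE(qsmaskinitnext));
	return 0;
}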
353 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
541 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_future_needs_gp() local
542 int idx = (ACCESS_ONCE(rnp->completed) + 1) & 0x1; in rcu_future_needs_gp()
543 int *fp = &rnp->need_future_gp[idx]; in rcu_future_needs_gp()
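rcu_future_needs_gp() indexes need_future_gp[] with (completed + 1) & 0x1, and rcu_start_future_gp()/rcu_future_gp_cleanup() further down use the same c & 0x1 slotting: only the low-order bit of a grace-period number selects a counter, so a two-slot ring suffices. A minimal compilable sketch of that ring, with illustrative names and none of the kernel's locking:

#include <stdio.h>

/* Two-slot ring: requests for grace period c land in slot c & 0x1,
 * so slot (c + 1) & 0x1 can accumulate requests for the next period
 * while period c is still being cleaned up. */
static int need_future_gp[2];

static void request_gp(unsigned long c)
{
	need_future_gp[c & 0x1]++;
}

static int cleanup_gp(unsigned long c)
{
	need_future_gp[c & 0x1] = 0;          /* period c is done */
	return need_future_gp[(c + 1) & 0x1]; /* more work queued? */
}

int main(void)
{
	request_gp(4);
	request_gp(5);
	printf("after cleanup of 4, needmore = %d\n", cleanup_gp(4));
	return 0;
}

The "needmore" return is exactly what rcu_future_gp_cleanup() (lines 1521-1530) reports back so that the grace-period kthread knows whether to start another period.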
971 struct rcu_node *rnp; in rcu_lockdep_current_cpu_online() local
978 rnp = rdp->mynode; in rcu_lockdep_current_cpu_online()
979 ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) || in rcu_lockdep_current_cpu_online()
1149 struct rcu_node *rnp; in rcu_dump_cpu_stacks() local
1151 rcu_for_each_leaf_node(rsp, rnp) { in rcu_dump_cpu_stacks()
1152 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_dump_cpu_stacks()
1153 if (rnp->qsmask != 0) { in rcu_dump_cpu_stacks()
1154 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) in rcu_dump_cpu_stacks()
1155 if (rnp->qsmask & (1UL << cpu)) in rcu_dump_cpu_stacks()
1156 dump_cpu_task(rnp->grplo + cpu); in rcu_dump_cpu_stacks()
1158 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_dump_cpu_stacks()
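The stack-dump loop above (repeated in print_other_cpu_stall() just below) maps each set bit of a leaf node's qsmask to a CPU number within the node's grplo..grphi span. A compilable sketch of that bit-to-CPU walk; the struct and the dump_cpu_task() stub are simplified stand-ins for the kernel's:

#include <stdio.h>

struct node {                 /* stand-in for struct rcu_node */
	unsigned long qsmask; /* one bit per CPU still owing a QS */
	int grplo, grphi;     /* CPU span covered by this leaf */
};

static void dump_cpu_task(int cpu)
{
	printf("would dump stack of CPU %d\n", cpu);
}

int main(void)
{
	struct node n = { .qsmask = 0x5, .grplo = 8, .grphi = 11 };
	int cpu;

	/* Bit i of qsmask corresponds to CPU grplo + i. */
	for (cpu = 0; cpu <= n.grphi - n.grplo; cpu++)
		if (n.qsmask & (1UL << cpu))
			dump_cpu_task(n.grplo + cpu);
	return 0;
}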
1170 struct rcu_node *rnp = rcu_get_root(rsp); in print_other_cpu_stall() local
1175 raw_spin_lock_irqsave(&rnp->lock, flags); in print_other_cpu_stall()
1178 raw_spin_unlock_irqrestore(&rnp->lock, flags); in print_other_cpu_stall()
1182 raw_spin_unlock_irqrestore(&rnp->lock, flags); in print_other_cpu_stall()
1192 rcu_for_each_leaf_node(rsp, rnp) { in print_other_cpu_stall()
1193 raw_spin_lock_irqsave(&rnp->lock, flags); in print_other_cpu_stall()
1194 ndetected += rcu_print_task_stall(rnp); in print_other_cpu_stall()
1195 if (rnp->qsmask != 0) { in print_other_cpu_stall()
1196 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) in print_other_cpu_stall()
1197 if (rnp->qsmask & (1UL << cpu)) { in print_other_cpu_stall()
1199 rnp->grplo + cpu); in print_other_cpu_stall()
1203 raw_spin_unlock_irqrestore(&rnp->lock, flags); in print_other_cpu_stall()
1242 struct rcu_node *rnp = rcu_get_root(rsp); in print_cpu_stall() local
1264 raw_spin_lock_irqsave(&rnp->lock, flags); in print_cpu_stall()
1268 raw_spin_unlock_irqrestore(&rnp->lock, flags); in print_cpu_stall()
1287 struct rcu_node *rnp; in check_cpu_stall() local
1321 rnp = rdp->mynode; in check_cpu_stall()
1323 (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) { in check_cpu_stall()
1387 struct rcu_node *rnp) in rcu_cbs_completed() argument
1396 if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed) in rcu_cbs_completed()
1397 return rnp->completed + 1; in rcu_cbs_completed()
1403 return rnp->completed + 2; in rcu_cbs_completed()
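rcu_cbs_completed() applies a simple rule: queried at the root while no grace period is in progress (gpnum == completed), callbacks queued now are safe after the next period, completed + 1; otherwise the in-progress period may not cover them, so a full extra period is needed, completed + 2. A sketch of that rule under those assumptions, with simplified types:

#include <stdbool.h>
#include <stdio.h>

struct node {                    /* stand-in for struct rcu_node */
	unsigned long gpnum;     /* most recently started grace period */
	unsigned long completed; /* most recently completed grace period */
	bool is_root;
};

/* Grace period after which callbacks queued now are safe to invoke. */
static unsigned long cbs_completed(const struct node *rnp)
{
	if (rnp->is_root && rnp->gpnum == rnp->completed)
		return rnp->completed + 1; /* idle: next GP suffices */
	return rnp->completed + 2;         /* in flight: need one more */
}

int main(void)
{
	struct node idle = { .gpnum = 7, .completed = 7, .is_root = true };
	struct node busy = { .gpnum = 8, .completed = 7, .is_root = true };

	printf("idle root -> %lu, busy root -> %lu\n",
	       cbs_completed(&idle), cbs_completed(&busy));
	return 0;
}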
1410 static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp, in trace_rcu_future_gp() argument
1413 trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum, in trace_rcu_future_gp()
1414 rnp->completed, c, rnp->level, in trace_rcu_future_gp()
1415 rnp->grplo, rnp->grphi, s); in trace_rcu_future_gp()
1427 rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp, in rcu_start_future_gp() argument
1439 c = rcu_cbs_completed(rdp->rsp, rnp); in rcu_start_future_gp()
1440 trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf")); in rcu_start_future_gp()
1441 if (rnp->need_future_gp[c & 0x1]) { in rcu_start_future_gp()
1442 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf")); in rcu_start_future_gp()
1459 if (rnp->gpnum != rnp->completed || in rcu_start_future_gp()
1461 rnp->need_future_gp[c & 0x1]++; in rcu_start_future_gp()
1462 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf")); in rcu_start_future_gp()
1471 if (rnp != rnp_root) { in rcu_start_future_gp()
1492 trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot")); in rcu_start_future_gp()
1501 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot")); in rcu_start_future_gp()
1503 trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot")); in rcu_start_future_gp()
1507 if (rnp != rnp_root) in rcu_start_future_gp()
1521 static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) in rcu_future_gp_cleanup() argument
1523 int c = rnp->completed; in rcu_future_gp_cleanup()
1527 rcu_nocb_gp_cleanup(rsp, rnp); in rcu_future_gp_cleanup()
1528 rnp->need_future_gp[c & 0x1] = 0; in rcu_future_gp_cleanup()
1529 needmore = rnp->need_future_gp[(c + 1) & 0x1]; in rcu_future_gp_cleanup()
1530 trace_rcu_future_gp(rnp, rdp, c, in rcu_future_gp_cleanup()
1563 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_accelerate_cbs() argument
1588 c = rcu_cbs_completed(rsp, rnp); in rcu_accelerate_cbs()
1613 ret = rcu_start_future_gp(rnp, rdp, NULL); in rcu_accelerate_cbs()
1633 static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_advance_cbs() argument
1647 if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i])) in rcu_advance_cbs()
1664 return rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_advance_cbs()
1673 static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, in __note_gp_changes() argument
1679 if (rdp->completed == rnp->completed && in __note_gp_changes()
1683 ret = rcu_accelerate_cbs(rsp, rnp, rdp); in __note_gp_changes()
1688 ret = rcu_advance_cbs(rsp, rnp, rdp); in __note_gp_changes()
1691 rdp->completed = rnp->completed; in __note_gp_changes()
1695 if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) { in __note_gp_changes()
1701 rdp->gpnum = rnp->gpnum; in __note_gp_changes()
1705 rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); in __note_gp_changes()
1716 struct rcu_node *rnp; in note_gp_changes() local
1719 rnp = rdp->mynode; in note_gp_changes()
1720 if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) && in note_gp_changes()
1721 rdp->completed == ACCESS_ONCE(rnp->completed) && in note_gp_changes()
1723 !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */ in note_gp_changes()
1728 needwake = __note_gp_changes(rsp, rnp, rdp); in note_gp_changes()
1729 raw_spin_unlock_irqrestore(&rnp->lock, flags); in note_gp_changes()
1741 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_init() local
1744 raw_spin_lock_irq(&rnp->lock); in rcu_gp_init()
1748 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_init()
1758 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_init()
1767 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_init()
1775 rcu_for_each_leaf_node(rsp, rnp) { in rcu_gp_init()
1776 raw_spin_lock_irq(&rnp->lock); in rcu_gp_init()
1778 if (rnp->qsmaskinit == rnp->qsmaskinitnext && in rcu_gp_init()
1779 !rnp->wait_blkd_tasks) { in rcu_gp_init()
1781 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_init()
1786 oldmask = rnp->qsmaskinit; in rcu_gp_init()
1787 rnp->qsmaskinit = rnp->qsmaskinitnext; in rcu_gp_init()
1790 if (!oldmask != !rnp->qsmaskinit) { in rcu_gp_init()
1792 rcu_init_new_rnp(rnp); in rcu_gp_init()
1793 else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */ in rcu_gp_init()
1794 rnp->wait_blkd_tasks = true; in rcu_gp_init()
1796 rcu_cleanup_dead_rnp(rnp); in rcu_gp_init()
1808 if (rnp->wait_blkd_tasks && in rcu_gp_init()
1809 (!rcu_preempt_has_tasks(rnp) || in rcu_gp_init()
1810 rnp->qsmaskinit)) { in rcu_gp_init()
1811 rnp->wait_blkd_tasks = false; in rcu_gp_init()
1812 rcu_cleanup_dead_rnp(rnp); in rcu_gp_init()
1815 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_init()
1831 rcu_for_each_node_breadth_first(rsp, rnp) { in rcu_gp_init()
1832 raw_spin_lock_irq(&rnp->lock); in rcu_gp_init()
1835 rcu_preempt_check_blocked_tasks(rnp); in rcu_gp_init()
1836 rnp->qsmask = rnp->qsmaskinit; in rcu_gp_init()
1837 ACCESS_ONCE(rnp->gpnum) = rsp->gpnum; in rcu_gp_init()
1838 if (WARN_ON_ONCE(rnp->completed != rsp->completed)) in rcu_gp_init()
1839 ACCESS_ONCE(rnp->completed) = rsp->completed; in rcu_gp_init()
1840 if (rnp == rdp->mynode) in rcu_gp_init()
1841 (void)__note_gp_changes(rsp, rnp, rdp); in rcu_gp_init()
1842 rcu_preempt_boost_start_gp(rnp); in rcu_gp_init()
1843 trace_rcu_grace_period_init(rsp->name, rnp->gpnum, in rcu_gp_init()
1844 rnp->level, rnp->grplo, in rcu_gp_init()
1845 rnp->grphi, rnp->qsmask); in rcu_gp_init()
1846 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_init()
1865 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_fqs() local
1886 raw_spin_lock_irq(&rnp->lock); in rcu_gp_fqs()
1890 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_fqs()
1904 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_cleanup() local
1907 raw_spin_lock_irq(&rnp->lock); in rcu_gp_cleanup()
1921 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_cleanup()
1932 rcu_for_each_node_breadth_first(rsp, rnp) { in rcu_gp_cleanup()
1933 raw_spin_lock_irq(&rnp->lock); in rcu_gp_cleanup()
1935 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); in rcu_gp_cleanup()
1936 WARN_ON_ONCE(rnp->qsmask); in rcu_gp_cleanup()
1937 ACCESS_ONCE(rnp->completed) = rsp->gpnum; in rcu_gp_cleanup()
1939 if (rnp == rdp->mynode) in rcu_gp_cleanup()
1940 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; in rcu_gp_cleanup()
1942 nocb += rcu_future_gp_cleanup(rsp, rnp); in rcu_gp_cleanup()
1943 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_cleanup()
1947 rnp = rcu_get_root(rsp); in rcu_gp_cleanup()
1948 raw_spin_lock_irq(&rnp->lock); in rcu_gp_cleanup()
1950 rcu_nocb_gp_set(rnp, nocb); in rcu_gp_cleanup()
1958 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp; in rcu_gp_cleanup()
1965 raw_spin_unlock_irq(&rnp->lock); in rcu_gp_cleanup()
1978 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_gp_kthread() local
2021 (!ACCESS_ONCE(rnp->qsmask) && in rcu_gp_kthread()
2022 !rcu_preempt_blocked_readers_cgp(rnp)), in rcu_gp_kthread()
2026 if (!ACCESS_ONCE(rnp->qsmask) && in rcu_gp_kthread()
2027 !rcu_preempt_blocked_readers_cgp(rnp)) in rcu_gp_kthread()
2077 rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_start_gp_advanced() argument
2113 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_start_gp() local
2124 ret = rcu_advance_cbs(rsp, rnp, rdp) || ret; in rcu_start_gp()
2125 ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret; in rcu_start_gp()
2156 struct rcu_node *rnp, unsigned long gps, unsigned long flags) in rcu_report_qs_rnp() argument
2157 __releases(rnp->lock) in rcu_report_qs_rnp()
2164 if (!(rnp->qsmask & mask) || rnp->gpnum != gps) { in rcu_report_qs_rnp()
2170 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_qs_rnp()
2174 rnp->qsmask &= ~mask; in rcu_report_qs_rnp()
2175 trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum, in rcu_report_qs_rnp()
2176 mask, rnp->qsmask, rnp->level, in rcu_report_qs_rnp()
2177 rnp->grplo, rnp->grphi, in rcu_report_qs_rnp()
2178 !!rnp->gp_tasks); in rcu_report_qs_rnp()
2179 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_report_qs_rnp()
2182 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_qs_rnp()
2185 mask = rnp->grpmask; in rcu_report_qs_rnp()
2186 if (rnp->parent == NULL) { in rcu_report_qs_rnp()
2192 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_qs_rnp()
2193 rnp_c = rnp; in rcu_report_qs_rnp()
2194 rnp = rnp->parent; in rcu_report_qs_rnp()
2195 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_report_qs_rnp()
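rcu_report_qs_rnp() clears mask bits at a node and, once that node's qsmask reaches zero, climbs to the parent carrying the child's grpmask, until either some node still has bits outstanding or the root is reached. A single-threaded sketch of that upward walk, with the locking, grace-period checks, and blocked-reader checks omitted:

#include <stdio.h>

struct node {                  /* stand-in for struct rcu_node */
	struct node *parent;
	unsigned long qsmask;  /* children/CPUs still owing a QS */
	unsigned long grpmask; /* this node's bit in parent->qsmask */
};

/* Report a quiescent state for `mask` at `rnp`, propagating upward. */
static void report_qs(struct node *rnp, unsigned long mask)
{
	for (;;) {
		rnp->qsmask &= ~mask;
		if (rnp->qsmask != 0 || rnp->parent == NULL)
			return;      /* more QSes needed, or at root */
		mask = rnp->grpmask; /* clear our bit one level up */
		rnp = rnp->parent;
	}
}

int main(void)
{
	struct node root = { .parent = NULL, .qsmask = 0x1 };
	struct node leaf = { .parent = &root, .qsmask = 0x3, .grpmask = 0x1 };

	report_qs(&leaf, 0x1);
	printf("leaf=%lx root=%lx\n", leaf.qsmask, root.qsmask); /* 2 1 */
	report_qs(&leaf, 0x2);
	printf("leaf=%lx root=%lx\n", leaf.qsmask, root.qsmask); /* 0 0 */
	return 0;
}

rcu_cleanup_dead_rnp() and rcu_init_new_rnp() later in the listing use the same climb to clear or set qsmaskinit bits as leaves lose their last CPU or gain their first.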
2216 struct rcu_node *rnp, unsigned long flags) in rcu_report_unblock_qs_rnp() argument
2217 __releases(rnp->lock) in rcu_report_unblock_qs_rnp()
2224 rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_report_unblock_qs_rnp()
2225 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_unblock_qs_rnp()
2229 rnp_p = rnp->parent; in rcu_report_unblock_qs_rnp()
2240 gps = rnp->gpnum; in rcu_report_unblock_qs_rnp()
2241 mask = rnp->grpmask; in rcu_report_unblock_qs_rnp()
2242 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ in rcu_report_unblock_qs_rnp()
2263 struct rcu_node *rnp; in rcu_report_qs_rdp() local
2265 rnp = rdp->mynode; in rcu_report_qs_rdp()
2266 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_report_qs_rdp()
2270 rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum || in rcu_report_qs_rdp()
2281 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_qs_rdp()
2285 if ((rnp->qsmask & mask) == 0) { in rcu_report_qs_rdp()
2286 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_qs_rdp()
2294 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_report_qs_rdp()
2296 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags); in rcu_report_qs_rdp()
2346 struct rcu_node *rnp, struct rcu_data *rdp) in rcu_send_cbs_to_orphanage() argument
2453 RCU_TRACE(struct rcu_node *rnp = rdp->mynode); in rcu_cleanup_dying_cpu()
2457 rnp->gpnum + 1 - !!(rnp->qsmask & mask), in rcu_cleanup_dying_cpu()
2481 struct rcu_node *rnp = rnp_leaf; in rcu_cleanup_dead_rnp() local
2483 if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp)) in rcu_cleanup_dead_rnp()
2486 mask = rnp->grpmask; in rcu_cleanup_dead_rnp()
2487 rnp = rnp->parent; in rcu_cleanup_dead_rnp()
2488 if (!rnp) in rcu_cleanup_dead_rnp()
2490 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ in rcu_cleanup_dead_rnp()
2492 rnp->qsmaskinit &= ~mask; in rcu_cleanup_dead_rnp()
2493 rnp->qsmask &= ~mask; in rcu_cleanup_dead_rnp()
2494 if (rnp->qsmaskinit) { in rcu_cleanup_dead_rnp()
2495 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ in rcu_cleanup_dead_rnp()
2498 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ in rcu_cleanup_dead_rnp()
2512 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_cleanup_dying_idle_cpu() local
2516 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_cleanup_dying_idle_cpu()
2518 rnp->qsmaskinitnext &= ~mask; in rcu_cleanup_dying_idle_cpu()
2519 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_cleanup_dying_idle_cpu()
2533 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ in rcu_cleanup_dead_cpu() local
2536 rcu_boost_kthread_setaffinity(rnp, -1); in rcu_cleanup_dead_cpu()
2540 rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp); in rcu_cleanup_dead_cpu()
2726 struct rcu_node *rnp; in force_qs_rnp() local
2728 rcu_for_each_leaf_node(rsp, rnp) { in force_qs_rnp()
2731 raw_spin_lock_irqsave(&rnp->lock, flags); in force_qs_rnp()
2734 raw_spin_unlock_irqrestore(&rnp->lock, flags); in force_qs_rnp()
2737 if (rnp->qsmask == 0) { in force_qs_rnp()
2740 rcu_preempt_blocked_readers_cgp(rnp)) { in force_qs_rnp()
2746 rcu_initiate_boost(rnp, flags); in force_qs_rnp()
2750 if (rnp->parent && in force_qs_rnp()
2751 (rnp->parent->qsmask & rnp->grpmask)) { in force_qs_rnp()
2757 rcu_report_unblock_qs_rnp(rsp, rnp, flags); in force_qs_rnp()
2762 cpu = rnp->grplo; in force_qs_rnp()
2764 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { in force_qs_rnp()
2765 if ((rnp->qsmask & bit) != 0) { in force_qs_rnp()
2766 if ((rnp->qsmaskinit & bit) == 0) in force_qs_rnp()
2774 rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags); in force_qs_rnp()
2777 raw_spin_unlock_irqrestore(&rnp->lock, flags); in force_qs_rnp()
2790 struct rcu_node *rnp; in force_quiescent_state() local
2794 rnp = __this_cpu_read(rsp->rda->mynode); in force_quiescent_state()
2795 for (; rnp != NULL; rnp = rnp->parent) { in force_quiescent_state()
2797 !raw_spin_trylock(&rnp->fqslock); in force_quiescent_state()
2804 rnp_old = rnp; in force_quiescent_state()
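force_quiescent_state() uses funnel locking on the per-node fqslock: each caller climbs leaf-to-root, trylocking at every level and releasing the level below; a failed trylock means another caller is already funneling up, so this one can simply drop out. A pthread-based sketch of the pattern (build with -pthread); the kernel additionally bails out early when the FQS-requested flag is already set:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {                 /* stand-in for struct rcu_node */
	struct node *parent;
	pthread_mutex_t fqslock;
};

/* At most one contender reaches the root; the rest drop out early. */
static bool funnel_to_root(struct node *leaf)
{
	struct node *rnp, *rnp_old = NULL;

	for (rnp = leaf; rnp != NULL; rnp = rnp->parent) {
		bool lost = pthread_mutex_trylock(&rnp->fqslock) != 0;

		if (rnp_old)
			pthread_mutex_unlock(&rnp_old->fqslock);
		if (lost)
			return false; /* someone else is on the way up */
		rnp_old = rnp;
	}
	/* Winner: rnp_old is the root; do the one-time work, then drop. */
	pthread_mutex_unlock(&rnp_old->fqslock);
	return true;
}

int main(void)
{
	struct node root = { NULL, PTHREAD_MUTEX_INITIALIZER };
	struct node leaf = { &root, PTHREAD_MUTEX_INITIALIZER };

	printf("reached root: %s\n", funnel_to_root(&leaf) ? "yes" : "no");
	return 0;
}

The payoff is that under contention most callers touch only one or two low-level locks, keeping the root lock from becoming a global bottleneck.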
3417 struct rcu_node *rnp = rdp->mynode; in __rcu_pending() local
3453 if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ in __rcu_pending()
3459 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum || in __rcu_pending()
3707 struct rcu_node *rnp = rnp_leaf; in rcu_init_new_rnp() local
3710 mask = rnp->grpmask; in rcu_init_new_rnp()
3711 rnp = rnp->parent; in rcu_init_new_rnp()
3712 if (rnp == NULL) in rcu_init_new_rnp()
3714 raw_spin_lock(&rnp->lock); /* Interrupts already disabled. */ in rcu_init_new_rnp()
3715 rnp->qsmaskinit |= mask; in rcu_init_new_rnp()
3716 raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */ in rcu_init_new_rnp()
3728 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_boot_init_percpu_data() local
3731 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_boot_init_percpu_data()
3739 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_boot_init_percpu_data()
3754 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_init_percpu_data() local
3757 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_init_percpu_data()
3768 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ in rcu_init_percpu_data()
3775 rnp = rdp->mynode; in rcu_init_percpu_data()
3777 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ in rcu_init_percpu_data()
3779 rnp->qsmaskinitnext |= mask; in rcu_init_percpu_data()
3780 rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */ in rcu_init_percpu_data()
3781 rdp->completed = rnp->completed; in rcu_init_percpu_data()
3786 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_init_percpu_data()
3805 struct rcu_node *rnp = rdp->mynode; in rcu_cpu_notify() local
3817 rcu_boost_kthread_setaffinity(rnp, -1); in rcu_cpu_notify()
3820 rcu_boost_kthread_setaffinity(rnp, cpu); in rcu_cpu_notify()
3874 struct rcu_node *rnp; in rcu_spawn_gp_kthread() local
3894 rnp = rcu_get_root(rsp); in rcu_spawn_gp_kthread()
3895 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_spawn_gp_kthread()
3902 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_spawn_gp_kthread()
3970 struct rcu_node *rnp; in rcu_init_one() local
3992 rnp = rsp->level[i]; in rcu_init_one()
3993 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { in rcu_init_one()
3994 raw_spin_lock_init(&rnp->lock); in rcu_init_one()
3995 lockdep_set_class_and_name(&rnp->lock, in rcu_init_one()
3997 raw_spin_lock_init(&rnp->fqslock); in rcu_init_one()
3998 lockdep_set_class_and_name(&rnp->fqslock, in rcu_init_one()
4000 rnp->gpnum = rsp->gpnum; in rcu_init_one()
4001 rnp->completed = rsp->completed; in rcu_init_one()
4002 rnp->qsmask = 0; in rcu_init_one()
4003 rnp->qsmaskinit = 0; in rcu_init_one()
4004 rnp->grplo = j * cpustride; in rcu_init_one()
4005 rnp->grphi = (j + 1) * cpustride - 1; in rcu_init_one()
4006 if (rnp->grphi >= nr_cpu_ids) in rcu_init_one()
4007 rnp->grphi = nr_cpu_ids - 1; in rcu_init_one()
4009 rnp->grpnum = 0; in rcu_init_one()
4010 rnp->grpmask = 0; in rcu_init_one()
4011 rnp->parent = NULL; in rcu_init_one()
4013 rnp->grpnum = j % rsp->levelspread[i - 1]; in rcu_init_one()
4014 rnp->grpmask = 1UL << rnp->grpnum; in rcu_init_one()
4015 rnp->parent = rsp->level[i - 1] + in rcu_init_one()
4018 rnp->level = i; in rcu_init_one()
4019 INIT_LIST_HEAD(&rnp->blkd_tasks); in rcu_init_one()
4020 rcu_init_one_nocb(rnp); in rcu_init_one()
4025 rnp = rsp->level[rcu_num_lvls - 1]; in rcu_init_one()
4027 while (i > rnp->grphi) in rcu_init_one()
4028 rnp++; in rcu_init_one()
4029 per_cpu_ptr(rsp->rda, i)->mynode = rnp; in rcu_init_one()
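The rcu_init_one() geometry above is all small arithmetic: leaf j covers CPUs j * cpustride through (j + 1) * cpustride - 1 (clamped to nr_cpu_ids - 1), a non-root node's index under its parent is j % levelspread[i - 1], and its bit in the parent's masks is 1UL << grpnum. A stand-alone sketch of that span computation for one leaf level, with illustrative sizes:

#include <stdio.h>

int main(void)
{
	const int nr_cpu_ids = 14; /* CPUs actually present (example) */
	const int cpustride = 4;   /* CPUs per leaf node (example) */
	const int levelspread = 4; /* children per parent node (example) */
	int j;

	for (j = 0; j * cpustride < nr_cpu_ids; j++) {
		int grplo = j * cpustride;
		int grphi = (j + 1) * cpustride - 1;
		int grpnum = j % levelspread;

		if (grphi >= nr_cpu_ids)
			grphi = nr_cpu_ids - 1; /* clamp the last node */
		printf("leaf %d: CPUs %d-%d, grpmask 0x%lx\n",
		       j, grplo, grphi, 1UL << grpnum);
	}
	return 0;
}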