Lines Matching refs:rnp

This is an identifier cross-reference for rnp, the struct rcu_node pointer used throughout the Linux kernel's preemptible-RCU code; the functions below all appear to come from kernel/rcu/tree_plugin.h of roughly the v4.0 era. Each entry gives the source line number, the matching line, and the enclosing function, with "local"/"argument" recording whether rnp is a local variable or a parameter there.

95 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
96 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
147 struct rcu_node *rnp; in rcu_preempt_note_context_switch() local
154 rnp = rdp->mynode; in rcu_preempt_note_context_switch()
155 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_preempt_note_context_switch()
158 t->rcu_blocked_node = rnp; in rcu_preempt_note_context_switch()
178 WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0); in rcu_preempt_note_context_switch()
180 if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) { in rcu_preempt_note_context_switch()
181 list_add(&t->rcu_node_entry, rnp->gp_tasks->prev); in rcu_preempt_note_context_switch()
182 rnp->gp_tasks = &t->rcu_node_entry; in rcu_preempt_note_context_switch()
184 if (rnp->boost_tasks != NULL) in rcu_preempt_note_context_switch()
185 rnp->boost_tasks = rnp->gp_tasks; in rcu_preempt_note_context_switch()
188 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_note_context_switch()
189 if (rnp->qsmask & rdp->grpmask) in rcu_preempt_note_context_switch()
190 rnp->gp_tasks = &t->rcu_node_entry; in rcu_preempt_note_context_switch()
194 (rnp->qsmask & rdp->grpmask) in rcu_preempt_note_context_switch()
195 ? rnp->gpnum in rcu_preempt_note_context_switch()
196 : rnp->gpnum + 1); in rcu_preempt_note_context_switch()
197 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_preempt_note_context_switch()
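
The cluster above is the enqueue side: when a task is preempted inside an RCU read-side critical section, rcu_preempt_note_context_switch() queues it on its leaf rcu_node's blkd_tasks list, either inside or outside the segment that blocks the current grace period. A minimal user-space sketch of the placement rule, assuming a kernel-style circular list (names and list helpers here are illustrative, not the kernel's):

    /* Toy model of the blkd_tasks queuing decision. */
    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    /* Insert n right after prev (kernel-style list_add()). */
    static void list_add(struct list_head *n, struct list_head *prev)
    {
        n->next = prev->next;
        n->prev = prev;
        prev->next->prev = n;
        prev->next = n;
    }

    struct task {
        struct list_head rcu_node_entry;
        int pid;
    };

    struct rnp_model {
        struct list_head blkd_tasks;   /* all readers blocked on this node */
        struct list_head *gp_tasks;    /* first task blocking the current GP */
        unsigned long qsmask, grpmask; /* QSes still owed / this CPU's bit */
    };

    static void note_context_switch(struct rnp_model *rnp, struct task *t)
    {
        if ((rnp->qsmask & rnp->grpmask) && rnp->gp_tasks != NULL) {
            /* GP in progress and already blocked: queue just before the
             * current gp_tasks head, extending the blocking segment. */
            list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
            rnp->gp_tasks = &t->rcu_node_entry;
        } else {
            /* Queue at the head; block the GP only if this CPU still
             * owes a quiescent state. */
            list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
            if (rnp->qsmask & rnp->grpmask)
                rnp->gp_tasks = &t->rcu_node_entry;
        }
    }

    int main(void)
    {
        struct rnp_model rnp = { .gp_tasks = NULL, .qsmask = 1, .grpmask = 1 };
        struct task a = { .pid = 1 }, b = { .pid = 2 };

        list_init(&rnp.blkd_tasks);
        note_context_switch(&rnp, &a);          /* first blocker */
        note_context_switch(&rnp, &b);          /* queued before it */
        for (struct list_head *p = rnp.blkd_tasks.next;
             p != &rnp.blkd_tasks; p = p->next)
            printf("pid %d%s\n",
                   ((struct task *)((char *)p -
                    offsetof(struct task, rcu_node_entry)))->pid,
                   p == rnp.gp_tasks ? "  <- gp_tasks" : "");
        return 0;
    }

The invariant this preserves is that the tasks blocking the current grace period are exactly those from gp_tasks to the tail of blkd_tasks.
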
225 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) in rcu_preempt_blocked_readers_cgp() argument
227 return rnp->gp_tasks != NULL; in rcu_preempt_blocked_readers_cgp()
235 struct rcu_node *rnp) in rcu_next_node_entry() argument
240 if (np == &rnp->blkd_tasks) in rcu_next_node_entry()
249 static bool rcu_preempt_has_tasks(struct rcu_node *rnp) in rcu_preempt_has_tasks() argument
251 return !list_empty(&rnp->blkd_tasks); in rcu_preempt_has_tasks()
269 struct rcu_node *rnp; in rcu_read_unlock_special() local
315 rnp = t->rcu_blocked_node; in rcu_read_unlock_special()
316 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ in rcu_read_unlock_special()
318 if (rnp == t->rcu_blocked_node) in rcu_read_unlock_special()
320 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ in rcu_read_unlock_special()
322 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp); in rcu_read_unlock_special()
323 empty_exp = !rcu_preempted_readers_exp(rnp); in rcu_read_unlock_special()
325 np = rcu_next_node_entry(t, rnp); in rcu_read_unlock_special()
329 rnp->gpnum, t->pid); in rcu_read_unlock_special()
330 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_read_unlock_special()
331 rnp->gp_tasks = np; in rcu_read_unlock_special()
332 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_read_unlock_special()
333 rnp->exp_tasks = np; in rcu_read_unlock_special()
335 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_read_unlock_special()
336 rnp->boost_tasks = np; in rcu_read_unlock_special()
338 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t; in rcu_read_unlock_special()
347 empty_exp_now = !rcu_preempted_readers_exp(rnp); in rcu_read_unlock_special()
348 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_read_unlock_special()
350 rnp->gpnum, in rcu_read_unlock_special()
351 0, rnp->qsmask, in rcu_read_unlock_special()
352 rnp->level, in rcu_read_unlock_special()
353 rnp->grplo, in rcu_read_unlock_special()
354 rnp->grphi, in rcu_read_unlock_special()
355 !!rnp->gp_tasks); in rcu_read_unlock_special()
357 rnp, flags); in rcu_read_unlock_special()
359 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_read_unlock_special()
365 rt_mutex_unlock(&rnp->boost_mtx); in rcu_read_unlock_special()
373 rcu_report_exp_rnp(&rcu_preempt_state, rnp, true); in rcu_read_unlock_special()
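
The dequeue side, continuing the toy model above: when the outermost rcu_read_unlock() finds the task still queued, rcu_read_unlock_special() computes the successor via rcu_next_node_entry() (NULL when the departing task was last), advances any segment head that points at the task, and unlinks it:

    /* Successor of t on blkd_tasks; the list head itself maps to NULL. */
    static struct list_head *next_node_entry(struct rnp_model *rnp,
                                             struct task *t)
    {
        struct list_head *np = t->rcu_node_entry.next;

        if (np == &rnp->blkd_tasks)
            np = NULL;                  /* t was the last queued task */
        return np;
    }

    static void unlock_special(struct rnp_model *rnp, struct task *t)
    {
        struct list_head *np = next_node_entry(rnp, t);
        struct list_head *e = &t->rcu_node_entry;

        /* Advance any segment head that points at t; the kernel does
         * this under rnp->lock, and likewise for rnp->exp_tasks and,
         * with CONFIG_RCU_BOOST, rnp->boost_tasks. */
        if (rnp->gp_tasks == e)
            rnp->gp_tasks = np;

        e->prev->next = e->next;        /* list_del(e) */
        e->next->prev = e->prev;
    }
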
383 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) in rcu_print_detail_task_stall_rnp() argument
388 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_print_detail_task_stall_rnp()
389 if (!rcu_preempt_blocked_readers_cgp(rnp)) { in rcu_print_detail_task_stall_rnp()
390 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_print_detail_task_stall_rnp()
393 t = list_entry(rnp->gp_tasks, in rcu_print_detail_task_stall_rnp()
395 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) in rcu_print_detail_task_stall_rnp()
397 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_print_detail_task_stall_rnp()
406 struct rcu_node *rnp = rcu_get_root(rsp); in rcu_print_detail_task_stall() local
408 rcu_print_detail_task_stall_rnp(rnp); in rcu_print_detail_task_stall()
409 rcu_for_each_leaf_node(rsp, rnp) in rcu_print_detail_task_stall()
410 rcu_print_detail_task_stall_rnp(rnp); in rcu_print_detail_task_stall()
415 static void rcu_print_task_stall_begin(struct rcu_node *rnp) in rcu_print_task_stall_begin() argument
418 rnp->level, rnp->grplo, rnp->grphi); in rcu_print_task_stall_begin()
428 static void rcu_print_task_stall_begin(struct rcu_node *rnp) in rcu_print_task_stall_begin() argument
442 static int rcu_print_task_stall(struct rcu_node *rnp) in rcu_print_task_stall() argument
447 if (!rcu_preempt_blocked_readers_cgp(rnp)) in rcu_print_task_stall()
449 rcu_print_task_stall_begin(rnp); in rcu_print_task_stall()
450 t = list_entry(rnp->gp_tasks, in rcu_print_task_stall()
452 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_task_stall()
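
Both stall printers walk only the grace-period-blocking tail of blkd_tasks, starting from gp_tasks; that is what the list_entry() plus list_for_each_entry_continue() pairing does in the kernel. In the toy model:

    /* Print the PIDs of every task blocking the current GP. */
    static void print_blocking_tasks(struct rnp_model *rnp)
    {
        if (rnp->gp_tasks == NULL)
            return;                     /* no readers block this GP */
        for (struct list_head *p = rnp->gp_tasks;
             p != &rnp->blkd_tasks; p = p->next)
            printf(" P%d", ((struct task *)((char *)p -
                   offsetof(struct task, rcu_node_entry)))->pid);
        printf("\n");
    }
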
470 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) in rcu_preempt_check_blocked_tasks() argument
472 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)); in rcu_preempt_check_blocked_tasks()
473 if (rcu_preempt_has_tasks(rnp)) in rcu_preempt_check_blocked_tasks()
474 rnp->gp_tasks = rnp->blkd_tasks.next; in rcu_preempt_check_blocked_tasks()
475 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
556 static int rcu_preempted_readers_exp(struct rcu_node *rnp) in rcu_preempted_readers_exp() argument
558 return rnp->exp_tasks != NULL; in rcu_preempted_readers_exp()
570 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) in sync_rcu_preempt_exp_done() argument
572 return !rcu_preempted_readers_exp(rnp) && in sync_rcu_preempt_exp_done()
573 ACCESS_ONCE(rnp->expmask) == 0; in sync_rcu_preempt_exp_done()
586 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, in rcu_report_exp_rnp() argument
592 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_report_exp_rnp()
595 if (!sync_rcu_preempt_exp_done(rnp)) { in rcu_report_exp_rnp()
596 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_exp_rnp()
599 if (rnp->parent == NULL) { in rcu_report_exp_rnp()
600 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_report_exp_rnp()
607 mask = rnp->grpmask; in rcu_report_exp_rnp()
608 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ in rcu_report_exp_rnp()
609 rnp = rnp->parent; in rcu_report_exp_rnp()
610 raw_spin_lock(&rnp->lock); /* irqs already disabled */ in rcu_report_exp_rnp()
612 rnp->expmask &= ~mask; in rcu_report_exp_rnp()
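
rcu_report_exp_rnp() is the upward half of the expedited machinery: a node whose expedited blockers have all drained clears its grpmask bit in its parent's expmask and retests one level up, until either a still-busy level stops the walk or the root comes up clean. A self-contained sketch of the propagation (locking omitted; names are the model's, not the kernel's):

    #include <stdio.h>

    struct node {
        struct node *parent;
        unsigned long grpmask;   /* this node's bit in parent->expmask */
        unsigned long expmask;   /* children (or CPUs) still blocking */
        int exp_tasks;           /* nonzero: blocked readers remain */
    };

    static int exp_done(struct node *n)
    {
        return !n->exp_tasks && n->expmask == 0;
    }

    static void report_exp(struct node *n, int *gp_done)
    {
        for (;;) {
            if (!exp_done(n))
                return;              /* someone still blocks this node */
            if (n->parent == NULL) {
                *gp_done = 1;        /* root is clear: GP complete */
                return;
            }
            n->parent->expmask &= ~n->grpmask;
            n = n->parent;           /* retest one level up */
        }
    }

    int main(void)
    {
        struct node root = { .expmask = 0x3 };
        struct node l0 = { .parent = &root, .grpmask = 0x1 };
        struct node l1 = { .parent = &root, .grpmask = 0x2 };
        int done = 0;

        report_exp(&l0, &done);      /* clears bit 0; root still waits */
        report_exp(&l1, &done);      /* clears bit 1; root drains */
        printf("expedited GP done: %d\n", done);   /* prints 1 */
        return 0;
    }
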
626 sync_rcu_preempt_exp_init1(struct rcu_state *rsp, struct rcu_node *rnp) in sync_rcu_preempt_exp_init1() argument
632 raw_spin_lock_irqsave(&rnp->lock, flags); in sync_rcu_preempt_exp_init1()
634 WARN_ON_ONCE(rnp->expmask); in sync_rcu_preempt_exp_init1()
635 WARN_ON_ONCE(rnp->exp_tasks); in sync_rcu_preempt_exp_init1()
636 if (!rcu_preempt_has_tasks(rnp)) { in sync_rcu_preempt_exp_init1()
638 raw_spin_unlock_irqrestore(&rnp->lock, flags); in sync_rcu_preempt_exp_init1()
642 rnp->expmask = 1; in sync_rcu_preempt_exp_init1()
643 rnp_up = rnp; in sync_rcu_preempt_exp_init1()
654 raw_spin_unlock_irqrestore(&rnp->lock, flags); in sync_rcu_preempt_exp_init1()
669 sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp) in sync_rcu_preempt_exp_init2() argument
673 raw_spin_lock_irqsave(&rnp->lock, flags); in sync_rcu_preempt_exp_init2()
675 if (!rnp->expmask) { in sync_rcu_preempt_exp_init2()
677 raw_spin_unlock_irqrestore(&rnp->lock, flags); in sync_rcu_preempt_exp_init2()
682 rnp->expmask = 0; in sync_rcu_preempt_exp_init2()
688 if (rcu_preempt_has_tasks(rnp)) { in sync_rcu_preempt_exp_init2()
689 rnp->exp_tasks = rnp->blkd_tasks.next; in sync_rcu_preempt_exp_init2()
690 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */ in sync_rcu_preempt_exp_init2()
695 raw_spin_unlock_irqrestore(&rnp->lock, flags); in sync_rcu_preempt_exp_init2()
696 rcu_report_exp_rnp(rsp, rnp, false); in sync_rcu_preempt_exp_init2()
713 struct rcu_node *rnp; in synchronize_rcu_expedited() local
769 rcu_for_each_leaf_node(rsp, rnp) in synchronize_rcu_expedited()
770 sync_rcu_preempt_exp_init1(rsp, rnp); in synchronize_rcu_expedited()
771 rcu_for_each_leaf_node(rsp, rnp) in synchronize_rcu_expedited()
772 sync_rcu_preempt_exp_init2(rsp, rnp); in synchronize_rcu_expedited()
777 rnp = rcu_get_root(rsp); in synchronize_rcu_expedited()
779 sync_rcu_preempt_exp_done(rnp)); in synchronize_rcu_expedited()
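
Continuing the tree sketch: the two passes synchronize_rcu_expedited() runs over the leaves. Pass 1 marks each leaf that has blocked readers and propagates pending bits rootward; pass 2 unmarks the leaf and either snapshots exp_tasks for its readers to drain or, if they already drained between the passes, reports the leaf done at once. Afterwards the caller simply waits for exp_done(root):

    static void exp_init1(struct node *leaf, int has_blocked_readers)
    {
        if (!has_blocked_readers)
            return;                              /* idle leaf: skip it */
        leaf->expmask = 1;                       /* leaf owes one report */
        for (struct node *up = leaf; up->parent; up = up->parent) {
            if (up->parent->expmask & up->grpmask)
                break;                           /* already marked above */
            up->parent->expmask |= up->grpmask;
        }
    }

    static void exp_init2(struct node *leaf, int has_blocked_readers,
                          int *gp_done)
    {
        if (!leaf->expmask)
            return;                              /* pass 1 skipped it */
        leaf->expmask = 0;                       /* leaf's own marker off */
        if (has_blocked_readers)
            leaf->exp_tasks = 1;  /* readers report via report_exp();
                                   * the kernel also kicks boosting here */
        else
            report_exp(leaf, gp_done);           /* drained between passes */
    }
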
857 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp) in rcu_preempt_blocked_readers_cgp() argument
865 static bool rcu_preempt_has_tasks(struct rcu_node *rnp) in rcu_preempt_has_tasks() argument
882 static int rcu_print_task_stall(struct rcu_node *rnp) in rcu_print_task_stall() argument
892 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) in rcu_preempt_check_blocked_tasks() argument
894 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
948 static void rcu_initiate_boost_trace(struct rcu_node *rnp) in rcu_initiate_boost_trace() argument
950 if (!rcu_preempt_has_tasks(rnp)) in rcu_initiate_boost_trace()
951 rnp->n_balk_blkd_tasks++; in rcu_initiate_boost_trace()
952 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL) in rcu_initiate_boost_trace()
953 rnp->n_balk_exp_gp_tasks++; in rcu_initiate_boost_trace()
954 else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL) in rcu_initiate_boost_trace()
955 rnp->n_balk_boost_tasks++; in rcu_initiate_boost_trace()
956 else if (rnp->gp_tasks != NULL && rnp->qsmask != 0) in rcu_initiate_boost_trace()
957 rnp->n_balk_notblocked++; in rcu_initiate_boost_trace()
958 else if (rnp->gp_tasks != NULL && in rcu_initiate_boost_trace()
959 ULONG_CMP_LT(jiffies, rnp->boost_time)) in rcu_initiate_boost_trace()
960 rnp->n_balk_notyet++; in rcu_initiate_boost_trace()
962 rnp->n_balk_nos++; in rcu_initiate_boost_trace()
967 static void rcu_initiate_boost_trace(struct rcu_node *rnp) in rcu_initiate_boost_trace() argument
991 static int rcu_boost(struct rcu_node *rnp) in rcu_boost() argument
997 if (ACCESS_ONCE(rnp->exp_tasks) == NULL && in rcu_boost()
998 ACCESS_ONCE(rnp->boost_tasks) == NULL) in rcu_boost()
1001 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_boost()
1008 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { in rcu_boost()
1009 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_boost()
1019 if (rnp->exp_tasks != NULL) { in rcu_boost()
1020 tb = rnp->exp_tasks; in rcu_boost()
1021 rnp->n_exp_boosts++; in rcu_boost()
1023 tb = rnp->boost_tasks; in rcu_boost()
1024 rnp->n_normal_boosts++; in rcu_boost()
1026 rnp->n_tasks_boosted++; in rcu_boost()
1045 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); in rcu_boost()
1046 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_boost()
1048 rt_mutex_lock(&rnp->boost_mtx); in rcu_boost()
1049 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ in rcu_boost()
1051 return ACCESS_ONCE(rnp->exp_tasks) != NULL || in rcu_boost()
1052 ACCESS_ONCE(rnp->boost_tasks) != NULL; in rcu_boost()
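
rcu_boost() picks its victim from the expedited segment first, then the normal-boost segment, and boosts it through rnp->boost_mtx: rt_mutex_init_proxy_locked() makes the preempted reader the owner, the boost kthread then blocks on the mutex, and rt-mutex priority inheritance pushes the kthread's RT priority into the reader until it releases the mutex in rcu_read_unlock_special(). A sketch of the selection step, with the proxy-lock sequence noted in comments (types here are illustrative):

    struct boost_model {
        void *exp_tasks, *boost_tasks;   /* segment heads, may be NULL */
        unsigned long n_exp_boosts, n_normal_boosts, n_tasks_boosted;
    };

    static void *pick_boostee(struct boost_model *rnp)
    {
        void *tb;

        if (rnp->exp_tasks) {            /* expedited GPs take priority */
            tb = rnp->exp_tasks;
            rnp->n_exp_boosts++;
        } else if (rnp->boost_tasks) {
            tb = rnp->boost_tasks;
            rnp->n_normal_boosts++;
        } else {
            return NULL;                 /* nobody left to boost */
        }
        rnp->n_tasks_boosted++;
        /* The kernel then does, in effect:
         *   rt_mutex_init_proxy_locked(&rnp->boost_mtx, tb);  // tb owns it
         *   rt_mutex_lock(&rnp->boost_mtx);   // kthread blocks, PI boosts tb
         *   rt_mutex_unlock(&rnp->boost_mtx); // reacquired after tb releases
         */
        return tb;
    }
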
1061 struct rcu_node *rnp = (struct rcu_node *)arg; in rcu_boost_kthread() local
1067 rnp->boost_kthread_status = RCU_KTHREAD_WAITING; in rcu_boost_kthread()
1069 rcu_wait(rnp->boost_tasks || rnp->exp_tasks); in rcu_boost_kthread()
1071 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING; in rcu_boost_kthread()
1072 more2boost = rcu_boost(rnp); in rcu_boost_kthread()
1078 rnp->boost_kthread_status = RCU_KTHREAD_YIELDING; in rcu_boost_kthread()
1100 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) in rcu_initiate_boost() argument
1101 __releases(rnp->lock) in rcu_initiate_boost()
1105 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) { in rcu_initiate_boost()
1106 rnp->n_balk_exp_gp_tasks++; in rcu_initiate_boost()
1107 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_initiate_boost()
1110 if (rnp->exp_tasks != NULL || in rcu_initiate_boost()
1111 (rnp->gp_tasks != NULL && in rcu_initiate_boost()
1112 rnp->boost_tasks == NULL && in rcu_initiate_boost()
1113 rnp->qsmask == 0 && in rcu_initiate_boost()
1114 ULONG_CMP_GE(jiffies, rnp->boost_time))) { in rcu_initiate_boost()
1115 if (rnp->exp_tasks == NULL) in rcu_initiate_boost()
1116 rnp->boost_tasks = rnp->gp_tasks; in rcu_initiate_boost()
1117 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_initiate_boost()
1118 t = rnp->boost_kthread_task; in rcu_initiate_boost()
1120 rcu_wake_cond(t, rnp->boost_kthread_status); in rcu_initiate_boost()
1122 rcu_initiate_boost_trace(rnp); in rcu_initiate_boost()
1123 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_initiate_boost()
1158 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) in rcu_preempt_boost_start_gp() argument
1160 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; in rcu_preempt_boost_start_gp()
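
ULONG_CMP_GE()/ULONG_CMP_LT(), used against rnp->boost_time above, are the kernel's wraparound-safe jiffies comparisons: doing the test on an unsigned difference keeps "a >= b" meaningful even after the counter overflows. A small demo, with the macros copied in spirit from include/linux/rcupdate.h:

    #include <stdio.h>
    #include <limits.h>

    #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
    #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 <  (a) - (b))

    int main(void)
    {
        unsigned long boost_time = ULONG_MAX - 5;   /* deadline pre-wrap */
        unsigned long jiffies    = 10;              /* "now", post-wrap */

        /* A naive >= says the deadline is far away; the wrap-safe
         * test correctly says it has already passed. */
        printf("naive: %d  wrap-safe: %d\n",
               jiffies >= boost_time,
               ULONG_CMP_GE(jiffies, boost_time));  /* naive: 0  wrap-safe: 1 */
        return 0;
    }
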
1169 struct rcu_node *rnp) in rcu_spawn_one_boost_kthread() argument
1171 int rnp_index = rnp - &rsp->node[0]; in rcu_spawn_one_boost_kthread()
1179 if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0) in rcu_spawn_one_boost_kthread()
1183 if (rnp->boost_kthread_task != NULL) in rcu_spawn_one_boost_kthread()
1185 t = kthread_create(rcu_boost_kthread, (void *)rnp, in rcu_spawn_one_boost_kthread()
1189 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_spawn_one_boost_kthread()
1191 rnp->boost_kthread_task = t; in rcu_spawn_one_boost_kthread()
1192 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_spawn_one_boost_kthread()
1269 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) in rcu_boost_kthread_setaffinity() argument
1271 struct task_struct *t = rnp->boost_kthread_task; in rcu_boost_kthread_setaffinity()
1272 unsigned long mask = rcu_rnp_online_cpus(rnp); in rcu_boost_kthread_setaffinity()
1280 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) in rcu_boost_kthread_setaffinity()
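
The setaffinity walk above pairs the node's CPU span (grplo..grphi) with one bit per CPU in the node's online mask, skipping the outgoing CPU before the boost kthread's affinity is reset. A toy version:

    #include <stdio.h>

    static void build_affinity(int grplo, int grphi, unsigned long online,
                               int outgoingcpu)
    {
        for (int cpu = grplo; cpu <= grphi; cpu++, online >>= 1)
            if ((online & 1) && cpu != outgoingcpu)
                printf("allow CPU %d\n", cpu);  /* cpumask_set_cpu() */
    }

    int main(void)
    {
        /* CPUs 4, 5 and 7 online; CPU 5 is going down. */
        build_affinity(4, 7, 0xb, 5);   /* prints: allow CPU 4, allow CPU 7 */
        return 0;
    }
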
1303 struct rcu_node *rnp; in rcu_spawn_boost_kthreads() local
1309 rcu_for_each_leaf_node(rcu_state_p, rnp) in rcu_spawn_boost_kthreads()
1310 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); in rcu_spawn_boost_kthreads()
1316 struct rcu_node *rnp = rdp->mynode; in rcu_prepare_kthreads() local
1320 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); in rcu_prepare_kthreads()
1325 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) in rcu_initiate_boost() argument
1326 __releases(rnp->lock) in rcu_initiate_boost()
1328 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_initiate_boost()
1341 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) in rcu_preempt_boost_start_gp() argument
1345 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) in rcu_boost_kthread_setaffinity() argument
1447 struct rcu_node *rnp; in rcu_try_advance_all_cbs() local
1457 rnp = rdp->mynode; in rcu_try_advance_all_cbs()
1464 if ((rdp->completed != rnp->completed || in rcu_try_advance_all_cbs()
1532 struct rcu_node *rnp; in rcu_prepare_for_idle() local
1575 rnp = rdp->mynode; in rcu_prepare_for_idle()
1576 raw_spin_lock(&rnp->lock); /* irqs already disabled. */ in rcu_prepare_for_idle()
1578 needwake = rcu_accelerate_cbs(rsp, rnp, rdp); in rcu_prepare_for_idle()
1579 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ in rcu_prepare_for_idle()
1860 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) in rcu_nocb_gp_cleanup() argument
1862 wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); in rcu_nocb_gp_cleanup()
1873 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq) in rcu_nocb_gp_set() argument
1875 rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq; in rcu_nocb_gp_set()
1878 static void rcu_init_one_nocb(struct rcu_node *rnp) in rcu_init_one_nocb() argument
1880 init_waitqueue_head(&rnp->nocb_gp_wq[0]); in rcu_init_one_nocb()
1881 init_waitqueue_head(&rnp->nocb_gp_wq[1]); in rcu_init_one_nocb()
2104 struct rcu_node *rnp = rdp->mynode; in rcu_nocb_wait_gp() local
2106 raw_spin_lock_irqsave(&rnp->lock, flags); in rcu_nocb_wait_gp()
2108 needwake = rcu_start_future_gp(rnp, rdp, &c); in rcu_nocb_wait_gp()
2109 raw_spin_unlock_irqrestore(&rnp->lock, flags); in rcu_nocb_wait_gp()
2117 trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); in rcu_nocb_wait_gp()
2120 rnp->nocb_gp_wq[c & 0x1], in rcu_nocb_wait_gp()
2121 (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c))); in rcu_nocb_wait_gp()
2125 trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait")); in rcu_nocb_wait_gp()
2127 trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait")); in rcu_nocb_wait_gp()
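
A two-element nocb_gp_wq[] suffices because at any instant no-callbacks CPUs can be waiting on at most the next two grace periods (completed+1 and completed+2), so indexing by the low bit of the wanted GP number is unambiguous, and rcu_nocb_gp_cleanup()'s wake of wq[completed & 0x1] hits exactly the sleepers whose GP just ended (rcu_nocb_gp_set() plays the same parity game with need_future_gp[]). A trivial check that sleeper and waker agree on the index:

    #include <stdio.h>

    int main(void)
    {
        unsigned long completed = 41;           /* last finished GP */
        unsigned long c = completed + 2;        /* GP a new callback needs */

        printf("sleep on wq[%lu]\n", c & 0x1);  /* wq[1] for c = 43 */

        completed = 43;                         /* ...GP 43 retires... */
        printf("wake wq[%lu]\n", completed & 0x1);  /* same queue: wq[1] */
        return 0;
    }
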
2552 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) in rcu_nocb_gp_cleanup() argument
2556 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq) in rcu_nocb_gp_set() argument
2560 static void rcu_init_one_nocb(struct rcu_node *rnp) in rcu_init_one_nocb() argument