/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */
#define MAX_RCU_LVLS 4
#define RCU_FANOUT_1	      (CONFIG_RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	      (RCU_FANOUT_1 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_3	      (RCU_FANOUT_2 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_4	      (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
#  define RCU_NUM_LVLS	      1
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      (NR_CPUS)
#  define NUM_RCU_LVL_2	      0
#  define NUM_RCU_LVL_3	      0
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_2
#  define RCU_NUM_LVLS	      2
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_2	      (NR_CPUS)
#  define NUM_RCU_LVL_3	      0
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_3
#  define RCU_NUM_LVLS	      3
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_3	      (NR_CPUS)
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_4
#  define RCU_NUM_LVLS	      4
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_4	      (NR_CPUS)
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
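
/*
 * Worked example (illustrative values only, not a requirement): with
 * CONFIG_RCU_FANOUT_LEAF=16, CONFIG_RCU_FANOUT=16, and NR_CPUS=64,
 * RCU_FANOUT_1 is 16 and RCU_FANOUT_2 is 256, so the two-level branch
 * above is taken:
 *
 *	RCU_NUM_LVLS  = 2
 *	NUM_RCU_LVL_0 = 1			(the root rcu_node)
 *	NUM_RCU_LVL_1 = DIV_ROUND_UP(64, 16) = 4	(leaf rcu_nodes)
 *	NUM_RCU_LVL_2 = 64			(the per-CPU rcu_data level)
 *	RCU_SUM	      = 1 + 4 + 64 = 69
 *	NUM_RCU_NODES = 69 - 64 = 5
 *
 * The rcu_data level is counted in RCU_SUM, which is why NR_CPUS is
 * subtracted back out to get the number of rcu_node structures.
 */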

extern int rcu_num_lvls;
extern int rcu_num_nodes;

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting; /* Track irq/process nesting level. */
				    /* Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
	atomic_t dynticks;	    /* Even value for idle, else odd. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	long long dynticks_idle_nesting;
				    /* irq/process nesting level from idle. */
	atomic_t dynticks_idle;	    /* Even value for idle, else odd. */
				    /*  "Idle" excludes userspace execution. */
	unsigned long dynticks_idle_jiffies;
				    /* End of last non-NMI non-idle period. */
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool all_lazy;		    /* Are all CPU's CBs lazy? */
	unsigned long nonlazy_posted;
				    /* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
				    /* idle-period nonlazy_posted snapshot. */
	unsigned long last_accelerate;
				    /* Last jiffy CBs were accelerated. */
	unsigned long last_advance_all;
				    /* Last jiffy CBs were all advanced. */
	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
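
/*
 * Minimal sketch (illustration only, not part of the build) of the
 * even/odd protocol implied by ->dynticks above: a remote CPU samples
 * the counter into rcu_data's ->dynticks_snap at the start of a grace
 * period and later rechecks it.  The helper name below is hypothetical;
 * the real logic lives in rcu_implicit_dynticks_qs() in tree.c.
 */
#if 0
static bool example_in_extended_qs(atomic_t *dynticks, unsigned int snap)
{
	/* atomic_add_return(0, ...) also provides full memory ordering. */
	unsigned int cur = atomic_add_return(0, dynticks);

	/* Even now -> idle now; advanced by >= 2 -> idled since snapshot. */
	return (cur & 0x1) == 0 || (cur - snap) >= 2;
}
#endif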

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
				/*  rcu_state fields as well as following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's completed. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long expmask;	/* Groups that have ->blkd_tasks */
				/*  elements that need to drain to allow the */
				/*  current expedited grace period to */
				/*  complete (only for PREEMPT_RCU). */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask & expmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU read-side critical */
				/*  section still in progress. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_NOCB_CPU
	wait_queue_head_t nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	int need_future_gp[2];
				/* Counts of upcoming no-CB GP requests. */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
} ____cacheline_internodealigned_in_smp;
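
/*
 * Minimal sketch (illustration only, not part of the build) of how the
 * fields above combine when a quiescent state is reported: clear this
 * node's bit in ->qsmask and, if all bits are now clear, repeat at
 * ->parent using ->grpmask.  The hypothetical helper below omits the
 * locking and grace-period checks done by the real rcu_report_qs_rnp().
 */
#if 0
static void example_report_qs_up(struct rcu_node *rnp, unsigned long mask)
{
	for (; rnp != NULL; rnp = rnp->parent) {
		rnp->qsmask &= ~mask;	/* This CPU/group is done. */
		if (rnp->qsmask != 0)
			return;		/* Others on this node still pending. */
		mask = rnp->grpmask;	/* Our single bit at the next level up. */
	}
	/* Reached the root with ->qsmask clear: grace period can end. */
}
#endif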

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
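
/*
 * Example use of the traversal macros above (hypothetical debugging
 * helper, illustration only):
 */
#if 0
static void example_show_qsmasks(struct rcu_state *rsp)
{
	struct rcu_node *rnp;

	rcu_for_each_node_breadth_first(rsp, rnp)
		pr_info("level %d: CPUs %d-%d qsmask %#lx\n",
			rnp->level, rnp->grplo, rnp->grphi, rnp->qsmask);
}
#endif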

/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling: */
	unsigned long	completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long	gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
					/*  for rcu_all_qs() invocations. */
	bool		passed_quiesce;	/* User-mode/idle loop etc. */
	bool		qs_pending;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible gpnum/completed wrap. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
#ifdef CONFIG_RCU_CPU_STALL_INFO
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
					/* grace periods for sublists. */
	long		qlen_lazy;	/* # of lazy queued callbacks */
	long		qlen;		/* # of queued callbacks, incl lazy */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
	unsigned long   n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
	unsigned long   n_cbs_adopted;  /* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long cond_resched_completed;
					/* Grace period that needs help */
					/*  from cond_resched(). */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_qs_pending;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_nocb_defer_wakeup;
	unsigned long n_rp_need_nothing;

	/* 6) _rcu_barrier() and OOM callbacks. */
	struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
	struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
	struct rcu_head **nocb_tail;
	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
	struct rcu_head **nocb_follower_tail;
	wait_queue_head_t nocb_wq;	/* For nocb kthreads to sleep on. */
	struct task_struct *nocb_kthread;
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */

	/* The following fields are used by the leader, hence own cacheline. */
	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
					/* CBs waiting for GP. */
	struct rcu_head **nocb_gp_tail;
	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
	struct rcu_data *nocb_next_follower;
					/* Next follower in wakeup chain. */

	/* The following fields are used by the follower, hence new cacheline. */
	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
					/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 8) RCU CPU stall data. */
#ifdef CONFIG_RCU_CPU_STALL_INFO
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

	int cpu;
	struct rcu_state *rsp;
};
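
/*
 * Minimal sketch (illustration only, not part of the build) of how the
 * ->nxttail[] segments described above advance when this CPU notices
 * that a grace period has ended: callbacks that were waiting for that
 * grace period become "done", so RCU_DONE_TAIL simply catches up to
 * RCU_WAIT_TAIL.  The real rcu_advance_cbs() also consults
 * ->nxtcompleted[] to decide how far each tail pointer may advance.
 */
#if 0
static void example_advance_done_cbs(struct rcu_data *rdp)
{
	/* Everything up to *nxttail[RCU_WAIT_TAIL] is now invocable. */
	rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
}
#endif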

/* Values for fqs_state field in struct rcu_state. */
#define RCU_GP_IDLE		0	/* No grace period in progress. */
#define RCU_GP_INIT		1	/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOGP_WAKE_NOT	0
#define RCU_NOGP_WAKE		1
#define RCU_NOGP_WAKE_FORCE	2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */
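
/*
 * For example, with HZ=100 or HZ=250 this works out to 1 jiffy, with
 * HZ=300 to 2 jiffies, and with HZ=1000 to 3 jiffies between bouts of
 * quiescent-state forcing.
 */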

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
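
/*
 * Example use of rcu_wait() (illustration only, not part of the build),
 * modeled on the priority-boost kthread's event loop; the condition is
 * re-evaluated after every wakeup:
 */
#if 0
static int example_boost_wait(struct rcu_node *rnp)
{
	for (;;) {
		/* Sleep until some task on this node needs boosting. */
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		/* ... boost the task at the head of ->blkd_tasks ... */
	}
	return 0;
}
#endif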

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
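
/*
 * Continuing the fanout-16, NR_CPUS=64 example from above (illustrative
 * values only): ->node[] would hold five rcu_node structures, node[0]
 * being the root and node[1] through node[4] the leaves, with
 * ->level[0] == &node[0] and ->level[1] == &node[1].
 */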
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS];	/* Hierarchy levels. */
	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
	u8 levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
	u8 flavor_mask;				/* bit in flavor mask. */
	struct rcu_data __percpu *rda;		/* pointer to per-CPU rcu_data. */
	void (*call)(struct rcu_head *head,	/* call_rcu() flavor. */
		     void (*func)(struct rcu_head *head));

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	fqs_state ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	u8	boost;				/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	wait_queue_head_t gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
						/* Protect following fields. */
	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
						/*  need a grace period. */
	struct rcu_head **orphan_nxttail;	/* Tail of above. */
	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
						/*  are ready to invoke. */
	struct rcu_head **orphan_donetail;	/* Tail of above. */
	long qlen_lazy;				/* Number of lazy callbacks. */
	long qlen;				/* Total number of callbacks. */
	/* End of fields guarded by orphan_lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long n_barrier_done;		/* ++ at start and end of */
						/*  _rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	atomic_long_t expedited_start;		/* Starting ticket. */
	atomic_long_t expedited_done;		/* Done ticket. */
	atomic_long_t expedited_wrap;		/* # near-wrap incidents. */
	atomic_long_t expedited_tryfail;	/* # acquisition failures. */
	atomic_long_t expedited_workdone1;	/* # done by others #1. */
	atomic_long_t expedited_workdone2;	/* # done by others #2. */
	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
	atomic_long_t expedited_stoppedcpus;	/* # successful stop_cpus. */
	atomic_long_t expedited_done_tries;	/* # tries to update _done. */
	atomic_long_t expedited_done_lost;	/* # times beaten to _done. */
	atomic_long_t expedited_done_exit;	/* # times exited _done loop. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */
	struct list_head flavors;		/* List of RCU flavors. */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_WAIT_INIT 0	/* Initial state. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_WAIT_FQS  2	/* Wait for force-quiescent-state time. */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
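
/*
 * Example use of for_each_rcu_flavor() (hypothetical helper,
 * illustration only):
 */
#if 0
static long example_total_callbacks(void)
{
	struct rcu_state *rsp;
	long sum = 0;

	for_each_rcu_flavor(rsp)
		sum += rsp->qlen;
	return sum;
}
#endif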

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;
DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);

extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp,
				      unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
#ifdef CONFIG_RCU_NOCB_CPU
	*ql = atomic_long_read(&rdp->nocb_q_count);
	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
	*ql = 0;
	*qll = 0;
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
}
#endif /* #ifdef CONFIG_RCU_TRACE */
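
/*
 * Example use of rcu_nocb_q_lengths() (hypothetical tracing snippet,
 * illustration only):
 */
#if 0
static void example_trace_nocb_qlen(struct rcu_data *rdp)
{
	long ql, qll;

	rcu_nocb_q_lengths(rdp, &ql, &qll);
	pr_info("rcu: CPU %d: %ld no-CBs callbacks queued (%ld lazy)\n",
		rdp->cpu, ql, qll);
}
#endif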