/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
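
/*
 * Illustrative sketch (not part of this file's logic): a subsystem that must
 * initialize already-online CPUs and then register a hotplug notifier without
 * racing against concurrent hotplug would typically do (the foobar_* names
 * are hypothetical):
 *
 *	cpu_notifier_register_begin();
 *
 *	for_each_online_cpu(cpu)
 *		foobar_init_cpu(cpu);
 *
 *	__register_cpu_notifier(&foobar_cpu_notifier);
 *
 *	cpu_notifier_register_done();
 */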

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
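
/*
 * How the pieces above fit together: readers (get_online_cpus()) take
 * cpu_hotplug.lock only long enough to bump the refcount, so they do not
 * block each other.  The writer (cpu_hotplug_begin()) records itself in
 * active_writer, holds cpu_hotplug.lock for the whole hotplug operation,
 * which stalls new readers, and sleeps on wq until the refcount of the
 * existing readers drains to zero.  put_online_cpus() wakes the writer when
 * the last reader drops out.
 */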

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)


void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);
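
/*
 * Illustrative sketch (hypothetical code, not part of this file): a caller
 * that needs a stable cpu_online_mask while it iterates wraps the walk in
 * the read-side API:
 *
 *	get_online_cpus();
 *
 *	for_each_online_cpu(cpu)
 *		foobar_poke_cpu(cpu);	(no CPU can go away under us here)
 *
 *	put_online_cpus();
 */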

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
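
/*
 * For reference, the hotplug write side (cpu_up()/cpu_down() and their
 * _cpu_up()/_cpu_down() helpers below) pairs these primitives as follows
 * (sketch only):
 *
 *	cpu_maps_update_begin();	serialize against other writers
 *	cpu_hotplug_begin();		block new readers, drain old ones
 *	...modify cpu_online_mask...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */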

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}
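
/*
 * Note on the nr_to_call/nr_calls arguments: __cpu_notify() reports back how
 * many notifiers were actually invoked.  _cpu_up()/_cpu_down() below use that
 * count when a *_PREPARE notification fails, so that the corresponding
 * CPU_UP_CANCELED/CPU_DOWN_FAILED rollback is delivered only to the notifiers
 * that had already handled the prepare event (excluding the one that failed).
 */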

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has
		 * just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
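/*
 * Runs on the CPU being offlined, invoked via __stop_machine() from
 * _cpu_down() below; like any stop_machine() callback it runs with
 * interrupts disabled while the other CPUs are held in their stopper
 * threads, so the CPU can detach itself without racing anybody.
 */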
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	tick_cleanup_dead_cpu(cpu);
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
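
/*
 * disable_nonboot_cpus()/enable_nonboot_cpus() are the suspend/hibernate-time
 * entry points (note the tasks_frozen=1 argument to _cpu_down()/_cpu_up()).
 * frozen_cpus remembers exactly which CPUs were taken down so that resume
 * brings back only those, and cpu_hotplug_disabled keeps sysfs-initiated
 * hotplug out of the way in between.
 */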

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86's
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * having disabled cpu hotplug first, to avoid a cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
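
/*
 * How cpumask_of() uses the table above: get_cpu_mask() in <linux/cpumask.h>
 * (paraphrased here, not quoted) picks the row whose first word has bit
 * (cpu % BITS_PER_LONG) set and then steps the pointer back by
 * cpu / BITS_PER_LONG longs, so that word lands at the correct word index of
 * an NR_CPUS-bit mask while the surrounding words read as zero:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 */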

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}
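
/*
 * Note the asymmetry above: bringing a CPU online also marks it active, but
 * taking it offline only clears the online bit here.  The active bit is
 * expected to have been cleared earlier in the offline path (in this kernel,
 * via the scheduler's CPU_DOWN_PREPARE handling calling set_cpu_active()),
 * so that the scheduler stops placing work on the CPU before it goes away.
 */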

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}