1#ifndef _LINUX_KERNEL_H
2#define _LINUX_KERNEL_H
3
4
5#include <stdarg.h>
6#include <linux/linkage.h>
7#include <linux/stddef.h>
8#include <linux/types.h>
9#include <linux/compiler.h>
10#include <linux/bitops.h>
11#include <linux/log2.h>
12#include <linux/typecheck.h>
13#include <linux/printk.h>
14#include <linux/dynamic_debug.h>
15#include <asm/byteorder.h>
16#include <uapi/linux/kernel.h>
17
18#define USHRT_MAX	((u16)(~0U))
19#define SHRT_MAX	((s16)(USHRT_MAX>>1))
20#define SHRT_MIN	((s16)(-SHRT_MAX - 1))
21#define INT_MAX		((int)(~0U>>1))
22#define INT_MIN		(-INT_MAX - 1)
23#define UINT_MAX	(~0U)
24#define LONG_MAX	((long)(~0UL>>1))
25#define LONG_MIN	(-LONG_MAX - 1)
26#define ULONG_MAX	(~0UL)
27#define LLONG_MAX	((long long)(~0ULL>>1))
28#define LLONG_MIN	(-LLONG_MAX - 1)
29#define ULLONG_MAX	(~0ULL)
30#define SIZE_MAX	(~(size_t)0)
31
32#define U8_MAX		((u8)~0U)
33#define S8_MAX		((s8)(U8_MAX>>1))
34#define S8_MIN		((s8)(-S8_MAX - 1))
35#define U16_MAX		((u16)~0U)
36#define S16_MAX		((s16)(U16_MAX>>1))
37#define S16_MIN		((s16)(-S16_MAX - 1))
38#define U32_MAX		((u32)~0U)
39#define S32_MAX		((s32)(U32_MAX>>1))
40#define S32_MIN		((s32)(-S32_MAX - 1))
41#define U64_MAX		((u64)~0ULL)
42#define S64_MAX		((s64)(U64_MAX>>1))
43#define S64_MIN		((s64)(-S64_MAX - 1))
44
45#define STACK_MAGIC	0xdeadbeef
46
47#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))
48
49#define ALIGN(x, a)		__ALIGN_KERNEL((x), (a))
50#define __ALIGN_MASK(x, mask)	__ALIGN_KERNEL_MASK((x), (mask))
51#define PTR_ALIGN(p, a)		((typeof(p))ALIGN((unsigned long)(p), (a)))
52#define IS_ALIGNED(x, a)		(((x) & ((typeof(x))(a) - 1)) == 0)
53
54#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
55
56/*
57 * This looks more complex than it should be. But we need to
58 * get the type for the ~ right in round_down (it needs to be
59 * as wide as the result!), and we want to evaluate the macro
60 * arguments just once each.
61 */
62#define __round_mask(x, y) ((__typeof__(x))((y)-1))
63#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
64#define round_down(x, y) ((x) & ~__round_mask(x, y))
65
66#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
67#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
68#define DIV_ROUND_UP_ULL(ll,d) \
69	({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
70
71#if BITS_PER_LONG == 32
72# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
73#else
74# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
75#endif
76
77/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
78#define roundup(x, y) (					\
79{							\
80	const typeof(y) __y = y;			\
81	(((x) + (__y - 1)) / __y) * __y;		\
82}							\
83)
84#define rounddown(x, y) (				\
85{							\
86	typeof(x) __x = (x);				\
87	__x - (__x % (y));				\
88}							\
89)
90
91/*
92 * Divide positive or negative dividend by positive divisor and round
93 * to closest integer. Result is undefined for negative divisors and
94 * for negative dividends if the divisor variable type is unsigned.
95 */
96#define DIV_ROUND_CLOSEST(x, divisor)(			\
97{							\
98	typeof(x) __x = x;				\
99	typeof(divisor) __d = divisor;			\
100	(((typeof(x))-1) > 0 ||				\
101	 ((typeof(divisor))-1) > 0 || (__x) > 0) ?	\
102		(((__x) + ((__d) / 2)) / (__d)) :	\
103		(((__x) - ((__d) / 2)) / (__d));	\
104}							\
105)
106/*
107 * Same as above but for u64 dividends. divisor must be a 32-bit
108 * number.
109 */
110#define DIV_ROUND_CLOSEST_ULL(x, divisor)(		\
111{							\
112	typeof(divisor) __d = divisor;			\
113	unsigned long long _tmp = (x) + (__d) / 2;	\
114	do_div(_tmp, __d);				\
115	_tmp;						\
116}							\
117)
118
119/*
120 * Multiplies an integer by a fraction, while avoiding unnecessary
121 * overflow or loss of precision.
122 */
123#define mult_frac(x, numer, denom)(			\
124{							\
125	typeof(x) quot = (x) / (denom);			\
126	typeof(x) rem  = (x) % (denom);			\
127	(quot * (numer)) + ((rem * (numer)) / (denom));	\
128}							\
129)
130
131
132#define _RET_IP_		(unsigned long)__builtin_return_address(0)
133#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })
134
135#ifdef CONFIG_LBDAF
136# include <asm/div64.h>
137# define sector_div(a, b) do_div(a, b)
138#else
139# define sector_div(n, b)( \
140{ \
141	int _res; \
142	_res = (n) % (b); \
143	(n) /= (b); \
144	_res; \
145} \
146)
147#endif
148
149/**
150 * upper_32_bits - return bits 32-63 of a number
151 * @n: the number we're accessing
152 *
153 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
154 * the "right shift count >= width of type" warning when that quantity is
155 * 32-bits.
156 */
157#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
158
159/**
160 * lower_32_bits - return bits 0-31 of a number
161 * @n: the number we're accessing
162 */
163#define lower_32_bits(n) ((u32)(n))
164
165struct completion;
166struct pt_regs;
167struct user;
168
169#ifdef CONFIG_PREEMPT_VOLUNTARY
170extern int _cond_resched(void);
171# define might_resched() _cond_resched()
172#else
173# define might_resched() do { } while (0)
174#endif
175
176#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
177  void ___might_sleep(const char *file, int line, int preempt_offset);
178  void __might_sleep(const char *file, int line, int preempt_offset);
179/**
180 * might_sleep - annotation for functions that can sleep
181 *
182 * this macro will print a stack trace if it is executed in an atomic
183 * context (spinlock, irq-handler, ...).
184 *
185 * This is a useful debugging help to be able to catch problems early and not
186 * be bitten later when the calling function happens to sleep when it is not
187 * supposed to.
188 */
189# define might_sleep() \
190	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
191# define sched_annotate_sleep()	(current->task_state_change = 0)
192#else
193  static inline void ___might_sleep(const char *file, int line,
194				   int preempt_offset) { }
195  static inline void __might_sleep(const char *file, int line,
196				   int preempt_offset) { }
197# define might_sleep() do { might_resched(); } while (0)
198# define sched_annotate_sleep() do { } while (0)
199#endif
200
201#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
202
203/*
204 * abs() handles unsigned and signed longs, ints, shorts and chars.  For all
205 * input types abs() returns a signed long.
206 * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
207 * for those.
208 */
209#define abs(x) ({						\
210		long ret;					\
211		if (sizeof(x) == sizeof(long)) {		\
212			long __x = (x);				\
213			ret = (__x < 0) ? -__x : __x;		\
214		} else {					\
215			int __x = (x);				\
216			ret = (__x < 0) ? -__x : __x;		\
217		}						\
218		ret;						\
219	})
220
221#define abs64(x) ({				\
222		s64 __x = (x);			\
223		(__x < 0) ? -__x : __x;		\
224	})
225
226/**
227 * reciprocal_scale - "scale" a value into range [0, ep_ro)
228 * @val: value
229 * @ep_ro: right open interval endpoint
230 *
231 * Perform a "reciprocal multiplication" in order to "scale" a value into
232 * range [0, ep_ro), where the upper interval endpoint is right-open.
233 * This is useful, e.g. for accessing a index of an array containing
234 * ep_ro elements, for example. Think of it as sort of modulus, only that
235 * the result isn't that of modulo. ;) Note that if initial input is a
236 * small value, then result will return 0.
237 *
238 * Return: a result based on val in interval [0, ep_ro).
239 */
240static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
241{
242	return (u32)(((u64) val * ep_ro) >> 32);
243}
244
245#if defined(CONFIG_MMU) && \
246	(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
247void might_fault(void);
248#else
249static inline void might_fault(void) { }
250#endif
251
252extern struct atomic_notifier_head panic_notifier_list;
253extern long (*panic_blink)(int state);
254__printf(1, 2)
255void panic(const char *fmt, ...)
256	__noreturn __cold;
257extern void oops_enter(void);
258extern void oops_exit(void);
259void print_oops_end_marker(void);
260extern int oops_may_print(void);
261void do_exit(long error_code)
262	__noreturn;
263void complete_and_exit(struct completion *, long)
264	__noreturn;
265
266/* Internal, do not use. */
267int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
268int __must_check _kstrtol(const char *s, unsigned int base, long *res);
269
270int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
271int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
272
273/**
274 * kstrtoul - convert a string to an unsigned long
275 * @s: The start of the string. The string must be null-terminated, and may also
276 *  include a single newline before its terminating null. The first character
277 *  may also be a plus sign, but not a minus sign.
278 * @base: The number base to use. The maximum supported base is 16. If base is
279 *  given as 0, then the base of the string is automatically detected with the
280 *  conventional semantics - If it begins with 0x the number will be parsed as a
281 *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
282 *  parsed as an octal number. Otherwise it will be parsed as a decimal.
283 * @res: Where to write the result of the conversion on success.
284 *
285 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
286 * Used as a replacement for the obsolete simple_strtoull. Return code must
287 * be checked.
288*/
289static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
290{
291	/*
292	 * We want to shortcut function call, but
293	 * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
294	 */
295	if (sizeof(unsigned long) == sizeof(unsigned long long) &&
296	    __alignof__(unsigned long) == __alignof__(unsigned long long))
297		return kstrtoull(s, base, (unsigned long long *)res);
298	else
299		return _kstrtoul(s, base, res);
300}
301
302/**
303 * kstrtol - convert a string to a long
304 * @s: The start of the string. The string must be null-terminated, and may also
305 *  include a single newline before its terminating null. The first character
306 *  may also be a plus sign or a minus sign.
307 * @base: The number base to use. The maximum supported base is 16. If base is
308 *  given as 0, then the base of the string is automatically detected with the
309 *  conventional semantics - If it begins with 0x the number will be parsed as a
310 *  hexadecimal (case insensitive), if it otherwise begins with 0, it will be
311 *  parsed as an octal number. Otherwise it will be parsed as a decimal.
312 * @res: Where to write the result of the conversion on success.
313 *
314 * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
315 * Used as a replacement for the obsolete simple_strtoull. Return code must
316 * be checked.
317 */
318static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
319{
320	/*
321	 * We want to shortcut function call, but
322	 * __builtin_types_compatible_p(long, long long) = 0.
323	 */
324	if (sizeof(long) == sizeof(long long) &&
325	    __alignof__(long) == __alignof__(long long))
326		return kstrtoll(s, base, (long long *)res);
327	else
328		return _kstrtol(s, base, res);
329}
330
331int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
332int __must_check kstrtoint(const char *s, unsigned int base, int *res);
333
/* Convert a string to a u64: thin wrapper around kstrtoull(), same contract. */
static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
{
	return kstrtoull(s, base, res);
}
338
/* Convert a string to an s64: thin wrapper around kstrtoll(), same contract. */
static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
{
	return kstrtoll(s, base, res);
}
343
/* Convert a string to a u32: thin wrapper around kstrtouint(), same contract. */
static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
{
	return kstrtouint(s, base, res);
}
348
/* Convert a string to an s32: thin wrapper around kstrtoint(), same contract. */
static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
{
	return kstrtoint(s, base, res);
}
353
354int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
355int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
356int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
357int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
358
359int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
360int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
361int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
362int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
363int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
364int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
365int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
366int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
367int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
368int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
369
/* Userspace-buffer u64 parse: forwards to kstrtoull_from_user(). */
static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
{
	return kstrtoull_from_user(s, count, base, res);
}
374
/* Userspace-buffer s64 parse: forwards to kstrtoll_from_user(). */
static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
{
	return kstrtoll_from_user(s, count, base, res);
}
379
/* Userspace-buffer u32 parse: forwards to kstrtouint_from_user(). */
static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
{
	return kstrtouint_from_user(s, count, base, res);
}
384
/* Userspace-buffer s32 parse: forwards to kstrtoint_from_user(). */
static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
{
	return kstrtoint_from_user(s, count, base, res);
}
389
390/* Obsolete, do not use.  Use kstrto<foo> instead */
391
392extern unsigned long simple_strtoul(const char *,char **,unsigned int);
393extern long simple_strtol(const char *,char **,unsigned int);
394extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
395extern long long simple_strtoll(const char *,char **,unsigned int);
396
397extern int num_to_str(char *buf, int size, unsigned long long num);
398
399/* lib/printf utilities */
400
401extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
402extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
403extern __printf(3, 4)
404int snprintf(char *buf, size_t size, const char *fmt, ...);
405extern __printf(3, 0)
406int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
407extern __printf(3, 4)
408int scnprintf(char *buf, size_t size, const char *fmt, ...);
409extern __printf(3, 0)
410int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
411extern __printf(2, 3)
412char *kasprintf(gfp_t gfp, const char *fmt, ...);
413extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
414
415extern __scanf(2, 3)
416int sscanf(const char *, const char *, ...);
417extern __scanf(2, 0)
418int vsscanf(const char *, const char *, va_list);
419
420extern int get_option(char **str, int *pint);
421extern char *get_options(const char *str, int nints, int *ints);
422extern unsigned long long memparse(const char *ptr, char **retptr);
423extern bool parse_option_str(const char *str, const char *option);
424
425extern int core_kernel_text(unsigned long addr);
426extern int core_kernel_data(unsigned long addr);
427extern int __kernel_text_address(unsigned long addr);
428extern int kernel_text_address(unsigned long addr);
429extern int func_ptr_is_kernel_text(void *ptr);
430
431unsigned long int_sqrt(unsigned long);
432
433extern void bust_spinlocks(int yes);
434extern int oops_in_progress;		/* If set, an oops, panic(), BUG() or die() is in progress */
435extern int panic_timeout;
436extern int panic_on_oops;
437extern int panic_on_unrecovered_nmi;
438extern int panic_on_io_nmi;
439extern int panic_on_warn;
440extern int sysctl_panic_on_stackoverflow;
441/*
442 * Only to be used by arch init code. If the user over-wrote the default
443 * CONFIG_PANIC_TIMEOUT, honor it.
444 */
445static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
446{
447	if (panic_timeout == arch_default_timeout)
448		panic_timeout = timeout;
449}
450extern const char *print_tainted(void);
451enum lockdep_ok {
452	LOCKDEP_STILL_OK,
453	LOCKDEP_NOW_UNRELIABLE
454};
455extern void add_taint(unsigned flag, enum lockdep_ok);
456extern int test_taint(unsigned flag);
457extern unsigned long get_taint(void);
458extern int root_mountflags;
459
460extern bool early_boot_irqs_disabled;
461
462/* Values used for system_state */
463extern enum system_states {
464	SYSTEM_BOOTING,
465	SYSTEM_RUNNING,
466	SYSTEM_HALT,
467	SYSTEM_POWER_OFF,
468	SYSTEM_RESTART,
469} system_state;
470
471#define TAINT_PROPRIETARY_MODULE	0
472#define TAINT_FORCED_MODULE		1
473#define TAINT_CPU_OUT_OF_SPEC		2
474#define TAINT_FORCED_RMMOD		3
475#define TAINT_MACHINE_CHECK		4
476#define TAINT_BAD_PAGE			5
477#define TAINT_USER			6
478#define TAINT_DIE			7
479#define TAINT_OVERRIDDEN_ACPI_TABLE	8
480#define TAINT_WARN			9
481#define TAINT_CRAP			10
482#define TAINT_FIRMWARE_WORKAROUND	11
483#define TAINT_OOT_MODULE		12
484#define TAINT_UNSIGNED_MODULE		13
485#define TAINT_SOFTLOCKUP		14
486#define TAINT_LIVEPATCH			15
487
488extern const char hex_asc[];
489#define hex_asc_lo(x)	hex_asc[((x) & 0x0f)]
490#define hex_asc_hi(x)	hex_asc[((x) & 0xf0) >> 4]
491
492static inline char *hex_byte_pack(char *buf, u8 byte)
493{
494	*buf++ = hex_asc_hi(byte);
495	*buf++ = hex_asc_lo(byte);
496	return buf;
497}
498
499extern const char hex_asc_upper[];
500#define hex_asc_upper_lo(x)	hex_asc_upper[((x) & 0x0f)]
501#define hex_asc_upper_hi(x)	hex_asc_upper[((x) & 0xf0) >> 4]
502
503static inline char *hex_byte_pack_upper(char *buf, u8 byte)
504{
505	*buf++ = hex_asc_upper_hi(byte);
506	*buf++ = hex_asc_upper_lo(byte);
507	return buf;
508}
509
510extern int hex_to_bin(char ch);
511extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
512extern char *bin2hex(char *dst, const void *src, size_t count);
513
514bool mac_pton(const char *s, u8 *mac);
515
516/*
517 * General tracing related utility functions - trace_printk(),
518 * tracing_on/tracing_off and tracing_start()/tracing_stop
519 *
520 * Use tracing_on/tracing_off when you want to quickly turn on or off
521 * tracing. It simply enables or disables the recording of the trace events.
522 * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
523 * file, which gives a means for the kernel and userspace to interact.
524 * Place a tracing_off() in the kernel where you want tracing to end.
525 * From user space, examine the trace, and then echo 1 > tracing_on
526 * to continue tracing.
527 *
528 * tracing_stop/tracing_start has slightly more overhead. It is used
529 * by things like suspend to ram where disabling the recording of the
530 * trace is not enough, but tracing must actually stop because things
531 * like calling smp_processor_id() may crash the system.
532 *
533 * Most likely, you want to use tracing_on/tracing_off.
534 */
535#ifdef CONFIG_RING_BUFFER
536/* trace_off_permanent stops recording with no way to bring it back */
537void tracing_off_permanent(void);
538#else
539static inline void tracing_off_permanent(void) { }
540#endif
541
542enum ftrace_dump_mode {
543	DUMP_NONE,
544	DUMP_ALL,
545	DUMP_ORIG,
546};
547
548#ifdef CONFIG_TRACING
549void tracing_on(void);
550void tracing_off(void);
551int tracing_is_on(void);
552void tracing_snapshot(void);
553void tracing_snapshot_alloc(void);
554
555extern void tracing_start(void);
556extern void tracing_stop(void);
557
/*
 * Dummy printf-attributed helper: never executed (its only call site is
 * inside "if (0)" in __trace_printk_check_format), it exists purely so
 * the compiler type-checks trace_printk() format strings and arguments.
 */
static inline __printf(1, 2)
void ____trace_printk_check_format(const char *fmt, ...)
{
}
562#define __trace_printk_check_format(fmt, args...)			\
563do {									\
564	if (0)								\
565		____trace_printk_check_format(fmt, ##args);		\
566} while (0)
567
568/**
569 * trace_printk - printf formatting in the ftrace buffer
570 * @fmt: the printf format for printing
571 *
572 * Note: __trace_printk is an internal function for trace_printk and
573 *       the @ip is passed in via the trace_printk macro.
574 *
575 * This function allows a kernel developer to debug fast path sections
576 * that printk is not appropriate for. By scattering in various
577 * printk like tracing in the code, a developer can quickly see
578 * where problems are occurring.
579 *
580 * This is intended as a debugging tool for the developer only.
581 * Please refrain from leaving trace_printks scattered around in
582 * your code. (Extra memory is used for special buffers that are
583 * allocated when trace_printk() is used)
584 *
 * A little optimization trick is done here. If there's only one
586 * argument, there's no need to scan the string for printf formats.
587 * The trace_puts() will suffice. But how can we take advantage of
588 * using trace_puts() when trace_printk() has only one argument?
589 * By stringifying the args and checking the size we can tell
590 * whether or not there are args. __stringify((__VA_ARGS__)) will
591 * turn into "()\0" with a size of 3 when there are no args, anything
592 * else will be bigger. All we need to do is define a string to this,
593 * and then take its size and compare to 3. If it's bigger, use
594 * do_trace_printk() otherwise, optimize it to trace_puts(). Then just
595 * let gcc optimize the rest.
596 */
597
598#define trace_printk(fmt, ...)				\
599do {							\
600	char _______STR[] = __stringify((__VA_ARGS__));	\
601	if (sizeof(_______STR) > 3)			\
602		do_trace_printk(fmt, ##__VA_ARGS__);	\
603	else						\
604		trace_puts(fmt);			\
605} while (0)
606
607#define do_trace_printk(fmt, args...)					\
608do {									\
609	static const char *trace_printk_fmt __used			\
610		__attribute__((section("__trace_printk_fmt"))) =	\
611		__builtin_constant_p(fmt) ? fmt : NULL;			\
612									\
613	__trace_printk_check_format(fmt, ##args);			\
614									\
615	if (__builtin_constant_p(fmt))					\
616		__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args);	\
617	else								\
618		__trace_printk(_THIS_IP_, fmt, ##args);			\
619} while (0)
620
621extern __printf(2, 3)
622int __trace_bprintk(unsigned long ip, const char *fmt, ...);
623
624extern __printf(2, 3)
625int __trace_printk(unsigned long ip, const char *fmt, ...);
626
627/**
628 * trace_puts - write a string into the ftrace buffer
629 * @str: the string to record
630 *
631 * Note: __trace_bputs is an internal function for trace_puts and
632 *       the @ip is passed in via the trace_puts macro.
633 *
634 * This is similar to trace_printk() but is made for those really fast
 * paths that a developer wants the least amount of "Heisenbug" effects,
636 * where the processing of the print format is still too much.
637 *
638 * This function allows a kernel developer to debug fast path sections
639 * that printk is not appropriate for. By scattering in various
640 * printk like tracing in the code, a developer can quickly see
641 * where problems are occurring.
642 *
643 * This is intended as a debugging tool for the developer only.
644 * Please refrain from leaving trace_puts scattered around in
645 * your code. (Extra memory is used for special buffers that are
646 * allocated when trace_puts() is used)
647 *
648 * Returns: 0 if nothing was written, positive # if string was.
649 *  (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
650 */
651
652#define trace_puts(str) ({						\
653	static const char *trace_printk_fmt __used			\
654		__attribute__((section("__trace_printk_fmt"))) =	\
655		__builtin_constant_p(str) ? str : NULL;			\
656									\
657	if (__builtin_constant_p(str))					\
658		__trace_bputs(_THIS_IP_, trace_printk_fmt);		\
659	else								\
660		__trace_puts(_THIS_IP_, str, strlen(str));		\
661})
662extern int __trace_bputs(unsigned long ip, const char *str);
663extern int __trace_puts(unsigned long ip, const char *str, int size);
664
665extern void trace_dump_stack(int skip);
666
667/*
668 * The double __builtin_constant_p is because gcc will give us an error
669 * if we try to allocate the static variable to fmt if it is not a
670 * constant. Even with the outer if statement.
671 */
672#define ftrace_vprintk(fmt, vargs)					\
673do {									\
674	if (__builtin_constant_p(fmt)) {				\
675		static const char *trace_printk_fmt __used		\
676		  __attribute__((section("__trace_printk_fmt"))) =	\
677			__builtin_constant_p(fmt) ? fmt : NULL;		\
678									\
679		__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs);	\
680	} else								\
681		__ftrace_vprintk(_THIS_IP_, fmt, vargs);		\
682} while (0)
683
684extern int
685__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
686
687extern int
688__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
689
690extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
691#else
692static inline void tracing_start(void) { }
693static inline void tracing_stop(void) { }
694static inline void trace_dump_stack(int skip) { }
695
696static inline void tracing_on(void) { }
697static inline void tracing_off(void) { }
698static inline int tracing_is_on(void) { return 0; }
699static inline void tracing_snapshot(void) { }
700static inline void tracing_snapshot_alloc(void) { }
701
/* !CONFIG_TRACING stub: accepts and discards arguments, reports 0 bytes. */
static inline __printf(1, 2)
int trace_printk(const char *fmt, ...)
{
	return 0;
}
/* !CONFIG_TRACING stub: accepts and discards arguments, reports 0 bytes. */
static inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
	return 0;
}
712static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
713#endif /* CONFIG_TRACING */
714
715/*
716 * min()/max()/clamp() macros that also do
717 * strict type-checking.. See the
718 * "unnecessary" pointer comparison.
719 */
720#define min(x, y) ({				\
721	typeof(x) _min1 = (x);			\
722	typeof(y) _min2 = (y);			\
723	(void) (&_min1 == &_min2);		\
724	_min1 < _min2 ? _min1 : _min2; })
725
726#define max(x, y) ({				\
727	typeof(x) _max1 = (x);			\
728	typeof(y) _max2 = (y);			\
729	(void) (&_max1 == &_max2);		\
730	_max1 > _max2 ? _max1 : _max2; })
731
732#define min3(x, y, z) min((typeof(x))min(x, y), z)
733#define max3(x, y, z) max((typeof(x))max(x, y), z)
734
735/**
736 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
737 * @x: value1
738 * @y: value2
739 */
740#define min_not_zero(x, y) ({			\
741	typeof(x) __x = (x);			\
742	typeof(y) __y = (y);			\
743	__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
744
745/**
746 * clamp - return a value clamped to a given range with strict typechecking
747 * @val: current value
748 * @lo: lowest allowable value
749 * @hi: highest allowable value
750 *
751 * This macro does strict typechecking of lo/hi to make sure they are of the
752 * same type as val.  See the unnecessary pointer comparisons.
753 */
754#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
755
756/*
757 * ..and if you can't take the strict
758 * types, you can specify one yourself.
759 *
760 * Or not use min/max/clamp at all, of course.
761 */
762#define min_t(type, x, y) ({			\
763	type __min1 = (x);			\
764	type __min2 = (y);			\
765	__min1 < __min2 ? __min1: __min2; })
766
767#define max_t(type, x, y) ({			\
768	type __max1 = (x);			\
769	type __max2 = (y);			\
770	__max1 > __max2 ? __max1: __max2; })
771
772/**
773 * clamp_t - return a value clamped to a given range using a given type
774 * @type: the type of variable to use
775 * @val: current value
776 * @lo: minimum allowable value
777 * @hi: maximum allowable value
778 *
779 * This macro does no typechecking and uses temporary variables of type
780 * 'type' to make all the comparisons.
781 */
782#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
783
784/**
785 * clamp_val - return a value clamped to a given range using val's type
786 * @val: current value
787 * @lo: minimum allowable value
788 * @hi: maximum allowable value
789 *
790 * This macro does no typechecking and uses temporary variables of whatever
791 * type the input argument 'val' is.  This is useful when val is an unsigned
792 * type and min and max are literals that will otherwise be assigned a signed
793 * integer type.
794 */
795#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
796
797
798/*
799 * swap - swap value of @a and @b
800 */
801#define swap(a, b) \
802	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
803
804/**
805 * container_of - cast a member of a structure out to the containing structure
806 * @ptr:	the pointer to the member.
807 * @type:	the type of the container struct this is embedded in.
808 * @member:	the name of the member within the struct.
809 *
810 */
811#define container_of(ptr, type, member) ({			\
812	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
813	(type *)( (char *)__mptr - offsetof(type,member) );})
814
815/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
816#ifdef CONFIG_FTRACE_MCOUNT_RECORD
817# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
818#endif
819
820/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
821#define VERIFY_OCTAL_PERMISSIONS(perms)					\
822	(BUILD_BUG_ON_ZERO((perms) < 0) +				\
823	 BUILD_BUG_ON_ZERO((perms) > 0777) +				\
824	 /* User perms >= group perms >= other perms */			\
825	 BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) +	\
826	 BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) +	\
827	 /* Other writable?  Generally considered a bad idea. */	\
828	 BUILD_BUG_ON_ZERO((perms) & 2) +				\
829	 (perms))
830#endif
831