/*
 *  arch/arm/include/asm/thread_info.h
 *
 *  Copyright (C) 2002 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_THREAD_INFO_H
#define __ASM_ARM_THREAD_INFO_H

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <asm/fpstate.h>
#include <asm/page.h>

#define THREAD_SIZE_ORDER	1
#define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_START_SP		(THREAD_SIZE - 8)
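
/*
 * Worked example (assuming the common 4 KiB PAGE_SIZE): THREAD_SIZE is
 * then PAGE_SIZE << 1 = 8 KiB, so each task's thread_info and kernel stack
 * share one 8 KiB block and the initial stack pointer sits 8 bytes below
 * the top of that block (THREAD_START_SP = 8184).
 */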

#ifndef __ASSEMBLY__

struct task_struct;

#include <asm/types.h>
#include <asm/domain.h>

typedef unsigned long mm_segment_t;

struct cpu_context_save {
	__u32	r4;
	__u32	r5;
	__u32	r6;
	__u32	r7;
	__u32	r8;
	__u32	r9;
	__u32	sl;
	__u32	fp;
	__u32	sp;
	__u32	pc;
	__u32	extra[2];		/* Xscale 'acc' register, etc */
};

/*
 * low level task data that entry.S needs immediate access to.
 * __switch_to() assumes cpu_context follows immediately after cpu_domain.
 */
struct thread_info {
	unsigned long		flags;		/* low level flags */
	int			preempt_count;	/* 0 => preemptable, <0 => bug */
	mm_segment_t		addr_limit;	/* address limit */
	struct task_struct	*task;		/* main task structure */
	__u32			cpu;		/* cpu */
	__u32			cpu_domain;	/* cpu domain */
	struct cpu_context_save	cpu_context;	/* cpu context */
	__u32			syscall;	/* syscall number */
	__u8			used_cp[16];	/* thread used copro */
	unsigned long		tp_value[2];	/* TLS registers */
#ifdef CONFIG_CRUNCH
	struct crunch_state	crunchstate;
#endif
	union fp_state		fpstate __attribute__((aligned(8)));
	union vfp_state		vfpstate;
#ifdef CONFIG_ARM_THUMBEE
	unsigned long		thumbee_state;	/* ThumbEE Handler Base register */
#endif
};
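
/*
 * Illustrative sketch (not part of this header): the layout assumption
 * documented above could be asserted from C with something like
 *
 *	BUILD_BUG_ON(offsetof(struct thread_info, cpu_context) !=
 *		     offsetof(struct thread_info, cpu_domain) + sizeof(__u32));
 *
 * In practice, entry code reaches these fields through the TI_* offsets
 * generated by asm-offsets.c.
 */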

#define INIT_THREAD_INFO(tsk)						\
{									\
	.task		= &tsk,						\
	.flags		= 0,						\
	.preempt_count	= INIT_PREEMPT_COUNT,				\
	.addr_limit	= KERNEL_DS,					\
	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
}
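
/*
 * Note: the initial cpu_domain value grants the init task manager
 * (unchecked) access to the user and kernel domains and client
 * (permission-checked) access to the I/O domain; see asm/domain.h for the
 * encodings.
 */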

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

/*
 * how to get the current stack pointer in C
 */
register unsigned long current_stack_pointer asm ("sp");

/*
 * how to get the thread information struct from C
 */
static inline struct thread_info *current_thread_info(void) __attribute_const__;

static inline struct thread_info *current_thread_info(void)
{
	return (struct thread_info *)
		(current_stack_pointer & ~(THREAD_SIZE - 1));
}
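
/*
 * Usage sketch (illustrative): because every kernel stack is a single
 * THREAD_SIZE-aligned allocation with thread_info at its base, masking the
 * stack pointer is enough to recover it, e.g.
 *
 *	struct thread_info *ti = current_thread_info();
 *	if (ti->flags & _TIF_NEED_RESCHED)
 *		schedule();
 *
 * Generic helpers such as test_thread_flag() in <linux/thread_info.h> are
 * built on this same lookup.
 */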
100
101#define thread_saved_pc(tsk)	\
102	((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
103#define thread_saved_sp(tsk)	\
104	((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
105
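/*
 * ARM code uses fp (r11) as the frame pointer while Thumb-2 code
 * conventionally uses r7, hence the two definitions of thread_saved_fp()
 * below.
 */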
#ifndef CONFIG_THUMB2_KERNEL
#define thread_saved_fp(tsk)	\
	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
#else
#define thread_saved_fp(tsk)	\
	((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
#endif

extern void crunch_task_disable(struct thread_info *);
extern void crunch_task_copy(struct thread_info *, void *);
extern void crunch_task_restore(struct thread_info *, void *);
extern void crunch_task_release(struct thread_info *);

extern void iwmmxt_task_disable(struct thread_info *);
extern void iwmmxt_task_copy(struct thread_info *, void *);
extern void iwmmxt_task_restore(struct thread_info *, void *);
extern void iwmmxt_task_release(struct thread_info *);
extern void iwmmxt_task_switch(struct thread_info *);

extern void vfp_sync_hwstate(struct thread_info *);
extern void vfp_flush_hwstate(struct thread_info *);

struct user_vfp;
struct user_vfp_exc;

extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
					   struct user_vfp_exc __user *);
extern int vfp_restore_user_hwstate(struct user_vfp __user *,
				    struct user_vfp_exc __user *);
#endif

/*
 * thread information flags:
 *  TIF_SYSCALL_TRACE	- syscall trace active
 *  TIF_SYSCALL_AUDIT	- syscall auditing active
 *  TIF_SIGPENDING	- signal pending
 *  TIF_NEED_RESCHED	- rescheduling necessary
 *  TIF_NOTIFY_RESUME	- callback before returning to user
 */
#define TIF_SIGPENDING		0
#define TIF_NEED_RESCHED	1
#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
#define TIF_UPROBE		7
#define TIF_SYSCALL_TRACE	8
#define TIF_SYSCALL_AUDIT	9
#define TIF_SYSCALL_TRACEPOINT	10
#define TIF_SECCOMP		11	/* seccomp syscall filtering active */
#define TIF_NOHZ		12	/* in adaptive nohz mode */
#define TIF_USING_IWMMXT	17
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK	20

#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_UPROBE		(1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)

/* Checks for any syscall work in entry-common.S */
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
			   _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
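
/*
 * Illustrative sketch (not taken from this file): the syscall entry/exit
 * paths test the flags word against this mask and take the slow tracing
 * path when any bit is set, roughly
 *
 *	if (current_thread_info()->flags & _TIF_SYSCALL_WORK)
 *		syscall_trace_enter(regs, scno);
 *
 * The real test is performed in assembly in entry-common.S.
 */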

/*
 * Change these and you break ASM code in entry-common.S
 */
#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
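
/*
 * Illustrative sketch (not taken from this file): on the return-to-user
 * path the exit code checks these bits and, when any are set, drops into C
 * to handle them, roughly
 *
 *	if (current_thread_info()->flags & _TIF_WORK_MASK)
 *		do_work_pending(regs, thread_flags, syscall);
 *
 * which performs rescheduling, signal delivery and resume callbacks.
 */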

#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */