Searched refs:thread (Results 1 - 200 of 1944) sorted by relevance


/linux-4.1.27/arch/mips/include/asm/
asmmacro-32.h
15 .macro fpu_save_single thread tmp=t0
19 s.d $f0, THREAD_FPR0(\thread)
20 s.d $f2, THREAD_FPR2(\thread)
21 s.d $f4, THREAD_FPR4(\thread)
22 s.d $f6, THREAD_FPR6(\thread)
23 s.d $f8, THREAD_FPR8(\thread)
24 s.d $f10, THREAD_FPR10(\thread)
25 s.d $f12, THREAD_FPR12(\thread)
26 s.d $f14, THREAD_FPR14(\thread)
27 s.d $f16, THREAD_FPR16(\thread)
28 s.d $f18, THREAD_FPR18(\thread)
29 s.d $f20, THREAD_FPR20(\thread)
30 s.d $f22, THREAD_FPR22(\thread)
31 s.d $f24, THREAD_FPR24(\thread)
32 s.d $f26, THREAD_FPR26(\thread)
33 s.d $f28, THREAD_FPR28(\thread)
34 s.d $f30, THREAD_FPR30(\thread)
35 sw \tmp, THREAD_FCR31(\thread)
39 .macro fpu_restore_single thread tmp=t0
42 lw \tmp, THREAD_FCR31(\thread)
43 l.d $f0, THREAD_FPR0(\thread)
44 l.d $f2, THREAD_FPR2(\thread)
45 l.d $f4, THREAD_FPR4(\thread)
46 l.d $f6, THREAD_FPR6(\thread)
47 l.d $f8, THREAD_FPR8(\thread)
48 l.d $f10, THREAD_FPR10(\thread)
49 l.d $f12, THREAD_FPR12(\thread)
50 l.d $f14, THREAD_FPR14(\thread)
51 l.d $f16, THREAD_FPR16(\thread)
52 l.d $f18, THREAD_FPR18(\thread)
53 l.d $f20, THREAD_FPR20(\thread)
54 l.d $f22, THREAD_FPR22(\thread)
55 l.d $f24, THREAD_FPR24(\thread)
56 l.d $f26, THREAD_FPR26(\thread)
57 l.d $f28, THREAD_FPR28(\thread)
58 l.d $f30, THREAD_FPR30(\thread)
63 .macro cpu_save_nonscratch thread
64 LONG_S s0, THREAD_REG16(\thread)
65 LONG_S s1, THREAD_REG17(\thread)
66 LONG_S s2, THREAD_REG18(\thread)
67 LONG_S s3, THREAD_REG19(\thread)
68 LONG_S s4, THREAD_REG20(\thread)
69 LONG_S s5, THREAD_REG21(\thread)
70 LONG_S s6, THREAD_REG22(\thread)
71 LONG_S s7, THREAD_REG23(\thread)
72 LONG_S sp, THREAD_REG29(\thread)
73 LONG_S fp, THREAD_REG30(\thread)
76 .macro cpu_restore_nonscratch thread
77 LONG_L s0, THREAD_REG16(\thread)
78 LONG_L s1, THREAD_REG17(\thread)
79 LONG_L s2, THREAD_REG18(\thread)
80 LONG_L s3, THREAD_REG19(\thread)
81 LONG_L s4, THREAD_REG20(\thread)
82 LONG_L s5, THREAD_REG21(\thread)
83 LONG_L s6, THREAD_REG22(\thread)
84 LONG_L s7, THREAD_REG23(\thread)
85 LONG_L sp, THREAD_REG29(\thread)
86 LONG_L fp, THREAD_REG30(\thread)
87 LONG_L ra, THREAD_REG31(\thread)
asmmacro-64.h
16 .macro cpu_save_nonscratch thread
17 LONG_S s0, THREAD_REG16(\thread)
18 LONG_S s1, THREAD_REG17(\thread)
19 LONG_S s2, THREAD_REG18(\thread)
20 LONG_S s3, THREAD_REG19(\thread)
21 LONG_S s4, THREAD_REG20(\thread)
22 LONG_S s5, THREAD_REG21(\thread)
23 LONG_S s6, THREAD_REG22(\thread)
24 LONG_S s7, THREAD_REG23(\thread)
25 LONG_S sp, THREAD_REG29(\thread)
26 LONG_S fp, THREAD_REG30(\thread)
29 .macro cpu_restore_nonscratch thread
30 LONG_L s0, THREAD_REG16(\thread)
31 LONG_L s1, THREAD_REG17(\thread)
32 LONG_L s2, THREAD_REG18(\thread)
33 LONG_L s3, THREAD_REG19(\thread)
34 LONG_L s4, THREAD_REG20(\thread)
35 LONG_L s5, THREAD_REG21(\thread)
36 LONG_L s6, THREAD_REG22(\thread)
37 LONG_L s7, THREAD_REG23(\thread)
38 LONG_L sp, THREAD_REG29(\thread)
39 LONG_L fp, THREAD_REG30(\thread)
40 LONG_L ra, THREAD_REG31(\thread)
asmmacro.h
59 .macro fpu_save_16even thread tmp=t0
63 sdc1 $f0, THREAD_FPR0(\thread)
64 sdc1 $f2, THREAD_FPR2(\thread)
65 sdc1 $f4, THREAD_FPR4(\thread)
66 sdc1 $f6, THREAD_FPR6(\thread)
67 sdc1 $f8, THREAD_FPR8(\thread)
68 sdc1 $f10, THREAD_FPR10(\thread)
69 sdc1 $f12, THREAD_FPR12(\thread)
70 sdc1 $f14, THREAD_FPR14(\thread)
71 sdc1 $f16, THREAD_FPR16(\thread)
72 sdc1 $f18, THREAD_FPR18(\thread)
73 sdc1 $f20, THREAD_FPR20(\thread)
74 sdc1 $f22, THREAD_FPR22(\thread)
75 sdc1 $f24, THREAD_FPR24(\thread)
76 sdc1 $f26, THREAD_FPR26(\thread)
77 sdc1 $f28, THREAD_FPR28(\thread)
78 sdc1 $f30, THREAD_FPR30(\thread)
79 sw \tmp, THREAD_FCR31(\thread)
83 .macro fpu_save_16odd thread
87 sdc1 $f1, THREAD_FPR1(\thread)
88 sdc1 $f3, THREAD_FPR3(\thread)
89 sdc1 $f5, THREAD_FPR5(\thread)
90 sdc1 $f7, THREAD_FPR7(\thread)
91 sdc1 $f9, THREAD_FPR9(\thread)
92 sdc1 $f11, THREAD_FPR11(\thread)
93 sdc1 $f13, THREAD_FPR13(\thread)
94 sdc1 $f15, THREAD_FPR15(\thread)
95 sdc1 $f17, THREAD_FPR17(\thread)
96 sdc1 $f19, THREAD_FPR19(\thread)
97 sdc1 $f21, THREAD_FPR21(\thread)
98 sdc1 $f23, THREAD_FPR23(\thread)
99 sdc1 $f25, THREAD_FPR25(\thread)
100 sdc1 $f27, THREAD_FPR27(\thread)
101 sdc1 $f29, THREAD_FPR29(\thread)
102 sdc1 $f31, THREAD_FPR31(\thread)
106 .macro fpu_save_double thread status tmp
111 fpu_save_16odd \thread
114 fpu_save_16even \thread \tmp
117 .macro fpu_restore_16even thread tmp=t0
120 lw \tmp, THREAD_FCR31(\thread)
121 ldc1 $f0, THREAD_FPR0(\thread)
122 ldc1 $f2, THREAD_FPR2(\thread)
123 ldc1 $f4, THREAD_FPR4(\thread)
124 ldc1 $f6, THREAD_FPR6(\thread)
125 ldc1 $f8, THREAD_FPR8(\thread)
126 ldc1 $f10, THREAD_FPR10(\thread)
127 ldc1 $f12, THREAD_FPR12(\thread)
128 ldc1 $f14, THREAD_FPR14(\thread)
129 ldc1 $f16, THREAD_FPR16(\thread)
130 ldc1 $f18, THREAD_FPR18(\thread)
131 ldc1 $f20, THREAD_FPR20(\thread)
132 ldc1 $f22, THREAD_FPR22(\thread)
133 ldc1 $f24, THREAD_FPR24(\thread)
134 ldc1 $f26, THREAD_FPR26(\thread)
135 ldc1 $f28, THREAD_FPR28(\thread)
136 ldc1 $f30, THREAD_FPR30(\thread)
140 .macro fpu_restore_16odd thread
144 ldc1 $f1, THREAD_FPR1(\thread)
145 ldc1 $f3, THREAD_FPR3(\thread)
146 ldc1 $f5, THREAD_FPR5(\thread)
147 ldc1 $f7, THREAD_FPR7(\thread)
148 ldc1 $f9, THREAD_FPR9(\thread)
149 ldc1 $f11, THREAD_FPR11(\thread)
150 ldc1 $f13, THREAD_FPR13(\thread)
151 ldc1 $f15, THREAD_FPR15(\thread)
152 ldc1 $f17, THREAD_FPR17(\thread)
153 ldc1 $f19, THREAD_FPR19(\thread)
154 ldc1 $f21, THREAD_FPR21(\thread)
155 ldc1 $f23, THREAD_FPR23(\thread)
156 ldc1 $f25, THREAD_FPR25(\thread)
157 ldc1 $f27, THREAD_FPR27(\thread)
158 ldc1 $f29, THREAD_FPR29(\thread)
159 ldc1 $f31, THREAD_FPR31(\thread)
163 .macro fpu_restore_double thread status tmp
169 fpu_restore_16odd \thread
172 fpu_restore_16even \thread \tmp
374 .macro msa_save_all thread
375 st_d 0, THREAD_FPR0, \thread
376 st_d 1, THREAD_FPR1, \thread
377 st_d 2, THREAD_FPR2, \thread
378 st_d 3, THREAD_FPR3, \thread
379 st_d 4, THREAD_FPR4, \thread
380 st_d 5, THREAD_FPR5, \thread
381 st_d 6, THREAD_FPR6, \thread
382 st_d 7, THREAD_FPR7, \thread
383 st_d 8, THREAD_FPR8, \thread
384 st_d 9, THREAD_FPR9, \thread
385 st_d 10, THREAD_FPR10, \thread
386 st_d 11, THREAD_FPR11, \thread
387 st_d 12, THREAD_FPR12, \thread
388 st_d 13, THREAD_FPR13, \thread
389 st_d 14, THREAD_FPR14, \thread
390 st_d 15, THREAD_FPR15, \thread
391 st_d 16, THREAD_FPR16, \thread
392 st_d 17, THREAD_FPR17, \thread
393 st_d 18, THREAD_FPR18, \thread
394 st_d 19, THREAD_FPR19, \thread
395 st_d 20, THREAD_FPR20, \thread
396 st_d 21, THREAD_FPR21, \thread
397 st_d 22, THREAD_FPR22, \thread
398 st_d 23, THREAD_FPR23, \thread
399 st_d 24, THREAD_FPR24, \thread
400 st_d 25, THREAD_FPR25, \thread
401 st_d 26, THREAD_FPR26, \thread
402 st_d 27, THREAD_FPR27, \thread
403 st_d 28, THREAD_FPR28, \thread
404 st_d 29, THREAD_FPR29, \thread
405 st_d 30, THREAD_FPR30, \thread
406 st_d 31, THREAD_FPR31, \thread
411 sw $1, THREAD_MSA_CSR(\thread)
415 .macro msa_restore_all thread
419 lw $1, THREAD_MSA_CSR(\thread)
422 ld_d 0, THREAD_FPR0, \thread
423 ld_d 1, THREAD_FPR1, \thread
424 ld_d 2, THREAD_FPR2, \thread
425 ld_d 3, THREAD_FPR3, \thread
426 ld_d 4, THREAD_FPR4, \thread
427 ld_d 5, THREAD_FPR5, \thread
428 ld_d 6, THREAD_FPR6, \thread
429 ld_d 7, THREAD_FPR7, \thread
430 ld_d 8, THREAD_FPR8, \thread
431 ld_d 9, THREAD_FPR9, \thread
432 ld_d 10, THREAD_FPR10, \thread
433 ld_d 11, THREAD_FPR11, \thread
434 ld_d 12, THREAD_FPR12, \thread
435 ld_d 13, THREAD_FPR13, \thread
436 ld_d 14, THREAD_FPR14, \thread
437 ld_d 15, THREAD_FPR15, \thread
438 ld_d 16, THREAD_FPR16, \thread
439 ld_d 17, THREAD_FPR17, \thread
440 ld_d 18, THREAD_FPR18, \thread
441 ld_d 19, THREAD_FPR19, \thread
442 ld_d 20, THREAD_FPR20, \thread
443 ld_d 21, THREAD_FPR21, \thread
444 ld_d 22, THREAD_FPR22, \thread
445 ld_d 23, THREAD_FPR23, \thread
446 ld_d 24, THREAD_FPR24, \thread
447 ld_d 25, THREAD_FPR25, \thread
448 ld_d 26, THREAD_FPR26, \thread
449 ld_d 27, THREAD_FPR27, \thread
450 ld_d 28, THREAD_FPR28, \thread
451 ld_d 29, THREAD_FPR29, \thread
452 ld_d 30, THREAD_FPR30, \thread
453 ld_d 31, THREAD_FPR31, \thread
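
The THREAD_FPR*, THREAD_FCR31 and THREAD_MSA_CSR offsets indexed by these macros are generated at build time by the asm-offsets mechanism rather than written by hand. A minimal sketch of that generation, assuming the MIPS thread_struct field names (the real code lives in arch/mips/kernel/asm-offsets.c):

/* Sketch only: emit THREAD_* constants from struct task_struct so the
 * assembler macros above can index a task's saved FPU state. Field
 * names are assumptions, not verified against this exact tree. */
#include <linux/kbuild.h>
#include <linux/sched.h>

void output_thread_fpu_defines(void)
{
        OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
        OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
        /* ... one OFFSET() per saved floating-point register ... */
        OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
}
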
dsp.h
45 tsk->thread.dsp.dspr[0] = mfhi1(); \
46 tsk->thread.dsp.dspr[1] = mflo1(); \
47 tsk->thread.dsp.dspr[2] = mfhi2(); \
48 tsk->thread.dsp.dspr[3] = mflo2(); \
49 tsk->thread.dsp.dspr[4] = mfhi3(); \
50 tsk->thread.dsp.dspr[5] = mflo3(); \
51 tsk->thread.dsp.dspcontrol = rddsp(DSP_MASK); \
62 mthi1(tsk->thread.dsp.dspr[0]); \
63 mtlo1(tsk->thread.dsp.dspr[1]); \
64 mthi2(tsk->thread.dsp.dspr[2]); \
65 mtlo2(tsk->thread.dsp.dspr[3]); \
66 mthi3(tsk->thread.dsp.dspr[4]); \
67 mtlo3(tsk->thread.dsp.dspr[5]); \
68 wrdsp(tsk->thread.dsp.dspcontrol, DSP_MASK); \
82 tsk->thread.dsp.dspr; \
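
These DSP save/restore macro bodies are paired around a task switch. A hedged sketch of that pairing, assuming the excerpted macros are named __save_dsp()/__restore_dsp() and that cpu_has_dsp guards them; dsp_context_switch() is a hypothetical wrapper, the real hook being the MIPS switch_to() path:

/* Hedged sketch, not the kernel's actual switch code: spill the
 * outgoing task's DSP accumulators and control word into its thread
 * struct, then reload the incoming task's copies. */
static inline void dsp_context_switch(struct task_struct *prev,
                                      struct task_struct *next)
{
        if (cpu_has_dsp) {
                __save_dsp(prev);       /* mfhi1()/mflo1()/... into prev->thread.dsp */
                __restore_dsp(next);    /* mthi1()/mtlo1()/... from next->thread.dsp */
        }
}
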
cop2.h
19 #define cop2_save(r) octeon_cop2_save(&(r)->thread.cp2)
20 #define cop2_restore(r) octeon_cop2_restore(&(r)->thread.cp2)
30 #define cop2_save(r) nlm_cop2_save(&(r)->thread.cp2)
31 #define cop2_restore(r) nlm_cop2_restore(&(r)->thread.cp2)
thread_info.h
1 /* thread_info.h: MIPS low-level thread information
27 unsigned long tp_value; /* thread pointer */
32 * thread address space limit:
34 * 0xffffffff for kernel-thread
41 * macros/functions for gaining access to the thread information structure
55 /* How to get the thread information struct from C. */
65 /* thread information allocation */
91 * thread information flags
110 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
/linux-4.1.27/tools/perf/util/
thread.h
14 struct thread { struct
27 bool dead; /* if set thread has exited */
39 struct thread *thread__new(pid_t pid, pid_t tid);
40 int thread__init_map_groups(struct thread *thread, struct machine *machine);
41 void thread__delete(struct thread *thread);
43 struct thread *thread__get(struct thread *thread);
44 void thread__put(struct thread *thread);
46 static inline void __thread__zput(struct thread **thread) __thread__zput() argument
48 thread__put(*thread); __thread__zput()
49 *thread = NULL; __thread__zput()
52 #define thread__zput(thread) __thread__zput(&thread)
54 static inline void thread__exited(struct thread *thread) thread__exited() argument
56 thread->dead = true; thread__exited()
59 int __thread__set_comm(struct thread *thread, const char *comm, u64 timestamp,
61 static inline int thread__set_comm(struct thread *thread, const char *comm, thread__set_comm() argument
64 return __thread__set_comm(thread, comm, timestamp, false); thread__set_comm()
67 int thread__comm_len(struct thread *thread);
68 struct comm *thread__comm(const struct thread *thread);
69 struct comm *thread__exec_comm(const struct thread *thread);
70 const char *thread__comm_str(const struct thread *thread);
71 void thread__insert_map(struct thread *thread, struct map *map);
72 int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp);
73 size_t thread__fprintf(struct thread *thread, FILE *fp);
75 void thread__find_addr_map(struct thread *thread,
79 void thread__find_addr_location(struct thread *thread,
83 void thread__find_cpumode_addr_location(struct thread *thread,
87 static inline void *thread__priv(struct thread *thread) thread__priv() argument
89 return thread->priv; thread__priv()
92 static inline void thread__set_priv(struct thread *thread, void *p) thread__set_priv() argument
94 thread->priv = p; thread__set_priv()
97 static inline bool thread__is_filtered(struct thread *thread) thread__is_filtered() argument
100 !strlist__has_entry(symbol_conf.comm_list, thread__comm_str(thread))) { thread__is_filtered()
105 !intlist__has_entry(symbol_conf.pid_list, thread->pid_)) { thread__is_filtered()
110 !intlist__has_entry(symbol_conf.tid_list, thread->tid)) { thread__is_filtered()
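
The declarations above amount to a small reference-counted object API. A hedged usage sketch built only from names in this excerpt (the pid/tid values and the comm string are arbitrary):

#include <stdio.h>
#include "thread.h"

/* Sketch: create a perf thread object, name it, print it, then drop
 * the reference; thread__zput() also NULLs the local pointer. */
static int thread_api_example(void)
{
        struct thread *thread = thread__new(1234 /* pid */, 1234 /* tid */);

        if (thread == NULL)
                return -1;

        thread__set_comm(thread, "example", 0 /* timestamp */);
        fprintf(stderr, "Thread %d %s\n", thread->tid, thread__comm_str(thread));

        thread__zput(thread);
        return 0;
}
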
unwind.h
7 #include "thread.h"
19 struct thread *thread,
24 int unwind__prepare_access(struct thread *thread);
25 void unwind__flush_access(struct thread *thread);
26 void unwind__finish_access(struct thread *thread);
28 static inline int unwind__prepare_access(struct thread *thread __maybe_unused) unwind__prepare_access()
33 static inline void unwind__flush_access(struct thread *thread __maybe_unused) {} unwind__flush_access()
34 static inline void unwind__finish_access(struct thread *thread __maybe_unused) {} unwind__finish_access()
40 struct thread *thread __maybe_unused, unwind__get_entries()
47 static inline int unwind__prepare_access(struct thread *thread __maybe_unused) unwind__prepare_access()
52 static inline void unwind__flush_access(struct thread *thread __maybe_unused) {} unwind__flush_access()
53 static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
thread.c
6 #include "thread.h"
7 #include "thread-stack.h"
13 int thread__init_map_groups(struct thread *thread, struct machine *machine) thread__init_map_groups() argument
15 struct thread *leader; thread__init_map_groups()
16 pid_t pid = thread->pid_; thread__init_map_groups()
18 if (pid == thread->tid || pid == -1) { thread__init_map_groups()
19 thread->mg = map_groups__new(machine); thread__init_map_groups()
23 thread->mg = map_groups__get(leader->mg); thread__init_map_groups()
26 return thread->mg ? 0 : -1; thread__init_map_groups()
29 struct thread *thread__new(pid_t pid, pid_t tid) thread__new()
33 struct thread *thread = zalloc(sizeof(*thread)); thread__new() local
35 if (thread != NULL) { thread__new()
36 thread->pid_ = pid; thread__new()
37 thread->tid = tid; thread__new()
38 thread->ppid = -1; thread__new()
39 thread->cpu = -1; thread__new()
40 INIT_LIST_HEAD(&thread->comm_list); thread__new()
42 if (unwind__prepare_access(thread) < 0) thread__new()
55 list_add(&comm->list, &thread->comm_list); thread__new()
59 return thread; thread__new()
62 free(thread); thread__new()
66 void thread__delete(struct thread *thread) thread__delete() argument
70 thread_stack__free(thread); thread__delete()
72 if (thread->mg) { thread__delete()
73 map_groups__put(thread->mg); thread__delete()
74 thread->mg = NULL; thread__delete()
76 list_for_each_entry_safe(comm, tmp, &thread->comm_list, list) { thread__delete()
80 unwind__finish_access(thread); thread__delete()
82 free(thread); thread__delete()
85 struct thread *thread__get(struct thread *thread) thread__get() argument
87 ++thread->refcnt; thread__get()
88 return thread; thread__get()
91 void thread__put(struct thread *thread) thread__put() argument
93 if (thread && --thread->refcnt == 0) { thread__put()
94 list_del_init(&thread->node); thread__put()
95 thread__delete(thread); thread__put()
99 struct comm *thread__comm(const struct thread *thread) thread__comm() argument
101 if (list_empty(&thread->comm_list)) thread__comm()
104 return list_first_entry(&thread->comm_list, struct comm, list); thread__comm()
107 struct comm *thread__exec_comm(const struct thread *thread) thread__exec_comm() argument
111 list_for_each_entry(comm, &thread->comm_list, list) { thread__exec_comm()
120 int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp, __thread__set_comm() argument
123 struct comm *new, *curr = thread__comm(thread); __thread__set_comm()
127 if (!thread->comm_set) { __thread__set_comm()
135 list_add(&new->list, &thread->comm_list); __thread__set_comm()
138 unwind__flush_access(thread); __thread__set_comm()
141 thread->comm_set = true; __thread__set_comm()
146 const char *thread__comm_str(const struct thread *thread) thread__comm_str() argument
148 const struct comm *comm = thread__comm(thread); thread__comm_str()
157 int thread__comm_len(struct thread *thread) thread__comm_len() argument
159 if (!thread->comm_len) { thread__comm_len()
160 const char *comm = thread__comm_str(thread); thread__comm_len()
163 thread->comm_len = strlen(comm); thread__comm_len()
166 return thread->comm_len; thread__comm_len()
169 size_t thread__fprintf(struct thread *thread, FILE *fp) thread__fprintf() argument
171 return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) + thread__fprintf()
172 map_groups__fprintf(thread->mg, fp); thread__fprintf()
175 void thread__insert_map(struct thread *thread, struct map *map) thread__insert_map() argument
177 map_groups__fixup_overlappings(thread->mg, map, stderr); thread__insert_map()
178 map_groups__insert(thread->mg, map); thread__insert_map()
181 static int thread__clone_map_groups(struct thread *thread, thread__clone_map_groups() argument
182 struct thread *parent) thread__clone_map_groups()
186 /* This is new thread, we share map groups for process. */ thread__clone_map_groups()
187 if (thread->pid_ == parent->pid_) thread__clone_map_groups()
192 if (map_groups__clone(thread->mg, parent->mg, i) < 0) thread__clone_map_groups()
198 int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp) thread__fork() argument
206 err = thread__set_comm(thread, comm, timestamp); thread__fork()
211 thread->ppid = parent->tid; thread__fork()
212 return thread__clone_map_groups(thread, parent); thread__fork()
215 void thread__find_cpumode_addr_location(struct thread *thread, thread__find_cpumode_addr_location() argument
228 thread__find_addr_location(thread, cpumodes[i], type, addr, al); thread__find_cpumode_addr_location()
unwind-libdw.h
6 #include "thread.h"
9 bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg);
15 struct thread *thread; member in struct:unwind_info
vdso.h
24 struct thread;
26 struct dso *vdso__dso_findnew(struct machine *machine, struct thread *thread);
thread-stack.h
2 * thread-stack.h: Synthesize a thread's stack using call / return events
24 struct thread;
47 * @thread: thread in which call/return occurred
59 struct thread *thread; member in struct:call_return
94 int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
96 void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr);
97 void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
99 void thread_stack__free(struct thread *thread);
105 int thread_stack__process(struct thread *thread, struct comm *comm,
thread-stack.c
2 * thread-stack.c: Synthesize a thread's stack using call / return events
18 #include "thread.h"
25 #include "thread-stack.h"
66 * struct thread_stack_entry - thread stack entry.
84 * struct thread_stack - thread stack constructed from 'call' and 'return'
126 static struct thread_stack *thread_stack__new(struct thread *thread, thread_stack__new() argument
140 if (thread->mg && thread->mg->machine) thread_stack__new()
141 ts->kernel_start = machine__kernel_start(thread->mg->machine); thread_stack__new()
156 pr_warning("Out of memory: discarding thread stack\n"); thread_stack__push()
195 static int thread_stack__call_return(struct thread *thread, thread_stack__call_return() argument
202 .thread = thread, thread_stack__call_return()
222 static int thread_stack__flush(struct thread *thread, struct thread_stack *ts) thread_stack__flush() argument
233 err = thread_stack__call_return(thread, ts, --ts->cnt, thread_stack__flush()
236 pr_err("Error flushing thread stack!\n"); thread_stack__flush()
245 int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip, thread_stack__event() argument
248 if (!thread) thread_stack__event()
251 if (!thread->ts) { thread_stack__event()
252 thread->ts = thread_stack__new(thread, NULL); thread_stack__event()
253 if (!thread->ts) { thread_stack__event()
254 pr_warning("Out of memory: no thread stack\n"); thread_stack__event()
257 thread->ts->trace_nr = trace_nr; thread_stack__event()
265 if (trace_nr != thread->ts->trace_nr) { thread_stack__event()
266 if (thread->ts->trace_nr) thread_stack__event()
267 thread_stack__flush(thread, thread->ts); thread_stack__event()
268 thread->ts->trace_nr = trace_nr; thread_stack__event()
272 if (thread->ts->crp) thread_stack__event()
283 return thread_stack__push(thread->ts, ret_addr); thread_stack__event()
287 thread_stack__pop(thread->ts, to_ip); thread_stack__event()
293 void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr) thread_stack__set_trace_nr() argument
295 if (!thread || !thread->ts) thread_stack__set_trace_nr()
298 if (trace_nr != thread->ts->trace_nr) { thread_stack__set_trace_nr()
299 if (thread->ts->trace_nr) thread_stack__set_trace_nr()
300 thread_stack__flush(thread, thread->ts); thread_stack__set_trace_nr()
301 thread->ts->trace_nr = trace_nr; thread_stack__set_trace_nr()
305 void thread_stack__free(struct thread *thread) thread_stack__free() argument
307 if (thread->ts) { thread_stack__free()
308 thread_stack__flush(thread, thread->ts); thread_stack__free()
309 zfree(&thread->ts->stack); thread_stack__free()
310 zfree(&thread->ts); thread_stack__free()
314 void thread_stack__sample(struct thread *thread, struct ip_callchain *chain, thread_stack__sample() argument
319 if (!thread || !thread->ts) thread_stack__sample()
322 chain->nr = min(sz, thread->ts->cnt + 1); thread_stack__sample()
327 chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr; thread_stack__sample()
485 static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts, thread_stack__pop_cp() argument
498 return thread_stack__call_return(thread, ts, --ts->cnt, thread_stack__pop_cp()
503 return thread_stack__call_return(thread, ts, --ts->cnt, thread_stack__pop_cp()
513 err = thread_stack__call_return(thread, ts, thread_stack__pop_cp()
520 return thread_stack__call_return(thread, ts, --ts->cnt, thread_stack__pop_cp()
528 static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts, thread_stack__bottom() argument
553 return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp, thread_stack__bottom()
557 static int thread_stack__no_call_return(struct thread *thread, thread_stack__no_call_return() argument
571 err = thread_stack__call_return(thread, ts, --ts->cnt, thread_stack__no_call_return()
591 err = thread_stack__call_return(thread, ts, --ts->cnt, thread_stack__no_call_return()
615 return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref, thread_stack__no_call_return()
619 static int thread_stack__trace_begin(struct thread *thread, thread_stack__trace_begin() argument
632 err = thread_stack__call_return(thread, ts, --ts->cnt, thread_stack__trace_begin()
663 int thread_stack__process(struct thread *thread, struct comm *comm, thread_stack__process() argument
669 struct thread_stack *ts = thread->ts; thread_stack__process()
675 thread_stack__free(thread); thread_stack__process()
676 thread->ts = thread_stack__new(thread, crp); thread_stack__process()
677 if (!thread->ts) thread_stack__process()
679 ts = thread->ts; thread_stack__process()
683 thread->ts = thread_stack__new(thread, crp); thread_stack__process()
684 if (!thread->ts) thread_stack__process()
686 ts = thread->ts; thread_stack__process()
691 if (ts->comm != comm && thread->pid_ == thread->tid) { thread_stack__process()
692 err = thread_stack__flush(thread, ts); thread_stack__process()
700 err = thread_stack__bottom(thread, ts, sample, from_al, to_al, thread_stack__process()
732 err = thread_stack__pop_cp(thread, ts, sample->addr, thread_stack__process()
737 err = thread_stack__no_call_return(thread, ts, sample, thread_stack__process()
741 err = thread_stack__trace_begin(thread, ts, sample->time, ref); thread_stack__process()
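
thread_stack__sample() above copies the synthesized stack into an ip_callchain. A hedged sketch of calling it; the trailing sz/ip parameters and the nr-plus-flexible-ips[] layout of struct ip_callchain are assumptions inferred from the excerpt:

#include <stdlib.h>
#include "thread.h"
#include "thread-stack.h"

/* Sketch: allocate room for up to 64 entries and let the thread-stack
 * code fill in the callchain, starting with the sample ip itself. */
static struct ip_callchain *sample_synth_stack(struct thread *thread, u64 ip)
{
        const size_t sz = 64;
        struct ip_callchain *chain;

        chain = calloc(1, sizeof(*chain) + sz * sizeof(chain->ips[0]));
        if (chain != NULL)
                thread_stack__sample(thread, chain, sz, ip);
        return chain;
}
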
db-export.c
20 #include "thread.h"
25 #include "thread-stack.h"
122 int db_export__thread(struct db_export *dbe, struct thread *thread, db_export__thread() argument
128 if (thread->db_id) db_export__thread()
131 thread->db_id = ++dbe->thread_last_db_id; db_export__thread()
133 if (thread->pid_ != -1) { db_export__thread()
134 struct thread *main_thread; db_export__thread()
136 if (thread->pid_ == thread->tid) { db_export__thread()
137 main_thread = thread; db_export__thread()
140 thread->pid_, db_export__thread()
141 thread->pid_); db_export__thread()
149 err = db_export__comm_thread(dbe, comm, thread); db_export__thread()
158 return dbe->export_thread(dbe, thread, main_thread_db_id, db_export__thread()
165 struct thread *main_thread) db_export__comm()
187 struct thread *thread) db_export__comm_thread()
194 return dbe->export_comm_thread(dbe, db_id, comm, thread); db_export__comm_thread()
229 static struct thread *get_main_thread(struct machine *machine, struct thread *thread) get_main_thread() argument
231 if (thread->pid_ == thread->tid) get_main_thread()
232 return thread; get_main_thread()
234 if (thread->pid_ == -1) get_main_thread()
237 return machine__find_thread(machine, thread->pid_, thread->pid_); get_main_thread()
287 struct thread* thread = al->thread; db_export__sample() local
294 struct thread *main_thread; db_export__sample()
306 main_thread = get_main_thread(al->machine, thread); db_export__sample()
310 err = db_export__thread(dbe, thread, al->machine, comm); db_export__sample()
331 perf_event__preprocess_sample_addr(event, sample, thread, &addr_al); db_export__sample()
337 err = thread_stack__process(thread, comm, sample, al, db_export__sample()
186 db_export__comm_thread(struct db_export *dbe, struct comm *comm, struct thread *thread) db_export__comm_thread() argument
db-export.h
24 struct thread;
51 int (*export_thread)(struct db_export *dbe, struct thread *thread,
55 struct comm *comm, struct thread *thread);
85 int db_export__thread(struct db_export *dbe, struct thread *thread,
88 struct thread *main_thread);
90 struct thread *thread);
machine.c
10 #include "thread.h"
48 struct thread *thread = machine__findnew_thread(machine, -1, machine__init() local
52 if (thread == NULL) machine__init()
56 thread__set_comm(thread, comm, 0); machine__init()
97 struct thread *t = rb_entry(nd, struct thread, rb_node); machine__delete_threads()
294 struct thread *th, pid_t pid) machine__update_thread_pid()
296 struct thread *leader; machine__update_thread_pid()
322 * tid. Consequently there never should be any maps on a thread machine__update_thread_pid()
326 pr_err("Discarding thread maps for %d:%d\n", machine__update_thread_pid()
339 static struct thread *__machine__findnew_thread(struct machine *machine, __machine__findnew_thread()
345 struct thread *th; __machine__findnew_thread()
364 th = rb_entry(parent, struct thread, rb_node); __machine__findnew_thread()
391 * within thread__init_map_groups to find the thread __machine__findnew_thread()
409 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, machine__findnew_thread()
415 struct thread *machine__find_thread(struct machine *machine, pid_t pid, machine__find_thread()
422 struct thread *thread) machine__thread_exec_comm()
425 return thread__exec_comm(thread); machine__thread_exec_comm()
427 return thread__comm(thread); machine__thread_exec_comm()
433 struct thread *thread = machine__findnew_thread(machine, machine__process_comm_event() local
444 if (thread == NULL || machine__process_comm_event()
445 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) { machine__process_comm_event()
579 struct thread *pos = rb_entry(nd, struct thread, rb_node); machine__fprintf()
1164 struct thread *thread; machine__process_mmap2_event() local
1180 thread = machine__findnew_thread(machine, event->mmap2.pid, machine__process_mmap2_event()
1182 if (thread == NULL) machine__process_mmap2_event()
1197 event->mmap2.filename, type, thread); machine__process_mmap2_event()
1202 thread__insert_map(thread, map); machine__process_mmap2_event()
1214 struct thread *thread; machine__process_mmap_event() local
1230 thread = machine__findnew_thread(machine, event->mmap.pid, machine__process_mmap_event()
1232 if (thread == NULL) machine__process_mmap_event()
1244 type, thread); machine__process_mmap_event()
1249 thread__insert_map(thread, map); machine__process_mmap_event()
1257 void machine__remove_thread(struct machine *machine, struct thread *th) machine__remove_thread()
1275 struct thread *thread = machine__find_thread(machine, machine__process_fork_event() local
1278 struct thread *parent = machine__findnew_thread(machine, machine__process_fork_event()
1282 /* if a thread currently exists for the thread id remove it */ machine__process_fork_event()
1283 if (thread != NULL) machine__process_fork_event()
1284 machine__remove_thread(machine, thread); machine__process_fork_event()
1286 thread = machine__findnew_thread(machine, event->fork.pid, machine__process_fork_event()
1291 if (thread == NULL || parent == NULL || machine__process_fork_event()
1292 thread__fork(thread, parent, sample->time) < 0) { machine__process_fork_event()
1303 struct thread *thread = machine__find_thread(machine, machine__process_exit_event() local
1310 if (thread != NULL) machine__process_exit_event()
1311 thread__exited(thread); machine__process_exit_event()
1349 static void ip__resolve_ams(struct thread *thread, ip__resolve_ams() argument
1363 thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al); ip__resolve_ams()
1371 static void ip__resolve_data(struct thread *thread, ip__resolve_data() argument
1378 thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al); ip__resolve_data()
1385 thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al); ip__resolve_data()
1402 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip); sample__resolve_mem()
1403 ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr); sample__resolve_mem()
1409 static int add_callchain_ip(struct thread *thread, add_callchain_ip() argument
1420 thread__find_cpumode_addr_location(thread, MAP__FUNCTION, add_callchain_ip()
1446 thread__find_addr_location(thread, *cpumode, MAP__FUNCTION, add_callchain_ip()
1477 ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to); sample__resolve_bstack()
1478 ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from); sample__resolve_bstack()
1532 static int resolve_lbr_callchain_sample(struct thread *thread, resolve_lbr_callchain_sample() argument
1587 err = add_callchain_ip(thread, parent, root_al, &cpumode, ip); resolve_lbr_callchain_sample()
1597 static int thread__resolve_callchain_sample(struct thread *thread, thread__resolve_callchain_sample() argument
1615 err = resolve_lbr_callchain_sample(thread, sample, parent, thread__resolve_callchain_sample()
1626 skip_idx = arch_skip_callchain_idx(thread, chain); thread__resolve_callchain_sample()
1672 err = add_callchain_ip(thread, parent, root_al, thread__resolve_callchain_sample()
1675 err = add_callchain_ip(thread, parent, root_al, thread__resolve_callchain_sample()
1705 err = add_callchain_ip(thread, parent, root_al, &cpumode, ip); thread__resolve_callchain_sample()
1721 int thread__resolve_callchain(struct thread *thread, thread__resolve_callchain() argument
1728 int ret = thread__resolve_callchain_sample(thread, evsel, thread__resolve_callchain()
1745 thread, sample, max_stack); thread__resolve_callchain()
1750 int (*fn)(struct thread *thread, void *p), machine__for_each_thread()
1754 struct thread *thread; machine__for_each_thread() local
1758 thread = rb_entry(nd, struct thread, rb_node); machine__for_each_thread()
1759 rc = fn(thread, priv); machine__for_each_thread()
1764 list_for_each_entry(thread, &machine->dead_threads, node) { machine__for_each_thread()
1765 rc = fn(thread, priv); machine__for_each_thread()
1795 struct thread *thread; machine__set_current_tid() local
1818 thread = machine__findnew_thread(machine, pid, tid); machine__set_current_tid()
1819 if (!thread) machine__set_current_tid()
1822 thread->cpu = cpu; machine__set_current_tid()
421 machine__thread_exec_comm(struct machine *machine, struct thread *thread) machine__thread_exec_comm() argument
1749 machine__for_each_thread(struct machine *machine, int (*fn)(struct thread *thread, void *p), void *priv) machine__for_each_thread() argument
unwind-libdw.c
10 #include "thread.h"
29 thread__find_addr_location(ui->thread, __report_module()
78 /* We want only single thread to be processed. */ next_thread()
92 thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER, access_dso_mem()
167 struct thread *thread, unwind__get_entries()
173 .thread = thread, unwind__get_entries()
174 .machine = thread->mg->machine, unwind__get_entries()
197 if (!dwfl_attach_state(ui.dwfl, EM_NONE, thread->tid, &callbacks, &ui)) unwind__get_entries()
200 err = dwfl_getthread_frames(ui.dwfl, thread->tid, frame_callback, &ui); unwind__get_entries()
166 unwind__get_entries(unwind_entry_cb_t cb, void *arg, struct thread *thread, struct perf_sample *data, int max_stack) unwind__get_entries() argument
vdso.c
15 #include "thread.h"
140 struct thread *thread) machine__thread_dso_type()
146 map = map_groups__first(thread->mg, MAP__FUNCTION); machine__thread_dso_type()
251 struct thread *thread, vdso__dso_findnew_compat()
257 dso_type = machine__thread_dso_type(machine, thread); vdso__dso_findnew_compat()
285 struct thread *thread __maybe_unused) vdso__dso_findnew()
298 if (vdso__dso_findnew_compat(machine, thread, vdso_info, &dso)) vdso__dso_findnew()
139 machine__thread_dso_type(struct machine *machine, struct thread *thread) machine__thread_dso_type() argument
250 vdso__dso_findnew_compat(struct machine *machine, struct thread *thread, struct vdso_info *vdso_info, struct dso **dso) vdso__dso_findnew_compat() argument
evlist.c
285 int cpu, thread; perf_evlist__disable() local
295 for (thread = 0; thread < nr_threads; thread++) evlist__for_each()
296 ioctl(FD(pos, cpu, thread), evlist__for_each()
304 int cpu, thread; perf_evlist__enable() local
314 for (thread = 0; thread < nr_threads; thread++) evlist__for_each()
315 ioctl(FD(pos, cpu, thread), evlist__for_each()
324 int cpu, thread, err; perf_evlist__disable_event() local
332 for (thread = 0; thread < nr_threads; thread++) { perf_evlist__disable_event()
333 err = ioctl(FD(evsel, cpu, thread), perf_evlist__disable_event()
345 int cpu, thread, err; perf_evlist__enable_event() local
353 for (thread = 0; thread < nr_threads; thread++) { perf_evlist__enable_event()
354 err = ioctl(FD(evsel, cpu, thread), perf_evlist__enable_event()
366 int thread, err; perf_evlist__enable_event_cpu() local
372 for (thread = 0; thread < nr_threads; thread++) { perf_evlist__enable_event_cpu()
373 err = ioctl(FD(evsel, cpu, thread), perf_evlist__enable_event_cpu()
383 int thread) perf_evlist__enable_event_thread()
392 err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); perf_evlist__enable_event_thread()
472 int cpu, int thread, u64 id) perf_evlist__id_hash()
475 struct perf_sample_id *sid = SID(evsel, cpu, thread); perf_evlist__id_hash()
484 int cpu, int thread, u64 id) perf_evlist__id_add()
486 perf_evlist__id_hash(evlist, evsel, cpu, thread, id); perf_evlist__id_add()
492 int cpu, int thread, int fd) perf_evlist__id_add_fd()
527 perf_evlist__id_add(evlist, evsel, cpu, thread, id); perf_evlist__id_add_fd()
533 int thread) perf_evlist__set_sid_idx()
535 struct perf_sample_id *sid = SID(evsel, cpu, thread); perf_evlist__set_sid_idx()
541 if (!evsel->system_wide && evlist->threads && thread >= 0) perf_evlist__set_sid_idx()
542 sid->tid = evlist->threads->map[thread]; perf_evlist__set_sid_idx()
797 int thread, int *output) perf_evlist__mmap_per_evsel()
804 if (evsel->system_wide && thread) evlist__for_each()
807 fd = FD(evsel, cpu, thread); evlist__for_each()
834 if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread, evlist__for_each()
838 thread); evlist__for_each()
848 int cpu, thread; perf_evlist__mmap_per_cpu() local
856 for (thread = 0; thread < nr_threads; thread++) { perf_evlist__mmap_per_cpu()
858 thread, &output)) perf_evlist__mmap_per_cpu()
874 int thread; perf_evlist__mmap_per_thread() local
877 pr_debug2("perf event ring buffer mmapped per thread\n"); perf_evlist__mmap_per_thread()
878 for (thread = 0; thread < nr_threads; thread++) { perf_evlist__mmap_per_thread()
881 if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, perf_evlist__mmap_per_thread()
889 for (thread = 0; thread < nr_threads; thread++) perf_evlist__mmap_per_thread()
890 __perf_evlist__munmap(evlist, thread); perf_evlist__mmap_per_thread()
1293 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL perf_evlist__open()
381 perf_evlist__enable_event_thread(struct perf_evlist *evlist, struct perf_evsel *evsel, int thread) perf_evlist__enable_event_thread() argument
470 perf_evlist__id_hash(struct perf_evlist *evlist, struct perf_evsel *evsel, int cpu, int thread, u64 id) perf_evlist__id_hash() argument
483 perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, int cpu, int thread, u64 id) perf_evlist__id_add() argument
490 perf_evlist__id_add_fd(struct perf_evlist *evlist, struct perf_evsel *evsel, int cpu, int thread, int fd) perf_evlist__id_add_fd() argument
531 perf_evlist__set_sid_idx(struct perf_evlist *evlist, struct perf_evsel *evsel, int idx, int cpu, int thread) perf_evlist__set_sid_idx() argument
795 perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, struct mmap_params *mp, int cpu, int thread, int *output) perf_evlist__mmap_per_evsel() argument
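perf_evlist__enable(), perf_evlist__disable() and the per-event variants above all reduce to one pattern: iterate the (cpu, thread) grid of an evsel and ioctl each open perf fd. A hedged distillation of that loop; FD() is evlist.c's private fd accessor and is assumed here:

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Sketch of the shared loop: one PERF_EVENT_IOC_ENABLE per open
 * (cpu, thread) file descriptor of the event. */
static int evsel__enable_all(struct perf_evsel *evsel,
                             int nr_cpus, int nr_threads)
{
        int cpu, thread;

        for (cpu = 0; cpu < nr_cpus; cpu++)
                for (thread = 0; thread < nr_threads; thread++)
                        if (ioctl(FD(evsel, cpu, thread),
                                  PERF_EVENT_IOC_ENABLE, 0) < 0)
                                return -errno;
        return 0;
}
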
unwind-libunwind.c
28 #include "thread.h"
90 struct thread *thread; member in struct:unwind_info
318 thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER, find_map()
412 thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER, access_dso_mem()
514 static int entry(u64 ip, struct thread *thread, entry() argument
520 thread__find_addr_location(thread, PERF_RECORD_MISC_USER, entry()
563 int unwind__prepare_access(struct thread *thread) unwind__prepare_access() argument
577 thread__set_priv(thread, addr_space); unwind__prepare_access()
582 void unwind__flush_access(struct thread *thread) unwind__flush_access() argument
589 addr_space = thread__priv(thread); unwind__flush_access()
593 void unwind__finish_access(struct thread *thread) unwind__finish_access() argument
600 addr_space = thread__priv(thread); unwind__finish_access()
611 addr_space = thread__priv(ui->thread); get_entries()
623 ret = ip ? entry(ip, ui->thread, cb, arg) : 0; get_entries()
630 struct thread *thread, unwind__get_entries()
636 .thread = thread, unwind__get_entries()
637 .machine = thread->mg->machine, unwind__get_entries()
648 ret = entry(ip, thread, cb, arg); unwind__get_entries()
629 unwind__get_entries(unwind_entry_cb_t cb, void *arg, struct thread *thread, struct perf_sample *data, int max_stack) unwind__get_entries() argument
event.c
10 #include "thread.h"
187 * for main thread set parent to ppid from status file. For other perf_event__synthesize_fork()
188 * threads set parent pid to main thread. ie., assume main thread perf_event__synthesize_fork()
459 int err = -1, thread, j; perf_event__synthesize_thread_map() local
474 for (thread = 0; thread < threads->nr; ++thread) { perf_event__synthesize_thread_map()
477 threads->map[thread], 0, perf_event__synthesize_thread_map()
485 * comm.pid is set to thread group id by perf_event__synthesize_thread_map()
488 if ((int) comm_event->comm.pid != threads->map[thread]) { perf_event__synthesize_thread_map()
491 /* is thread group leader in thread_map? */ perf_event__synthesize_thread_map()
558 * We may race with exiting thread, so don't stop just because perf_event__synthesize_threads()
559 * one thread couldn't be synthesized. perf_event__synthesize_threads()
792 void thread__find_addr_map(struct thread *thread, u8 cpumode, thread__find_addr_map() argument
796 struct map_groups *mg = thread->mg; thread__find_addr_map()
801 al->thread = thread; thread__find_addr_map()
868 void thread__find_addr_location(struct thread *thread, thread__find_addr_location() argument
872 thread__find_addr_map(thread, cpumode, type, addr, al); thread__find_addr_location()
875 thread->mg->machine->symbol_filter); thread__find_addr_location()
886 struct thread *thread = machine__findnew_thread(machine, sample->pid, perf_event__preprocess_sample() local
889 if (thread == NULL) perf_event__preprocess_sample()
892 dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid); perf_event__preprocess_sample()
904 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al); perf_event__preprocess_sample()
909 if (thread__is_filtered(thread)) perf_event__preprocess_sample()
963 struct thread *thread, perf_event__preprocess_sample_addr()
968 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->addr, al); perf_event__preprocess_sample_addr()
970 thread__find_addr_map(thread, cpumode, MAP__VARIABLE, perf_event__preprocess_sample_addr()
961 perf_event__preprocess_sample_addr(union perf_event *event, struct perf_sample *sample, struct thread *thread, struct addr_location *al) perf_event__preprocess_sample_addr() argument
session.h
9 #include "thread.h"
16 struct thread;
61 struct thread *thread,
85 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid);
machine.h
15 struct thread;
34 struct thread *last_match;
71 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
74 struct thread *thread);
123 void machine__remove_thread(struct machine *machine, struct thread *th);
129 int thread__resolve_callchain(struct thread *thread,
150 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
209 int (*fn)(struct thread *thread, void *p),
evsel.h
240 int cpu, int thread,
243 int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread,
247 int cpu, int thread, bool scale);
250 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
254 * @thread - thread of interest
257 int cpu, int thread) perf_evsel__read_on_cpu()
259 return __perf_evsel__read_on_cpu(evsel, cpu, thread, false); perf_evsel__read_on_cpu()
263 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
267 * @thread - thread of interest
270 int cpu, int thread) perf_evsel__read_on_cpu_scaled()
272 return __perf_evsel__read_on_cpu(evsel, cpu, thread, true); perf_evsel__read_on_cpu_scaled()
256 perf_evsel__read_on_cpu(struct perf_evsel *evsel, int cpu, int thread) perf_evsel__read_on_cpu() argument
269 perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel, int cpu, int thread) perf_evsel__read_on_cpu_scaled() argument
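
perf_evsel__read_cb() above takes a per-(cpu, thread) callback. A hedged sketch of using it; the 'val' member of struct perf_counts_values is an assumption not shown in this excerpt:

#include <stdio.h>
#include <inttypes.h>

/* Sketch: print one counter value through the callback-based read
 * API declared above. */
static int print_count(struct perf_evsel *evsel __maybe_unused,
                       int cpu, int thread,
                       struct perf_counts_values *count)
{
        printf("cpu %d thread %d: %" PRIu64 "\n", cpu, thread, count->val);
        return 0;
}

static int dump_count(struct perf_evsel *evsel, int cpu, int thread)
{
        return perf_evsel__read_cb(evsel, cpu, thread, print_count);
}
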
/linux-4.1.27/tools/perf/tests/
dwarf-unwind.c
11 #include "thread.h"
67 static int unwind_thread(struct thread *thread) unwind_thread() argument
75 if (test__arch_unwind_sample(&sample, thread)) { unwind_thread()
80 err = unwind__get_entries(unwind_entry, &cnt, thread, unwind_thread()
101 /* Any possible value should be 'thread' */ compare()
102 struct thread *thread = *(struct thread **)p1; compare() local
105 global_unwind_retval = unwind_thread(thread); compare()
111 static int krava_3(struct thread *thread) krava_3() argument
113 struct thread *array[2] = {thread, thread}; krava_3()
125 _bsearch(array, &thread, 2, sizeof(struct thread **), compare); krava_3()
130 static int krava_2(struct thread *thread) krava_2() argument
132 return krava_3(thread); krava_2()
136 static int krava_1(struct thread *thread) krava_1() argument
138 return krava_2(thread); krava_1()
145 struct thread *thread; test__dwarf_unwind() local
166 thread = machine__find_thread(machine, getpid(), getpid()); test__dwarf_unwind()
167 if (!thread) { test__dwarf_unwind()
168 pr_err("Could not get thread\n"); test__dwarf_unwind()
172 err = krava_1(thread); test__dwarf_unwind()
thread-mg-share.c
3 #include "thread.h"
12 /* thread group */ test__thread_mg_share()
13 struct thread *leader; test__thread_mg_share()
14 struct thread *t1, *t2, *t3; test__thread_mg_share()
18 struct thread *other, *other_leader; test__thread_mg_share()
22 * This test create 2 processes abstractions (struct thread) test__thread_mg_share()
26 * thread group (pid: 0, tids: 0, 1, 2, 3) test__thread_mg_share()
39 /* and create 1 separated process, without thread leader */ test__thread_mg_share()
66 /* release thread group */ test__thread_mg_share()
mmap-thread-lookup.c
13 #include "thread.h"
57 /* Signal thread_create thread is initialized. */ thread_fn()
65 /* Waiting for main thread to kill us. */ thread_fn()
83 /* Wait for thread initialization. */ thread_create()
100 /* 0 is main thread */ threads_create()
115 /* cleanup the main thread */ threads_destroy()
179 * thread object. mmap_events()
184 struct thread *thread; mmap_events() local
186 thread = machine__findnew_thread(machine, getpid(), td->tid); mmap_events()
190 thread__find_addr_map(thread, mmap_events()
210 * main thread) and each thread creates memory map.
220 * by using all thread objects.
hists_common.c
8 #include "util/thread.h"
91 struct thread *thread; setup_fake_machine() local
93 thread = machine__findnew_thread(machine, fake_threads[i].pid, setup_fake_machine()
95 if (thread == NULL) setup_fake_machine()
98 thread__set_comm(thread, fake_threads[i].comm, 0); setup_fake_machine()
173 i, thread__comm_str(he->thread), print_hists_in()
200 i, thread__comm_str(he->thread), he->thread->tid, print_hists_out()
tests.h
58 struct thread;
61 struct thread *thread);
hists_filter.c
8 #include "util/thread.h"
16 struct thread *thread; member in struct:sample
88 fake_samples[i].thread = al.thread; evlist__for_each()
163 /* now applying thread filter for 'bash' */ evlist__for_each()
164 hists->thread_filter = fake_samples[9].thread; evlist__for_each()
168 pr_info("Histogram for thread filter\n"); evlist__for_each()
181 TEST_ASSERT_VAL("Unmatched nr samples for thread filter", evlist__for_each()
183 TEST_ASSERT_VAL("Unmatched nr hist entries for thread filter", evlist__for_each()
185 TEST_ASSERT_VAL("Unmatched total period for thread filter", evlist__for_each()
188 /* remove thread filter first */ evlist__for_each()
253 hists->thread_filter = fake_samples[1].thread; evlist__for_each()
hists_link.c
9 #include "thread.h"
16 struct thread *thread; member in struct:sample
97 fake_common_samples[k].thread = al.thread; evlist__for_each()
121 fake_samples[i][k].thread = al.thread; evlist__for_each()
136 struct thread *t, struct map *m, struct symbol *s) find_sample()
139 if (samples->thread == t && samples->map == m && find_sample()
170 he->thread, he->ms.map, he->ms.sym)) { __validate_match()
222 he->thread, he->ms.map, he->ms.sym) && __validate_link()
225 he->thread, he->ms.map, he->ms.sym)) { __validate_link()
code-reading.c
15 #include "thread.h"
136 struct thread *thread, struct state *state) read_object_code()
147 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al); read_object_code()
171 ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine, read_object_code()
249 struct thread *thread; process_sample_event() local
257 thread = machine__findnew_thread(machine, sample.pid, sample.tid); process_sample_event()
258 if (!thread) { process_sample_event()
265 return read_object_code(sample.ip, READLEN, cpumode, thread, state); process_sample_event()
389 struct thread *thread; do_test_code_reading() local
457 thread = machine__findnew_thread(machine, pid, pid); do_test_code_reading()
458 if (!thread) { do_test_code_reading()
135 read_object_code(u64 addr, size_t len, u8 cpumode, struct thread *thread, struct state *state) read_object_code() argument
/linux-4.1.27/arch/sparc/kernel/
sigutil_32.c
20 fpsave(&current->thread.float_regs[0], &current->thread.fsr, save_fpu_state()
21 &current->thread.fpqueue[0], &current->thread.fpqdepth); save_fpu_state()
28 fpsave(&current->thread.float_regs[0], &current->thread.fsr, save_fpu_state()
29 &current->thread.fpqueue[0], &current->thread.fpqdepth); save_fpu_state()
35 &current->thread.float_regs[0], save_fpu_state()
37 err |= __put_user(current->thread.fsr, &fpu->si_fsr); save_fpu_state()
38 err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); save_fpu_state()
39 if (current->thread.fpqdepth != 0) save_fpu_state()
41 &current->thread.fpqueue[0], save_fpu_state()
66 err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0], restore_fpu_state()
68 err |= __get_user(current->thread.fsr, &fpu->si_fsr); restore_fpu_state()
69 err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); restore_fpu_state()
70 if (current->thread.fpqdepth != 0) restore_fpu_state()
71 err |= __copy_from_user(&current->thread.fpqueue[0], restore_fpu_state()
process_32.c
185 * Free current thread data structures etc..
196 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
197 &current->thread.fpqueue[0], &current->thread.fpqdepth);
217 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
218 &current->thread.fpqueue[0], &current->thread.fpqdepth);
226 /* This task is no longer a kernel thread. */
227 if (current->thread.flags & SPARC_FLAG_KTHREAD) {
228 current->thread.flags &= ~SPARC_FLAG_KTHREAD;
232 current->thread.kregs = (struct pt_regs *)
292 /* Copy a Sparc thread. The fork() return value conventions
321 fpsave(&p->thread.float_regs[0], &p->thread.fsr,
322 &p->thread.fpqueue[0], &p->thread.fpqdepth);
343 p->thread.kregs = childregs;
349 p->thread.flags |= SPARC_FLAG_KTHREAD;
350 p->thread.current_ds = KERNEL_DS;
361 p->thread.flags &= ~SPARC_FLAG_KTHREAD;
362 p->thread.current_ds = USER_DS;
364 ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
365 ti->kwim = current->thread.fork_kwim;
429 fpsave(&current->thread.float_regs[0], &current->thread.fsr, dump_fpu()
430 &current->thread.fpqueue[0], &current->thread.fpqdepth); dump_fpu()
439 fpsave(&current->thread.float_regs[0], &current->thread.fsr, dump_fpu()
440 &current->thread.fpqueue[0], &current->thread.fpqdepth); dump_fpu()
448 &current->thread.float_regs[0], dump_fpu()
450 fpregs->pr_fsr = current->thread.fsr; dump_fpu()
451 fpregs->pr_qcnt = current->thread.fpqdepth; dump_fpu()
456 &current->thread.fpqueue[0], dump_fpu()
traps_32.c
195 fpsave(&fptask->thread.float_regs[0], &fptask->thread.fsr, do_fpd_trap()
196 &fptask->thread.fpqueue[0], &fptask->thread.fpqdepth); do_fpd_trap()
200 fpload(&current->thread.float_regs[0], &current->thread.fsr); do_fpd_trap()
211 fpload(&current->thread.float_regs[0], &current->thread.fsr); do_fpd_trap()
248 fpsave(&fpt->thread.float_regs[0], &fpt->thread.fsr,
249 &fpt->thread.fpqueue[0], &fpt->thread.fpqdepth);
251 printk("Hmm, FP exception, fsr was %016lx\n", fpt->thread.fsr);
254 switch ((fpt->thread.fsr & 0x1c000)) {
279 fpload(&current->thread.float_regs[0], &current->thread.fsr);
302 fsr = fpt->thread.fsr;
/linux-4.1.27/arch/metag/include/asm/
hwthread.h
19 * Each hardware thread's Control Unit registers are memory-mapped
20 * and can therefore be accessed by any other hardware thread.
22 * This helper function returns the memory address where "thread"'s
26 void __iomem *__CU_addr(unsigned int thread, unsigned int regnum) __CU_addr() argument
30 WARN_ON(thread == BAD_HWTHREAD_ID); __CU_addr()
34 thread_offset = TnUCTRX_STRIDE * thread; __CU_addr()
cachepart.h
21 * get_global_dcache_size() - Get the thread's global dcache.
23 * Returns the size of the current thread's global dcache partition.
28 * get_global_icache_size() - Get the thread's global icache.
30 * Returns the size of the current thread's global icache partition.
37 * @thread_id: Hardware thread ID
core_reg.h
6 extern void core_reg_write(int unit, int reg, int thread, unsigned int val);
7 extern unsigned int core_reg_read(int unit, int reg, int thread);
thread_info.h
1 /* thread_info.h: Meta low-level thread information
32 unsigned long status; /* thread-synchronous flags */
36 mm_segment_t addr_limit; /* thread address space */
63 * macros/functions for gaining access to the thread information structure
82 /* how to get the thread information struct from C */ current_thread_info()
99 * thread information flags
processor.h
114 current->thread.int_depth = 1; \
129 /* Free all resources held by a thread. */ release_thread()
140 * Return saved PC of a blocked thread.
143 ((unsigned long)(tsk)->thread.kernel_context->CurrPC)
145 ((unsigned long)(tsk)->thread.kernel_context->AX[0].U0)
147 ((unsigned long)(tsk)->thread.kernel_context->AX[1].U0)
182 * Halt (stop) the hardware thread. This instruction sequence is the
183 * standard way to cause a Meta hardware thread to exit. The exit code
tlbflush.h
27 /* flush TLB entries for just the current hardware thread */ __flush_tlb()
28 int thread = hard_processor_id(); __flush_tlb() local
30 LINSYSCFLUSH_TxMMCU_STRIDE * thread)); __flush_tlb()
/linux-4.1.27/arch/x86/um/
tls_64.c
10 * If CLONE_SETTLS is set, we need to save the thread id arch_copy_tls()
14 t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)]; arch_copy_tls()
syscalls_64.c
22 * GDT or thread.fs being set instead). So, we let the host arch_prctl()
23 * fiddle the registers and thread struct and restore the arch_prctl()
34 ret = restore_registers(pid, &current->thread.regs.regs); arch_prctl()
57 current->thread.arch.fs = (unsigned long) ptr; arch_prctl()
58 ret = save_registers(pid, &current->thread.regs.regs); arch_prctl()
61 ret = save_registers(pid, &current->thread.regs.regs); arch_prctl()
81 if ((to->thread.arch.fs == 0) || (to->mm == NULL)) arch_switch_to()
84 arch_prctl(to, ARCH_SET_FS, (void __user *) to->thread.arch.fs); arch_switch_to()
/linux-4.1.27/include/linux/
smpboot.h
11 * struct smp_hotplug_thread - CPU hotplug related thread descriptor
14 * @thread_should_run: Check whether the thread should run or not. Called with
16 * @thread_fn: The associated thread function
17 * @create: Optional setup function, called when the thread gets
18 * created (Not called from the thread context)
19 * @setup: Optional setup function, called when the thread gets
21 * @cleanup: Optional cleanup function, called when the thread
23 * @park: Optional park function, called when the thread is
25 * @unpark: Optional unpark function, called when the thread is
27 * @pre_unpark: Optional unpark function, called before the thread is
29 * called on the target cpu of the thread. Careful!
31 * @thread_comm: The base name of the thread
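
This descriptor is handed to smpboot_register_percpu_thread(), which creates one kthread per online CPU and drives the callbacks; ksoftirqd uses the same mechanism. A minimal hedged sketch, in which all example_* identifiers are hypothetical:

#include <linux/smpboot.h>
#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, example_task);

static int example_should_run(unsigned int cpu)
{
        return 0;       /* nothing pending in this sketch */
}

static void example_fn(unsigned int cpu)
{
        /* per-CPU work; runs when example_should_run() returns true */
}

static struct smp_hotplug_thread example_threads = {
        .store                  = &example_task,
        .thread_should_run      = example_should_run,
        .thread_fn              = example_fn,
        .thread_comm            = "example/%u",
};

static int __init example_init(void)
{
        return smpboot_register_percpu_thread(&example_threads);
}
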
irqreturn.h
8 * @IRQ_WAKE_THREAD handler requests to wake the handler thread
regset.h
26 * @target: thread being examined
30 * Return %0 if no interesting state in this thread.
44 * @target: thread being examined
65 * @target: thread being examined
86 * @target: thread being examined
110 * struct user_regset - accessible thread CPU state
122 * This is part of the state of an individual thread, not necessarily
127 * These functions must be called only on the current thread or on a
128 * thread that is in %TASK_STOPPED or %TASK_TRACED state, that we are
130 * have called wait_task_inactive() on. (The target thread always might
132 * that thread's user_regset state might be scrambled.)
176 * above). This describes all the state of a thread that can be seen
179 * might refer to the same machine-specific state in the thread. For
180 * example, a 32-bit thread's state could be examined from the 32-bit
181 * view or from the 64-bit view. Either method reaches the same thread
199 * @tsk: a thread of the process in question
323 * copy_regset_to_user - fetch a thread's user_regset data into user memory
324 * @target: thread to be examined
325 * @view: &struct user_regset_view describing user thread machine state
349 * copy_regset_from_user - store into thread's user_regset data from user memory
350 * @target: thread to be examined
351 * @view: &struct user_regset_view describing user thread machine state
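
copy_regset_to_user(), documented above, is the usual way a ptrace-style caller exports a stopped thread's state. A hedged sketch; treating regset 0 as the general-purpose register set is a convention assumed here:

#include <linux/regset.h>
#include <linux/sched.h>

/* Sketch: fetch the target thread's regset view and copy regset 0
 * out to user memory; per the rules quoted above, target must be
 * current or a stopped/traced thread. */
static int example_dump_gregs(struct task_struct *target,
                              void __user *ubuf, unsigned int size)
{
        const struct user_regset_view *view = task_user_regset_view(target);

        return copy_regset_to_user(target, view, 0 /* setno */,
                                   0 /* offset */, size, ubuf);
}
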
/linux-4.1.27/arch/parisc/kernel/
asm-offsets.c
66 DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs)); main()
67 DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0])); main()
68 DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1])); main()
69 DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2])); main()
70 DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3])); main()
71 DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4])); main()
72 DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5])); main()
73 DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6])); main()
74 DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7])); main()
75 DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8])); main()
76 DEFINE(TASK_PT_GR9, offsetof(struct task_struct, thread.regs.gr[ 9])); main()
77 DEFINE(TASK_PT_GR10, offsetof(struct task_struct, thread.regs.gr[10])); main()
78 DEFINE(TASK_PT_GR11, offsetof(struct task_struct, thread.regs.gr[11])); main()
79 DEFINE(TASK_PT_GR12, offsetof(struct task_struct, thread.regs.gr[12])); main()
80 DEFINE(TASK_PT_GR13, offsetof(struct task_struct, thread.regs.gr[13])); main()
81 DEFINE(TASK_PT_GR14, offsetof(struct task_struct, thread.regs.gr[14])); main()
82 DEFINE(TASK_PT_GR15, offsetof(struct task_struct, thread.regs.gr[15])); main()
83 DEFINE(TASK_PT_GR16, offsetof(struct task_struct, thread.regs.gr[16])); main()
84 DEFINE(TASK_PT_GR17, offsetof(struct task_struct, thread.regs.gr[17])); main()
85 DEFINE(TASK_PT_GR18, offsetof(struct task_struct, thread.regs.gr[18])); main()
86 DEFINE(TASK_PT_GR19, offsetof(struct task_struct, thread.regs.gr[19])); main()
87 DEFINE(TASK_PT_GR20, offsetof(struct task_struct, thread.regs.gr[20])); main()
88 DEFINE(TASK_PT_GR21, offsetof(struct task_struct, thread.regs.gr[21])); main()
89 DEFINE(TASK_PT_GR22, offsetof(struct task_struct, thread.regs.gr[22])); main()
90 DEFINE(TASK_PT_GR23, offsetof(struct task_struct, thread.regs.gr[23])); main()
91 DEFINE(TASK_PT_GR24, offsetof(struct task_struct, thread.regs.gr[24])); main()
92 DEFINE(TASK_PT_GR25, offsetof(struct task_struct, thread.regs.gr[25])); main()
93 DEFINE(TASK_PT_GR26, offsetof(struct task_struct, thread.regs.gr[26])); main()
94 DEFINE(TASK_PT_GR27, offsetof(struct task_struct, thread.regs.gr[27])); main()
95 DEFINE(TASK_PT_GR28, offsetof(struct task_struct, thread.regs.gr[28])); main()
96 DEFINE(TASK_PT_GR29, offsetof(struct task_struct, thread.regs.gr[29])); main()
97 DEFINE(TASK_PT_GR30, offsetof(struct task_struct, thread.regs.gr[30])); main()
98 DEFINE(TASK_PT_GR31, offsetof(struct task_struct, thread.regs.gr[31])); main()
99 DEFINE(TASK_PT_FR0, offsetof(struct task_struct, thread.regs.fr[ 0])); main()
100 DEFINE(TASK_PT_FR1, offsetof(struct task_struct, thread.regs.fr[ 1])); main()
101 DEFINE(TASK_PT_FR2, offsetof(struct task_struct, thread.regs.fr[ 2])); main()
102 DEFINE(TASK_PT_FR3, offsetof(struct task_struct, thread.regs.fr[ 3])); main()
103 DEFINE(TASK_PT_FR4, offsetof(struct task_struct, thread.regs.fr[ 4])); main()
104 DEFINE(TASK_PT_FR5, offsetof(struct task_struct, thread.regs.fr[ 5])); main()
105 DEFINE(TASK_PT_FR6, offsetof(struct task_struct, thread.regs.fr[ 6])); main()
106 DEFINE(TASK_PT_FR7, offsetof(struct task_struct, thread.regs.fr[ 7])); main()
107 DEFINE(TASK_PT_FR8, offsetof(struct task_struct, thread.regs.fr[ 8])); main()
108 DEFINE(TASK_PT_FR9, offsetof(struct task_struct, thread.regs.fr[ 9])); main()
109 DEFINE(TASK_PT_FR10, offsetof(struct task_struct, thread.regs.fr[10])); main()
110 DEFINE(TASK_PT_FR11, offsetof(struct task_struct, thread.regs.fr[11])); main()
111 DEFINE(TASK_PT_FR12, offsetof(struct task_struct, thread.regs.fr[12])); main()
112 DEFINE(TASK_PT_FR13, offsetof(struct task_struct, thread.regs.fr[13])); main()
113 DEFINE(TASK_PT_FR14, offsetof(struct task_struct, thread.regs.fr[14])); main()
114 DEFINE(TASK_PT_FR15, offsetof(struct task_struct, thread.regs.fr[15])); main()
115 DEFINE(TASK_PT_FR16, offsetof(struct task_struct, thread.regs.fr[16])); main()
116 DEFINE(TASK_PT_FR17, offsetof(struct task_struct, thread.regs.fr[17])); main()
117 DEFINE(TASK_PT_FR18, offsetof(struct task_struct, thread.regs.fr[18])); main()
118 DEFINE(TASK_PT_FR19, offsetof(struct task_struct, thread.regs.fr[19])); main()
119 DEFINE(TASK_PT_FR20, offsetof(struct task_struct, thread.regs.fr[20])); main()
120 DEFINE(TASK_PT_FR21, offsetof(struct task_struct, thread.regs.fr[21])); main()
121 DEFINE(TASK_PT_FR22, offsetof(struct task_struct, thread.regs.fr[22])); main()
122 DEFINE(TASK_PT_FR23, offsetof(struct task_struct, thread.regs.fr[23])); main()
123 DEFINE(TASK_PT_FR24, offsetof(struct task_struct, thread.regs.fr[24])); main()
124 DEFINE(TASK_PT_FR25, offsetof(struct task_struct, thread.regs.fr[25])); main()
125 DEFINE(TASK_PT_FR26, offsetof(struct task_struct, thread.regs.fr[26])); main()
126 DEFINE(TASK_PT_FR27, offsetof(struct task_struct, thread.regs.fr[27])); main()
127 DEFINE(TASK_PT_FR28, offsetof(struct task_struct, thread.regs.fr[28])); main()
128 DEFINE(TASK_PT_FR29, offsetof(struct task_struct, thread.regs.fr[29])); main()
129 DEFINE(TASK_PT_FR30, offsetof(struct task_struct, thread.regs.fr[30])); main()
130 DEFINE(TASK_PT_FR31, offsetof(struct task_struct, thread.regs.fr[31])); main()
131 DEFINE(TASK_PT_SR0, offsetof(struct task_struct, thread.regs.sr[ 0])); main()
132 DEFINE(TASK_PT_SR1, offsetof(struct task_struct, thread.regs.sr[ 1])); main()
133 DEFINE(TASK_PT_SR2, offsetof(struct task_struct, thread.regs.sr[ 2])); main()
134 DEFINE(TASK_PT_SR3, offsetof(struct task_struct, thread.regs.sr[ 3])); main()
135 DEFINE(TASK_PT_SR4, offsetof(struct task_struct, thread.regs.sr[ 4])); main()
136 DEFINE(TASK_PT_SR5, offsetof(struct task_struct, thread.regs.sr[ 5])); main()
137 DEFINE(TASK_PT_SR6, offsetof(struct task_struct, thread.regs.sr[ 6])); main()
138 DEFINE(TASK_PT_SR7, offsetof(struct task_struct, thread.regs.sr[ 7])); main()
139 DEFINE(TASK_PT_IASQ0, offsetof(struct task_struct, thread.regs.iasq[0])); main()
140 DEFINE(TASK_PT_IASQ1, offsetof(struct task_struct, thread.regs.iasq[1])); main()
141 DEFINE(TASK_PT_IAOQ0, offsetof(struct task_struct, thread.regs.iaoq[0])); main()
142 DEFINE(TASK_PT_IAOQ1, offsetof(struct task_struct, thread.regs.iaoq[1])); main()
143 DEFINE(TASK_PT_CR27, offsetof(struct task_struct, thread.regs.cr27)); main()
144 DEFINE(TASK_PT_ORIG_R28, offsetof(struct task_struct, thread.regs.orig_r28)); main()
145 DEFINE(TASK_PT_KSP, offsetof(struct task_struct, thread.regs.ksp)); main()
146 DEFINE(TASK_PT_KPC, offsetof(struct task_struct, thread.regs.kpc)); main()
147 DEFINE(TASK_PT_SAR, offsetof(struct task_struct, thread.regs.sar)); main()
148 DEFINE(TASK_PT_IIR, offsetof(struct task_struct, thread.regs.iir)); main()
149 DEFINE(TASK_PT_ISR, offsetof(struct task_struct, thread.regs.isr)); main()
150 DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior)); main()
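The DEFINE() lines above are the standard asm-offsets mechanism: the file is compiled to assembly (never linked) and marker lines are scraped out to generate asm-offsets.h. The macro below is the stock include/linux/kbuild.h definition, shown for reference; the sample assembly line is illustrative:

    /* include/linux/kbuild.h */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    /* DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[1]))
     * emits "->TASK_PT_GR1 <offset> ..." into the generated .s file, which
     * kbuild's sed script turns into "#define TASK_PT_GR1 <offset>", so
     * entry code can address the field, e.g.:
     *
     *      LDREG   TASK_PT_GR1(%r26), %r1
     */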
H A Dprocess.c148 * Free current thread data structures etc.
180 memcpy(r, tsk->thread.regs.fr, sizeof(*r)); dump_task_fpu()
185 * Copy architecture-specific thread state
191 struct pt_regs *cregs = &(p->thread.regs); copy_thread()
201 /* kernel thread */ copy_thread()
203 if (!usp) /* idle thread */ copy_thread()
222 /* user thread */ copy_thread()
225 * return for a kernel thread) */ copy_thread()
234 /* Setup thread TLS area from the 4th parameter in clone */ copy_thread()
244 return t->thread.regs.kpc; thread_saved_pc()
/linux-4.1.27/arch/um/kernel/skas/
H A Dprocess.c40 init_task.thread.request.u.thread.proc = start_kernel_proc; start_uml()
41 init_task.thread.request.u.thread.arg = NULL; start_uml()
43 &init_task.thread.switch_buf); start_uml()
/linux-4.1.27/tools/perf/arch/arm/tests/
H A Ddwarf-unwind.c3 #include "thread.h"
12 struct thread *thread, u64 *regs) sample_ustack()
27 map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp); sample_ustack()
44 struct thread *thread) test__arch_unwind_sample()
60 return sample_ustack(sample, thread, buf); test__arch_unwind_sample()
11 sample_ustack(struct perf_sample *sample, struct thread *thread, u64 *regs) sample_ustack() argument
43 test__arch_unwind_sample(struct perf_sample *sample, struct thread *thread) test__arch_unwind_sample() argument
/linux-4.1.27/tools/perf/arch/x86/tests/
H A Ddwarf-unwind.c3 #include "thread.h"
12 struct thread *thread, u64 *regs) sample_ustack()
27 map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp); sample_ustack()
44 struct thread *thread) test__arch_unwind_sample()
60 return sample_ustack(sample, thread, buf); test__arch_unwind_sample()
11 sample_ustack(struct perf_sample *sample, struct thread *thread, u64 *regs) sample_ustack() argument
43 test__arch_unwind_sample(struct perf_sample *sample, struct thread *thread) test__arch_unwind_sample() argument
/linux-4.1.27/arch/s390/kernel/
H A Druntime_instr.c56 if (!task->thread.ri_cb) exit_thread_runtime_instr()
59 kfree(task->thread.ri_cb); exit_thread_runtime_instr()
60 task->thread.ri_signum = 0; exit_thread_runtime_instr()
61 task->thread.ri_cb = NULL; exit_thread_runtime_instr()
74 if (!current->thread.ri_cb) runtime_instr_int_handler()
76 if (current->thread.ri_signum < SIGRTMIN || runtime_instr_int_handler()
77 current->thread.ri_signum > SIGRTMAX) { runtime_instr_int_handler()
83 info.si_signo = current->thread.ri_signum; runtime_instr_int_handler()
92 send_sig_info(current->thread.ri_signum, &info, current); runtime_instr_int_handler()
113 if (!current->thread.ri_cb) { SYSCALL_DEFINE2()
118 cb = current->thread.ri_cb; SYSCALL_DEFINE2()
123 current->thread.ri_signum = signum; SYSCALL_DEFINE2()
127 current->thread.ri_cb = cb; SYSCALL_DEFINE2()
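The interrupt handler above delivers the RT signal a task configured for itself via the syscall. A condensed sketch of that delivery path, with the range check from the excerpt; the si_code choice is an assumption:

    #include <linux/sched.h>
    #include <linux/signal.h>
    #include <linux/string.h>

    static void demo_deliver_ri_signal(void)
    {
            struct siginfo info;
            int signum = current->thread.ri_signum;

            /* Only the RT range accepted at setup time is deliverable. */
            if (signum < SIGRTMIN || signum > SIGRTMAX)
                    return;

            memset(&info, 0, sizeof(info));
            info.si_signo = signum;
            info.si_code  = SI_KERNEL;
            send_sig_info(signum, &info, current);
    }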
H A Dprocess.c40 * Return saved PC of a blocked thread. Used in kernel/sched.
55 sf = (struct stack_frame *) (tsk->thread.ksp & PSW_ADDR_INSN); thread_saved_pc()
67 * Free current thread data structures etc.
84 if (tsk->thread.vxrs) arch_release_task_struct()
85 kfree(tsk->thread.vxrs); arch_release_task_struct()
99 p->thread.ksp = (unsigned long) frame; copy_thread()
100 /* Save access registers to new thread structure. */ copy_thread()
101 save_access_regs(&p->thread.acrs[0]); copy_thread()
103 p->thread.mm_segment = get_fs(); copy_thread()
105 memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); copy_thread()
106 memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); copy_thread()
108 /* Initialize per thread user and system timer values */ copy_thread()
121 /* kernel thread */ copy_thread()
141 p->thread.ri_cb = NULL; copy_thread()
142 p->thread.ri_signum = 0; copy_thread()
145 /* Save the fpu registers to new thread structure. */ copy_thread()
146 save_fp_ctl(&p->thread.fp_regs.fpc); copy_thread()
147 save_fp_regs(p->thread.fp_regs.fprs); copy_thread()
148 p->thread.fp_regs.pad = 0; copy_thread()
149 p->thread.vxrs = NULL; copy_thread()
154 p->thread.acrs[0] = (unsigned int)tls; copy_thread()
156 p->thread.acrs[0] = (unsigned int)(tls >> 32); copy_thread()
157 p->thread.acrs[1] = (unsigned int)tls; copy_thread()
165 current->thread.fp_regs.fpc = 0; execve_tail()
190 sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN); get_wchan()
H A Dptrace.c44 struct thread_struct *thread = &task->thread; update_cr_regs() local
56 if (task->thread.per_flags & PER_FLAG_NO_TE) update_cr_regs()
62 if (task->thread.vxrs) update_cr_regs()
71 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { update_cr_regs()
72 if (task->thread.per_flags & update_cr_regs()
83 new.control = thread->per_user.control; update_cr_regs()
84 new.start = thread->per_user.start; update_cr_regs()
85 new.end = thread->per_user.end; update_cr_regs()
138 memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); ptrace_disable()
139 memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); ptrace_disable()
142 task->thread.per_flags = 0; ptrace_disable()
155 PER_EVENT_IFETCH : child->thread.per_user.control; __peek_user_per()
159 0 : child->thread.per_user.start; __peek_user_per()
163 PSW_ADDR_INSN : child->thread.per_user.end; __peek_user_per()
170 return child->thread.per_user.start; __peek_user_per()
173 return child->thread.per_user.end; __peek_user_per()
177 child->thread.per_event.cause << (BITS_PER_LONG - 16); __peek_user_per()
180 return child->thread.per_event.address; __peek_user_per()
184 child->thread.per_event.paid << (BITS_PER_LONG - 8); __peek_user_per()
215 * access registers are stored in the thread structure __peek_user()
224 tmp = ((unsigned long) child->thread.acrs[15]) << 32; __peek_user()
226 tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); __peek_user()
243 * floating point control reg. is in the thread structure __peek_user()
245 tmp = child->thread.fp_regs.fpc; __peek_user()
250 * floating point regs. are either in child->thread.fp_regs __peek_user()
251 * or the child->thread.vxrs array __peek_user()
254 if (child->thread.vxrs) __peek_user()
256 ((addr_t) child->thread.vxrs + 2*offset); __peek_user()
259 ((addr_t) &child->thread.fp_regs.fprs + offset); __peek_user()
313 child->thread.per_user.control = __poke_user_per()
317 child->thread.per_user.start = data; __poke_user_per()
320 child->thread.per_user.end = data; __poke_user_per()
356 * access registers are stored in the thread structure __poke_user()
366 child->thread.acrs[15] = (unsigned int) (data >> 32); __poke_user()
368 *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; __poke_user()
385 * floating point control reg. is in the thread structure __poke_user()
390 child->thread.fp_regs.fpc = data >> (BITS_PER_LONG - 32); __poke_user()
394 * floating point regs. are either in child->thread.fp_regs __poke_user()
395 * or the child->thread.vxrs array __poke_user()
398 if (child->thread.vxrs) __poke_user()
400 child->thread.vxrs + 2*offset) = data; __poke_user()
403 &child->thread.fp_regs.fprs + offset) = data; __poke_user()
482 child->thread.per_flags &= ~PER_FLAG_NO_TE; arch_ptrace()
487 child->thread.per_flags |= PER_FLAG_NO_TE; arch_ptrace()
488 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; arch_ptrace()
491 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE)) arch_ptrace()
495 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND; arch_ptrace()
498 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; arch_ptrace()
499 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND; arch_ptrace()
502 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND; arch_ptrace()
503 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND; arch_ptrace()
542 PER_EVENT_IFETCH : child->thread.per_user.control; __peek_user_per_compat()
546 0 : child->thread.per_user.start; __peek_user_per_compat()
550 PSW32_ADDR_INSN : child->thread.per_user.end; __peek_user_per_compat()
557 return (__u32) child->thread.per_user.start; __peek_user_per_compat()
560 return (__u32) child->thread.per_user.end; __peek_user_per_compat()
563 return (__u32) child->thread.per_event.cause << 16; __peek_user_per_compat()
566 return (__u32) child->thread.per_event.address; __peek_user_per_compat()
569 return (__u32) child->thread.per_event.paid << 24; __peek_user_per_compat()
602 * access registers are stored in the thread structure __peek_user_compat()
605 tmp = *(__u32*)((addr_t) &child->thread.acrs + offset); __peek_user_compat()
622 * floating point control reg. is in the thread structure __peek_user_compat()
624 tmp = child->thread.fp_regs.fpc; __peek_user_compat()
628 * floating point regs. are either in child->thread.fp_regs __peek_user_compat()
629 * or the child->thread.vxrs array __peek_user_compat()
632 if (child->thread.vxrs) __peek_user_compat()
634 ((addr_t) child->thread.vxrs + 2*offset); __peek_user_compat()
637 ((addr_t) &child->thread.fp_regs.fprs + offset); __peek_user_compat()
674 child->thread.per_user.control = __poke_user_per_compat()
678 child->thread.per_user.start = data; __poke_user_per_compat()
681 child->thread.per_user.end = data; __poke_user_per_compat()
725 * access registers are stored in the thread structure __poke_user_compat()
728 *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; __poke_user_compat()
745 * floating point control reg. is in the thread structure __poke_user_compat()
749 child->thread.fp_regs.fpc = data; __poke_user_compat()
753 * floating point regs. are either in child->thread.fp_regs __poke_user_compat()
754 * or the child->thread.vxrs array __poke_user_compat()
757 if (child->thread.vxrs) __poke_user_compat()
759 child->thread.vxrs + 2*offset) = tmp; __poke_user_compat()
762 &child->thread.fp_regs.fprs + offset) = tmp; __poke_user_compat()
894 save_access_regs(target->thread.acrs); s390_regs_get()
923 save_access_regs(target->thread.acrs); s390_regs_set()
946 restore_access_regs(target->thread.acrs); s390_regs_set()
956 save_fp_ctl(&target->thread.fp_regs.fpc); s390_fpregs_get()
957 save_fp_regs(target->thread.fp_regs.fprs); s390_fpregs_get()
958 } else if (target->thread.vxrs) { s390_fpregs_get()
962 target->thread.fp_regs.fprs[i] = s390_fpregs_get()
963 *(freg_t *)(target->thread.vxrs + i); s390_fpregs_get()
966 &target->thread.fp_regs, 0, -1); s390_fpregs_get()
977 save_fp_ctl(&target->thread.fp_regs.fpc); s390_fpregs_set()
978 save_fp_regs(target->thread.fp_regs.fprs); s390_fpregs_set()
983 u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 }; s390_fpregs_set()
990 target->thread.fp_regs.fpc = ufpc[0]; s390_fpregs_set()
995 target->thread.fp_regs.fprs, s390_fpregs_set()
1000 restore_fp_ctl(&target->thread.fp_regs.fpc); s390_fpregs_set()
1001 restore_fp_regs(target->thread.fp_regs.fprs); s390_fpregs_set()
1002 } else if (target->thread.vxrs) { s390_fpregs_set()
1006 *(freg_t *)(target->thread.vxrs + i) = s390_fpregs_set()
1007 target->thread.fp_regs.fprs[i]; s390_fpregs_set()
1050 data = target->thread.trap_tdb; s390_tdb_get()
1072 if (target->thread.vxrs) { s390_vxrs_low_get()
1074 save_vx_regs(target->thread.vxrs); s390_vxrs_low_get()
1076 vxrs[i] = *((__u64 *)(target->thread.vxrs + i) + 1); s390_vxrs_low_get()
1092 if (!target->thread.vxrs) { s390_vxrs_low_set()
1097 save_vx_regs(target->thread.vxrs); s390_vxrs_low_set()
1102 *((__u64 *)(target->thread.vxrs + i) + 1) = vxrs[i]; s390_vxrs_low_set()
1104 restore_vx_regs(target->thread.vxrs); s390_vxrs_low_set()
1119 if (target->thread.vxrs) { s390_vxrs_high_get()
1121 save_vx_regs(target->thread.vxrs); s390_vxrs_high_get()
1122 memcpy(vxrs, target->thread.vxrs + __NUM_VXRS_LOW, s390_vxrs_high_get()
1138 if (!target->thread.vxrs) { s390_vxrs_high_set()
1143 save_vx_regs(target->thread.vxrs); s390_vxrs_high_set()
1146 target->thread.vxrs + __NUM_VXRS_LOW, 0, -1); s390_vxrs_high_set()
1148 restore_vx_regs(target->thread.vxrs); s390_vxrs_high_set()
1246 save_access_regs(target->thread.acrs); s390_compat_regs_get()
1275 save_access_regs(target->thread.acrs); s390_compat_regs_set()
1298 restore_access_regs(target->thread.acrs); s390_compat_regs_set()
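The s390_*_get/_set routines above all follow one shape: synchronize live registers into the thread structure, then stream the block with user_regset_copyout()/copyin(). A generic sketch of the .get side (the acrs field and save_access_regs() are borrowed from the excerpt):

    #include <linux/regset.h>

    static int demo_access_regs_get(struct task_struct *target,
                                    const struct user_regset *regset,
                                    unsigned int pos, unsigned int count,
                                    void *kbuf, void __user *ubuf)
    {
            /* If the target is the current task, its registers live in
             * hardware; flush them into the thread struct first. */
            if (target == current)
                    save_access_regs(target->thread.acrs);

            return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                       &target->thread.acrs, 0, -1);
    }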
H A Dsignal.c107 save_access_regs(current->thread.acrs); store_sigregs()
108 save_fp_ctl(&current->thread.fp_regs.fpc); store_sigregs()
109 if (current->thread.vxrs) { store_sigregs()
112 save_vx_regs(current->thread.vxrs); store_sigregs()
114 current->thread.fp_regs.fprs[i] = store_sigregs()
115 *(freg_t *)(current->thread.vxrs + i); store_sigregs()
117 save_fp_regs(current->thread.fp_regs.fprs); store_sigregs()
123 restore_access_regs(current->thread.acrs); load_sigregs()
125 if (current->thread.vxrs) { load_sigregs()
129 *(freg_t *)(current->thread.vxrs + i) = load_sigregs()
130 current->thread.fp_regs.fprs[i]; load_sigregs()
131 restore_vx_regs(current->thread.vxrs); load_sigregs()
133 restore_fp_regs(current->thread.fp_regs.fprs); load_sigregs()
147 memcpy(&user_sregs.regs.acrs, current->thread.acrs, save_sigregs()
149 memcpy(&user_sregs.fpregs, &current->thread.fp_regs, save_sigregs()
185 memcpy(&current->thread.acrs, &user_sregs.regs.acrs, restore_sigregs()
186 sizeof(current->thread.acrs)); restore_sigregs()
188 memcpy(&current->thread.fp_regs, &user_sregs.fpregs, restore_sigregs()
189 sizeof(current->thread.fp_regs)); restore_sigregs()
203 if (current->thread.vxrs) { save_sigregs_ext()
205 vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1); save_sigregs_ext()
209 current->thread.vxrs + __NUM_VXRS_LOW, save_sigregs_ext()
223 if (current->thread.vxrs) { restore_sigregs_ext()
226 __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW, restore_sigregs_ext()
231 *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; restore_sigregs_ext()
403 if (current->thread.vxrs) setup_rt_frame()
H A Dtraps.c32 address = *(unsigned long *)(current->thread.trap_tdb + 24); get_trap_ip()
107 (void __force __user *) current->thread.per_event.address; do_per_trap()
239 save_fp_regs(tsk->thread.fp_regs.fprs); alloc_vector_registers()
242 *(freg_t *) &vxrs[i] = tsk->thread.fp_regs.fprs[i]; alloc_vector_registers()
243 tsk->thread.vxrs = vxrs; alloc_vector_registers()
262 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); vector_exception()
263 vic = (current->thread.fp_regs.fpc & 0xf00) >> 8; vector_exception()
300 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); data_exception()
302 if (MACHINE_HAS_VX && !current->thread.vxrs && data_exception()
303 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { data_exception()
310 if (current->thread.fp_regs.fpc & FPC_DXC_MASK) data_exception()
315 do_fp_trap(regs, current->thread.fp_regs.fpc); data_exception()
H A Dcompat_signal.c171 save_access_regs(current->thread.acrs); store_sigregs()
172 save_fp_ctl(&current->thread.fp_regs.fpc); store_sigregs()
173 if (current->thread.vxrs) { store_sigregs()
174 save_vx_regs(current->thread.vxrs); store_sigregs()
176 current->thread.fp_regs.fprs[i] = store_sigregs()
177 *(freg_t *)(current->thread.vxrs + i); store_sigregs()
179 save_fp_regs(current->thread.fp_regs.fprs); store_sigregs()
187 restore_access_regs(current->thread.acrs); load_sigregs()
189 if (current->thread.vxrs) { load_sigregs()
191 *(freg_t *)(current->thread.vxrs + i) = load_sigregs()
192 current->thread.fp_regs.fprs[i]; load_sigregs()
193 restore_vx_regs(current->thread.vxrs); load_sigregs()
195 restore_fp_regs(current->thread.fp_regs.fprs); load_sigregs()
210 memcpy(&user_sregs.regs.acrs, current->thread.acrs, save_sigregs32()
212 memcpy(&user_sregs.fpregs, &current->thread.fp_regs, save_sigregs32()
249 memcpy(&current->thread.acrs, &user_sregs.regs.acrs, restore_sigregs32()
250 sizeof(current->thread.acrs)); restore_sigregs32()
252 memcpy(&current->thread.fp_regs, &user_sregs.fpregs, restore_sigregs32()
253 sizeof(current->thread.fp_regs)); restore_sigregs32()
274 if (current->thread.vxrs) { save_sigregs_ext32()
276 vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1); save_sigregs_ext32()
280 current->thread.vxrs + __NUM_VXRS_LOW, save_sigregs_ext32()
302 if (current->thread.vxrs) { restore_sigregs_ext32()
305 __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW, restore_sigregs_ext32()
310 *((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i]; restore_sigregs_ext32()
493 if (current->thread.vxrs) setup_rt_frame32()
/linux-4.1.27/arch/sh/kernel/
H A Dprocess.c19 * current task into the new thread.
28 if (src->thread.xstate) { arch_dup_task_struct()
29 dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, arch_dup_task_struct()
31 if (!dst->thread.xstate) arch_dup_task_struct()
33 memcpy(dst->thread.xstate, src->thread.xstate, xstate_size); arch_dup_task_struct()
41 if (tsk->thread.xstate) { free_thread_xstate()
42 kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); free_thread_xstate()
43 tsk->thread.xstate = NULL; free_thread_xstate()
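The fragments above come from arch_dup_task_struct(): the extended state is optional, lives in its own slab cache, and is deep-copied at fork so parent and child never alias it. A reconstruction under that reading (task_xstate_cachep and xstate_size are taken from the excerpt):

    #include <linux/slab.h>
    #include <linux/string.h>

    int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
    {
            *dst = *src;            /* byte-copy the task first */

            if (src->thread.xstate) {
                    dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                          GFP_KERNEL);
                    if (!dst->thread.xstate)
                            return -ENOMEM;
                    memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
            }
            return 0;
    }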
H A Dprocess_32.c80 * Free current thread data structures etc.
138 * p->thread.dsp_status.status |= SR_DSP copy_thread()
140 p->thread.dsp_status = tsk->thread.dsp_status; copy_thread()
144 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); copy_thread()
147 p->thread.sp = (unsigned long) childregs; copy_thread()
150 p->thread.pc = (unsigned long) ret_from_kernel_thread; copy_thread()
159 p->thread.fpu_counter = 0; copy_thread()
172 p->thread.pc = (unsigned long) ret_from_fork; copy_thread()
183 struct thread_struct *next_t = &next->thread; __switch_to()
192 if (next->thread.fpu_counter > 5) __switch_to()
210 if (next->thread.fpu_counter > 5) __switch_to()
230 unsigned long schedule_frame = (unsigned long)p->thread.sp; get_wchan()
H A Dprocess_64.c282 tsk->thread.kregs = regs; show_regs()
289 * Free current thread data structures etc..
329 /* if we are a kernel thread, about to change to user thread, flush_thread()
332 if(current->thread.kregs==&fake_swapper_regs) { flush_thread()
333 current->thread.kregs = flush_thread()
335 current->thread.uregs = current->thread.kregs; flush_thread()
361 memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu)); dump_fpu()
380 /* can't happen for a kernel thread */ copy_thread()
391 p->thread.sp = (unsigned long) childregs; copy_thread()
399 p->thread.pc = (unsigned long) ret_from_kernel_thread; copy_thread()
406 * Note that thread.sp and thread.pc will stay copy_thread()
412 p->thread.uregs = childregs; copy_thread()
417 p->thread.pc = (unsigned long) ret_from_fork; copy_thread()
451 sh64_switch_to_fp = (long) p->thread.sp; get_wchan()
/linux-4.1.27/arch/c6x/include/asm/
H A Dswitch_to.h26 current->thread.wchan = (u_long) __builtin_return_address(0); \
27 (last) = __switch_to(&(prev)->thread, \
28 &(next)->thread, (prev)); \
30 current->thread.wchan = 0; \
H A Dprocessor.h90 /* Free all resources held by a thread. */ release_thread()
99 * saved PC of a blocked thread.
104 * saved kernel SP and DP of a blocked thread.
108 (*(unsigned long *)&(tsk)->thread.b15_14)
110 (*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
113 (*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
115 (*(unsigned long *)&(tsk)->thread.b15_14)
H A Dthread_info.h46 mm_segment_t addr_limit; /* thread address space */
50 * macros/functions for gaining access to the thread information structure
66 /* get the thread information struct of current task */
82 * thread information flag bit numbers
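The "get the thread information struct of current task" helper referred to above is, on most stack-based ports, just a mask of the stack pointer, since thread_info sits at the base of the kernel stack. A generic sketch (how the stack pointer is fetched is per-arch; current_stack_pointer here is an assumption):

    static inline struct thread_info *current_thread_info(void)
    {
            unsigned long sp = current_stack_pointer;       /* arch-provided */

            /* The kernel stack is THREAD_SIZE-aligned with thread_info at
             * its base, so masking the low bits recovers the struct. */
            return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
    }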
/linux-4.1.27/arch/score/kernel/
H A Dptrace.c180 child->thread.single_step = 1; user_enable_single_step()
181 child->thread.ss_nextcnt = 1; user_enable_single_step()
191 child->thread.ss_nextcnt = 2; user_enable_single_step()
198 child->thread.ss_nextcnt = 2; user_enable_single_step()
211 child->thread.ss_nextcnt = 2; user_enable_single_step()
220 child->thread.ss_nextcnt = 2; user_enable_single_step()
228 if (child->thread.ss_nextcnt == 1) { user_enable_single_step()
240 child->thread.insn1_type = 0; user_enable_single_step()
241 child->thread.addr1 = epc; user_enable_single_step()
243 child->thread.insn1 = (short)epc_insn; user_enable_single_step()
245 child->thread.insn1_type = 1; user_enable_single_step()
246 child->thread.addr1 = epc; user_enable_single_step()
247 child->thread.insn1 = epc_insn; user_enable_single_step()
250 /* branch! have two target child->thread.ss_nextcnt=2 */ user_enable_single_step()
262 child->thread.insn1_type = 0; user_enable_single_step()
263 child->thread.addr1 = epc; user_enable_single_step()
265 child->thread.insn1 = (short)epc_insn; user_enable_single_step()
267 child->thread.insn1_type = 1; user_enable_single_step()
268 child->thread.addr1 = epc; user_enable_single_step()
269 child->thread.insn1 = epc_insn; user_enable_single_step()
281 child->thread.insn2_type = 0; user_enable_single_step()
282 child->thread.addr2 = far_epc; user_enable_single_step()
284 child->thread.insn2 = (short)far_epc_insn; user_enable_single_step()
286 child->thread.insn2_type = 1; user_enable_single_step()
287 child->thread.addr2 = far_epc; user_enable_single_step()
288 child->thread.insn2 = far_epc_insn; user_enable_single_step()
295 if (child->thread.insn1_type == 0) user_disable_single_step()
296 write_tsk_short(child, child->thread.addr1, user_disable_single_step()
297 child->thread.insn1); user_disable_single_step()
299 if (child->thread.insn1_type == 1) user_disable_single_step()
300 write_tsk_long(child, child->thread.addr1, user_disable_single_step()
301 child->thread.insn1); user_disable_single_step()
303 if (child->thread.ss_nextcnt == 2) { /* branch */ user_disable_single_step()
304 if (child->thread.insn1_type == 0) user_disable_single_step()
305 write_tsk_short(child, child->thread.addr1, user_disable_single_step()
306 child->thread.insn1); user_disable_single_step()
307 if (child->thread.insn1_type == 1) user_disable_single_step()
308 write_tsk_long(child, child->thread.addr1, user_disable_single_step()
309 child->thread.insn1); user_disable_single_step()
310 if (child->thread.insn2_type == 0) user_disable_single_step()
311 write_tsk_short(child, child->thread.addr2, user_disable_single_step()
312 child->thread.insn2); user_disable_single_step()
313 if (child->thread.insn2_type == 1) user_disable_single_step()
314 write_tsk_long(child, child->thread.addr2, user_disable_single_step()
315 child->thread.insn2); user_disable_single_step()
318 child->thread.single_step = 0; user_disable_single_step()
319 child->thread.ss_nextcnt = 0; user_disable_single_step()
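user_enable_single_step() above implements single-stepping in software: compute the next PC (two targets after a branch, hence ss_nextcnt = 2), save the instruction(s) found there, and plant breakpoints. A condensed sketch of the bookkeeping; BREAK_INSN stands in for the architecture's breakpoint opcode (an assumption), and read_tsk_long() is assumed alongside the write_tsk_long() seen above:

    static void demo_plant_step_bp(struct task_struct *child,
                                   unsigned long next_pc)
    {
            /* Remember where and what was patched so it can be undone. */
            child->thread.addr1 = next_pc;
            read_tsk_long(child, next_pc, &child->thread.insn1);
            write_tsk_long(child, next_pc, BREAK_INSN);
            child->thread.single_step = 1;
    }

    static void demo_remove_step_bp(struct task_struct *child)
    {
            /* Restore the original instruction at the patched address. */
            write_tsk_long(child, child->thread.addr1, child->thread.insn1);
            child->thread.single_step = 0;
    }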
H A Dasm-offsets.c117 OFFSET(THREAD_REG0, task_struct, thread.reg0); output_thread_defines()
118 OFFSET(THREAD_REG2, task_struct, thread.reg2); output_thread_defines()
119 OFFSET(THREAD_REG3, task_struct, thread.reg3); output_thread_defines()
120 OFFSET(THREAD_REG12, task_struct, thread.reg12); output_thread_defines()
121 OFFSET(THREAD_REG13, task_struct, thread.reg13); output_thread_defines()
122 OFFSET(THREAD_REG14, task_struct, thread.reg14); output_thread_defines()
123 OFFSET(THREAD_REG15, task_struct, thread.reg15); output_thread_defines()
124 OFFSET(THREAD_REG16, task_struct, thread.reg16); output_thread_defines()
125 OFFSET(THREAD_REG17, task_struct, thread.reg17); output_thread_defines()
126 OFFSET(THREAD_REG18, task_struct, thread.reg18); output_thread_defines()
127 OFFSET(THREAD_REG19, task_struct, thread.reg19); output_thread_defines()
128 OFFSET(THREAD_REG20, task_struct, thread.reg20); output_thread_defines()
129 OFFSET(THREAD_REG21, task_struct, thread.reg21); output_thread_defines()
130 OFFSET(THREAD_REG29, task_struct, thread.reg29); output_thread_defines()
132 OFFSET(THREAD_PSR, task_struct, thread.cp0_psr); output_thread_defines()
133 OFFSET(THREAD_EMA, task_struct, thread.cp0_ema); output_thread_defines()
134 OFFSET(THREAD_BADUADDR, task_struct, thread.cp0_baduaddr); output_thread_defines()
135 OFFSET(THREAD_ECODE, task_struct, thread.error_code); output_thread_defines()
136 OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no); output_thread_defines()
H A Dprocess.c51 /* New thread loses kernel privileges. */ start_thread()
78 p->thread.reg0 = (unsigned long) childregs; copy_thread()
81 p->thread.reg12 = usp; copy_thread()
82 p->thread.reg13 = arg; copy_thread()
83 p->thread.reg3 = (unsigned long) ret_from_kernel_thread; copy_thread()
90 p->thread.reg3 = (unsigned long) ret_from_fork; copy_thread()
93 p->thread.cp0_psr = 0; copy_thread()
/linux-4.1.27/arch/metag/kernel/
H A Dprocess.c178 * Copy architecture-specific thread state
191 memset(&tsk->thread.kernel_context, 0, copy_thread()
192 sizeof(tsk->thread.kernel_context)); copy_thread()
194 tsk->thread.kernel_context = __TBISwitchInit(kernel_context, copy_thread()
201 * if kernel thread becomes a userspace thread in the future copy_thread()
211 tsk->thread.int_depth = 2; copy_thread()
225 tsk->thread.int_depth = 1; copy_thread()
232 tsk->thread.tls_ptr = copy_thread()
236 if (tsk->thread.fpu_context) { copy_thread()
239 ctx = kmemdup(tsk->thread.fpu_context, copy_thread()
241 tsk->thread.fpu_context = ctx; copy_thread()
246 if (tsk->thread.dsp_context) { copy_thread()
250 ctx = kmemdup(tsk->thread.dsp_context, copy_thread()
255 tsk->thread.dsp_context = ctx; copy_thread()
263 static void alloc_fpu_context(struct thread_struct *thread) alloc_fpu_context() argument
265 thread->fpu_context = kzalloc(sizeof(struct meta_fpu_context), alloc_fpu_context()
269 static void clear_fpu(struct thread_struct *thread) clear_fpu() argument
271 thread->user_flags &= ~TBICTX_FPAC_BIT; clear_fpu()
272 kfree(thread->fpu_context); clear_fpu()
273 thread->fpu_context = NULL; clear_fpu()
276 static void clear_fpu(struct thread_struct *thread) clear_fpu() argument
282 static void clear_dsp(struct thread_struct *thread) clear_dsp() argument
284 if (thread->dsp_context) { clear_dsp()
285 kfree(thread->dsp_context->ram[0]); clear_dsp()
286 kfree(thread->dsp_context->ram[1]); clear_dsp()
288 kfree(thread->dsp_context); clear_dsp()
290 thread->dsp_context = NULL; clear_dsp()
296 static void clear_dsp(struct thread_struct *thread) clear_dsp() argument
306 to.Switch.pCtx = next->thread.kernel_context; __switch_to()
310 if (prev->thread.user_flags & TBICTX_FPAC_BIT) { __switch_to()
314 state.Sig.SaveMask = prev->thread.user_flags; __switch_to()
317 if (!prev->thread.fpu_context) __switch_to()
318 alloc_fpu_context(&prev->thread); __switch_to()
319 if (prev->thread.fpu_context) __switch_to()
320 __TBICtxFPUSave(state, prev->thread.fpu_context); __switch_to()
326 if (prev->thread.fpu_context) __switch_to()
327 prev->thread.fpu_context->needs_restore = true; __switch_to()
331 from = __TBISwitch(to, &prev->thread.kernel_context); __switch_to()
334 set_gateway_tls(current->thread.tls_ptr); __switch_to()
341 clear_fpu(&current->thread); flush_thread()
342 clear_dsp(&current->thread); flush_thread()
346 * Free current thread data structures etc.
350 clear_fpu(&current->thread); exit_thread()
351 clear_dsp(&current->thread); exit_thread()
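copy_thread() above deep-copies the optional FPU/DSP blobs with kmemdup() so the child owns private state. A minimal sketch of that pattern (the demo_ name is hypothetical; struct meta_fpu_context comes from the excerpt):

    #include <linux/slab.h>

    static int demo_dup_fpu_context(struct thread_struct *thread)
    {
            struct meta_fpu_context *ctx;

            if (!thread->fpu_context)
                    return 0;       /* parent had no FPU state to inherit */

            ctx = kmemdup(thread->fpu_context, sizeof(*ctx), GFP_KERNEL);
            if (!ctx)
                    return -ENOMEM;

            thread->fpu_context = ctx;      /* child now owns its own copy */
            return 0;
    }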
H A Dsmp.c69 * "thread" is assumed to be a valid Meta hardware thread ID.
71 static int boot_secondary(unsigned int thread, struct task_struct *idle) boot_secondary() argument
81 core_reg_write(TXUPC_ID, 0, thread, (unsigned int)secondary_startup); boot_secondary()
82 core_reg_write(TXUPC_ID, 1, thread, 0); boot_secondary()
85 * Give the thread privilege (PSTAT) and clear potentially problematic boot_secondary()
88 core_reg_write(TXUCT_ID, TXSTATUS_REGNUM, thread, TXSTATUS_PSTAT_BIT); boot_secondary()
91 val = core_reg_read(TXUCT_ID, TXPRIVEXT_REGNUM, thread); boot_secondary()
92 core_reg_write(TXUCT_ID, TXPRIVEXT_REGNUM, thread, val & ~0x80); boot_secondary()
96 * for the specified thread - off it goes! boot_secondary()
98 val = core_reg_read(TXUCT_ID, TXENABLE_REGNUM, thread); boot_secondary()
99 core_reg_write(TXUCT_ID, TXENABLE_REGNUM, thread, val | 0x1); boot_secondary()
112 * @thread: Hardware thread number.
121 static void describe_cachepart_change(unsigned int thread, const char *label, describe_cachepart_change() argument
132 pr_info("Thread %d: %s partition changed:", thread, label); describe_cachepart_change()
165 * setup_smp_cache: ensure cache coherency for new SMP thread.
166 * @thread: New hardware thread number.
171 static void setup_smp_cache(unsigned int thread) setup_smp_cache() argument
178 * Copy over the current thread's cache partition configuration to the setup_smp_cache()
179 * new thread so that they share cache partitions. setup_smp_cache()
185 dcpart_old = metag_in32(SYSC_DCPART(thread)); setup_smp_cache()
195 metag_out32(dcpart_new, SYSC_DCPART(thread)); setup_smp_cache()
198 icpart_old = metag_in32(SYSC_ICPART(thread)); setup_smp_cache()
199 metag_out32(icpart_new, SYSC_ICPART(thread)); setup_smp_cache()
210 describe_cachepart_change(thread, "dcache", dcsz, setup_smp_cache()
212 describe_cachepart_change(thread, "icache", icsz, setup_smp_cache()
218 unsigned int thread = cpu_2_hwthread_id[cpu]; __cpu_up() local
221 load_pgd(swapper_pg_dir, thread); __cpu_up()
225 setup_smp_cache(thread); __cpu_up()
228 * Tell the secondary CPU where to find its idle thread's stack. __cpu_up()
237 ret = boot_secondary(thread, idle); __cpu_up()
256 * FIXME: We need to clean up the new idle thread. --rmk __cpu_up()
296 * called on the thread which is asking for a CPU to be shutdown -
306 * Called from the idle thread for the CPU which has been shutdown.
335 * idle thread stack and the global page tables.
376 pr_info("CPU%u (thread %u): Booted secondary processor\n", secondary_start_kernel()
395 * OK, it's off to the idle thread for us secondary_start_kernel()
606 unsigned int thread; kick_raise_softirq() local
608 thread = cpu_2_hwthread_id[cpu]; kick_raise_softirq()
610 BUG_ON(thread == BAD_HWTHREAD_ID); kick_raise_softirq()
612 metag_out32(1, T0KICKI + (thread * TnXKICK_STRIDE)); kick_raise_softirq()
H A Dcore_reg.c34 * @thread: The thread we want to access.
41 void core_reg_write(int unit, int reg, int thread, unsigned int val) core_reg_write() argument
47 void __iomem *cu_reg = __CU_addr(thread, reg); core_reg_write()
62 val = UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread); core_reg_write()
77 * @thread: The thread we want to access.
83 unsigned int core_reg_read(int unit, int reg, int thread) core_reg_read() argument
90 void __iomem *cu_reg = __CU_addr(thread, reg); core_reg_read()
102 val = (UNIT_VAL(unit) | REG_VAL(reg) | THREAD_VAL(thread) | core_reg_read()
/linux-4.1.27/arch/um/kernel/
H A Dprocess.c82 to->thread.prev_sched = from; __switch_to()
85 switch_threads(&from->thread.switch_buf, &to->thread.switch_buf); __switch_to()
88 return current->thread.prev_sched; __switch_to()
98 tracehook_notify_resume(&current->thread.regs); interrupt_end()
119 if (current->thread.prev_sched != NULL) new_thread_handler()
120 schedule_tail(current->thread.prev_sched); new_thread_handler()
121 current->thread.prev_sched = NULL; new_thread_handler()
123 fn = current->thread.request.u.thread.proc; new_thread_handler()
124 arg = current->thread.request.u.thread.arg; new_thread_handler()
127 * callback returns only if the kernel thread execs a process new_thread_handler()
130 userspace(&current->thread.regs.regs); new_thread_handler()
138 schedule_tail(current->thread.prev_sched); fork_handler()
147 current->thread.prev_sched = NULL; fork_handler()
149 userspace(&current->thread.regs.regs); fork_handler()
159 p->thread = (struct thread_struct) INIT_THREAD; copy_thread()
162 memcpy(&p->thread.regs.regs, current_pt_regs(), copy_thread()
163 sizeof(p->thread.regs.regs)); copy_thread()
164 PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0); copy_thread()
166 REGS_SP(p->thread.regs.regs.gp) = sp; copy_thread()
170 arch_copy_thread(&current->thread.arch, &p->thread.arch); copy_thread()
172 get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp); copy_thread()
173 p->thread.request.u.thread.proc = (int (*)(void *))sp; copy_thread()
174 p->thread.request.u.thread.arg = (void *)arg; copy_thread()
178 new_thread(task_stack_page(p), &p->thread.switch_buf, handler); copy_thread()
184 * Set a new TLS for the child thread? copy_thread()
342 if (task->thread.singlestep_syscall) singlestepping()
377 sp = p->thread.switch_buf->JB_SP; get_wchan()
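UML's switch_threads(&from->thread.switch_buf, &to->thread.switch_buf) above is a jump-buffer context switch running inside a host process. A self-contained userspace analogue using POSIX ucontext, illustrative only (UML itself uses setjmp/longjmp buffers):

    #include <stdio.h>
    #include <ucontext.h>

    static ucontext_t main_ctx, thread_ctx;
    static char thread_stack[64 * 1024];

    static void thread_fn(void)
    {
            puts("in new thread");
            swapcontext(&thread_ctx, &main_ctx);    /* switch back out */
    }

    int main(void)
    {
            getcontext(&thread_ctx);
            thread_ctx.uc_stack.ss_sp   = thread_stack;
            thread_ctx.uc_stack.ss_size = sizeof(thread_stack);
            thread_ctx.uc_link          = &main_ctx;
            makecontext(&thread_ctx, thread_fn, 0);

            swapcontext(&main_ctx, &thread_ctx);    /* like switch_threads() */
            puts("back in main context");
            return 0;
    }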
H A Dsignal.c98 * from it, the tracing thread used to PTRACE_SINGLESTEP the process kern_do_signal()
100 * on the host. The tracing thread will check this flag and kern_do_signal()
104 current->thread.singlestep_syscall = kern_do_signal()
105 is_syscall(PT_REGS_IP(&current->thread.regs)); kern_do_signal()
118 return kern_do_signal(&current->thread.regs); do_signal()
H A Dtrap.c168 current->thread.arch.faultinfo = fi; bad_segv()
212 current->thread.segv_regs = container_of(regs, struct pt_regs, regs); segv()
229 * A thread accessed NULL, we get a fault, but CR2 is invalid. segv()
236 catcher = current->thread.fault_catcher; segv()
240 current->thread.fault_addr = (void *) address; segv()
243 else if (current->thread.fault_addr != NULL) segv()
261 current->thread.arch.faultinfo = fi; segv()
267 current->thread.arch.faultinfo = fi; segv()
273 current->thread.segv_regs = NULL; segv()
304 current->thread.arch.faultinfo = *fi; relay_signal()
319 if (current->thread.fault_catcher != NULL) bus_handler()
320 UML_LONGJMP(current->thread.fault_catcher, 1); bus_handler()
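The fault_catcher/UML_LONGJMP pattern above aborts a faulting access by long-jumping out of the signal handler. A userspace analogue under plain POSIX (long-jumping out of a SIGSEGV handler is exactly what UML relies on, though it is not portable C in general):

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    static sigjmp_buf fault_catcher;

    static void segv_handler(int sig)
    {
            siglongjmp(fault_catcher, 1);   /* back to the catch point */
    }

    int main(void)
    {
            struct sigaction sa = { .sa_handler = segv_handler };

            sigaction(SIGSEGV, &sa, NULL);

            if (sigsetjmp(fault_catcher, 1) == 0)
                    *(volatile int *)0 = 0; /* provoke the fault */
            else
                    puts("fault caught, execution resumed");
            return 0;
    }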
/linux-4.1.27/arch/powerpc/kernel/
H A Dptrace.c173 return task->thread.regs->msr | task->thread.fpexc_mode; get_user_msr()
178 task->thread.regs->msr &= ~MSR_DEBUGCHANGE; set_user_msr()
179 task->thread.regs->msr |= msr & MSR_DEBUGCHANGE; set_user_msr()
186 *data = task->thread.dscr; get_user_dscr()
192 task->thread.dscr = dscr; set_user_dscr()
193 task->thread.dscr_inherit = 1; set_user_dscr()
214 task->thread.regs->trap = trap & 0xfff0; set_user_trap()
223 if ((task->thread.regs == NULL) || !data) ptrace_get_reg()
235 *data = ((unsigned long *)task->thread.regs)[regno]; ptrace_get_reg()
247 if (task->thread.regs == NULL) ptrace_put_reg()
258 ((unsigned long *)task->thread.regs)[regno] = data; ptrace_put_reg()
270 if (target->thread.regs == NULL) gpr_get()
273 if (!FULL_REGS(target->thread.regs)) { gpr_get()
276 target->thread.regs->gpr[i] = NV_REG_POISON; gpr_get()
280 target->thread.regs, gpr_get()
295 &target->thread.regs->orig_gpr3, gpr_get()
312 if (target->thread.regs == NULL) gpr_set()
315 CHECK_FULL_REGS(target->thread.regs); gpr_set()
318 target->thread.regs, gpr_set()
334 &target->thread.regs->orig_gpr3, gpr_set()
373 buf[i] = target->thread.TS_FPR(i); fpr_get()
374 buf[32] = target->thread.fp_state.fpscr; fpr_get()
382 &target->thread.fp_state, 0, -1); fpr_get()
402 target->thread.TS_FPR(i) = buf[i]; fpr_set()
403 target->thread.fp_state.fpscr = buf[32]; fpr_set()
410 &target->thread.fp_state, 0, -1); fpr_set()
432 return target->thread.used_vr ? regset->n : 0; vr_active()
447 &target->thread.vr_state, 0, vr_get()
458 vrsave.word = target->thread.vrsave; vr_get()
478 &target->thread.vr_state, 0, vr_set()
489 vrsave.word = target->thread.vrsave; vr_set()
493 target->thread.vrsave = vrsave.word; vr_set()
511 return target->thread.used_vsr ? regset->n : 0; vsr_active()
524 buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; vsr_get()
543 target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; vsr_set()
566 return target->thread.used_spe ? regset->n : 0; evr_active()
578 &target->thread.evr, evr_get()
579 0, sizeof(target->thread.evr)); evr_get()
586 &target->thread.acc, evr_get()
587 sizeof(target->thread.evr), -1); evr_get()
601 &target->thread.evr, evr_set()
602 0, sizeof(target->thread.evr)); evr_set()
609 &target->thread.acc, evr_set()
610 sizeof(target->thread.evr), -1); evr_set()
681 const unsigned long *regs = &target->thread.regs->gpr[0]; gpr32_get()
687 if (target->thread.regs == NULL) gpr32_get()
690 if (!FULL_REGS(target->thread.regs)) { gpr32_get()
693 target->thread.regs->gpr[i] = NV_REG_POISON; gpr32_get()
738 unsigned long *regs = &target->thread.regs->gpr[0]; gpr32_set()
743 if (target->thread.regs == NULL) gpr32_set()
746 CHECK_FULL_REGS(target->thread.regs); gpr32_set()
854 struct pt_regs *regs = task->thread.regs; user_enable_single_step()
858 task->thread.debug.dbcr0 &= ~DBCR0_BT; user_enable_single_step()
859 task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC; user_enable_single_step()
871 struct pt_regs *regs = task->thread.regs; user_enable_block_step()
875 task->thread.debug.dbcr0 &= ~DBCR0_IC; user_enable_block_step()
876 task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT; user_enable_block_step()
888 struct pt_regs *regs = task->thread.regs; user_disable_single_step()
898 task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT); user_disable_single_step()
902 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0, user_disable_single_step()
903 task->thread.debug.dbcr1)) { user_disable_single_step()
907 task->thread.debug.dbcr0 &= ~DBCR0_IDM; user_disable_single_step()
940 struct thread_struct *thread = &(task->thread); ptrace_set_debugreg() local
979 bp = thread->ptrace_bps[0]; ptrace_set_debugreg()
983 thread->ptrace_bps[0] = NULL; ptrace_set_debugreg()
999 thread->ptrace_bps[0] = bp; ptrace_set_debugreg()
1000 thread->hw_brk = hw_brk; ptrace_set_debugreg()
1010 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, ptrace_set_debugreg()
1013 thread->ptrace_bps[0] = NULL; ptrace_set_debugreg()
1018 task->thread.hw_brk = hw_brk; ptrace_set_debugreg()
1026 task->thread.debug.dac1 = data & ~0x3UL; ptrace_set_debugreg()
1028 if (task->thread.debug.dac1 == 0) { ptrace_set_debugreg()
1030 if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0, ptrace_set_debugreg()
1031 task->thread.debug.dbcr1)) { ptrace_set_debugreg()
1032 task->thread.regs->msr &= ~MSR_DE; ptrace_set_debugreg()
1033 task->thread.debug.dbcr0 &= ~DBCR0_IDM; ptrace_set_debugreg()
1045 task->thread.debug.dbcr0 |= DBCR0_IDM; ptrace_set_debugreg()
1054 task->thread.regs->msr |= MSR_DE; ptrace_set_debugreg()
1075 int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0); set_instruction_bp()
1076 int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0); set_instruction_bp()
1077 int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0); set_instruction_bp()
1078 int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0); set_instruction_bp()
1097 child->thread.debug.iac1 = bp_info->addr; set_instruction_bp()
1098 child->thread.debug.iac2 = bp_info->addr2; set_instruction_bp()
1099 child->thread.debug.dbcr0 |= DBCR0_IAC1; set_instruction_bp()
1108 child->thread.debug.iac3 = bp_info->addr; set_instruction_bp()
1109 child->thread.debug.iac4 = bp_info->addr2; set_instruction_bp()
1110 child->thread.debug.dbcr0 |= DBCR0_IAC3; set_instruction_bp()
1130 child->thread.debug.iac1 = bp_info->addr; set_instruction_bp()
1131 child->thread.debug.dbcr0 |= DBCR0_IAC1; set_instruction_bp()
1137 child->thread.debug.iac2 = bp_info->addr; set_instruction_bp()
1138 child->thread.debug.dbcr0 |= DBCR0_IAC2; set_instruction_bp()
1142 child->thread.debug.iac3 = bp_info->addr; set_instruction_bp()
1143 child->thread.debug.dbcr0 |= DBCR0_IAC3; set_instruction_bp()
1146 child->thread.debug.iac4 = bp_info->addr; set_instruction_bp()
1147 child->thread.debug.dbcr0 |= DBCR0_IAC4; set_instruction_bp()
1153 child->thread.debug.dbcr0 |= DBCR0_IDM; set_instruction_bp()
1154 child->thread.regs->msr |= MSR_DE; set_instruction_bp()
1163 if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0) del_instruction_bp()
1168 child->thread.debug.iac2 = 0; del_instruction_bp()
1171 child->thread.debug.iac1 = 0; del_instruction_bp()
1172 child->thread.debug.dbcr0 &= ~DBCR0_IAC1; del_instruction_bp()
1175 if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0) del_instruction_bp()
1181 child->thread.debug.iac2 = 0; del_instruction_bp()
1182 child->thread.debug.dbcr0 &= ~DBCR0_IAC2; del_instruction_bp()
1186 if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0) del_instruction_bp()
1191 child->thread.debug.iac4 = 0; del_instruction_bp()
1194 child->thread.debug.iac3 = 0; del_instruction_bp()
1195 child->thread.debug.dbcr0 &= ~DBCR0_IAC3; del_instruction_bp()
1198 if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0) del_instruction_bp()
1204 child->thread.debug.iac4 = 0; del_instruction_bp()
1205 child->thread.debug.dbcr0 &= ~DBCR0_IAC4; del_instruction_bp()
1235 child->thread.debug.dac1 = (unsigned long)bp_info->addr; set_dac()
1238 child->thread.debug.dvc1 = set_dac()
1240 child->thread.debug.dbcr2 |= set_dac()
1246 } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) { set_dac()
1256 child->thread.debug.dac2 = (unsigned long)bp_info->addr; set_dac()
1259 child->thread.debug.dvc2 = set_dac()
1261 child->thread.debug.dbcr2 |= set_dac()
1268 child->thread.debug.dbcr0 |= DBCR0_IDM; set_dac()
1269 child->thread.regs->msr |= MSR_DE; set_dac()
1280 child->thread.debug.dac1 = 0; del_dac()
1283 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) { del_dac()
1284 child->thread.debug.dac2 = 0; del_dac()
1285 child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE; del_dac()
1287 child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE); del_dac()
1290 child->thread.debug.dvc1 = 0; del_dac()
1297 if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) del_dac()
1300 child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE); del_dac()
1303 child->thread.debug.dvc2 = 0; del_dac()
1305 child->thread.debug.dac2 = 0; del_dac()
1347 if (child->thread.debug.dbcr0 & set_dac_range()
1352 child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM); set_dac_range()
1354 child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM); set_dac_range()
1355 child->thread.debug.dac1 = bp_info->addr; set_dac_range()
1356 child->thread.debug.dac2 = bp_info->addr2; set_dac_range()
1358 child->thread.debug.dbcr2 |= DBCR2_DAC12M; set_dac_range()
1360 child->thread.debug.dbcr2 |= DBCR2_DAC12MX; set_dac_range()
1362 child->thread.debug.dbcr2 |= DBCR2_DAC12MM; set_dac_range()
1363 child->thread.regs->msr |= MSR_DE; set_dac_range()
1374 struct thread_struct *thread = &(child->thread); ppc_set_hwdebug() local
1445 bp = thread->ptrace_bps[0]; ppc_set_hwdebug()
1455 thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr, ppc_set_hwdebug()
1458 thread->ptrace_bps[0] = NULL; ppc_set_hwdebug()
1468 if (child->thread.hw_brk.address) ppc_set_hwdebug()
1471 child->thread.hw_brk = brk; ppc_set_hwdebug()
1481 struct thread_struct *thread = &(child->thread); ppc_del_hwdebug() local
1493 if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0, ppc_del_hwdebug()
1494 child->thread.debug.dbcr1)) { ppc_del_hwdebug()
1495 child->thread.debug.dbcr0 &= ~DBCR0_IDM; ppc_del_hwdebug()
1496 child->thread.regs->msr &= ~MSR_DE; ppc_del_hwdebug()
1505 bp = thread->ptrace_bps[0]; ppc_del_hwdebug()
1508 thread->ptrace_bps[0] = NULL; ppc_del_hwdebug()
1513 if (child->thread.hw_brk.address == 0) ppc_del_hwdebug()
1516 child->thread.hw_brk.address = 0; ppc_del_hwdebug()
1517 child->thread.hw_brk.type = 0; ppc_del_hwdebug()
1541 || (child->thread.regs == NULL)) arch_ptrace()
1548 CHECK_FULL_REGS(child->thread.regs); arch_ptrace()
1558 memcpy(&tmp, &child->thread.TS_FPR(fpidx), arch_ptrace()
1561 tmp = child->thread.fp_state.fpscr; arch_ptrace()
1576 || (child->thread.regs == NULL)) arch_ptrace()
1583 CHECK_FULL_REGS(child->thread.regs); arch_ptrace()
1591 memcpy(&child->thread.TS_FPR(fpidx), &data, arch_ptrace()
1594 child->thread.fp_state.fpscr = data; arch_ptrace()
1673 ret = put_user(child->thread.debug.dac1, datalp); arch_ptrace()
1675 dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) | arch_ptrace()
1676 (child->thread.hw_brk.type & HW_BRK_TYPE_DABR)); arch_ptrace()
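ptrace_set_debugreg()/ppc_set_hwdebug() above route watchpoints through the generic perf hw_breakpoint layer. A sketch of that registration path; the callback body and demo_ names are illustrative:

    #include <linux/hw_breakpoint.h>
    #include <linux/perf_event.h>

    static void demo_bp_triggered(struct perf_event *bp,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs)
    {
            /* The real handler raises SIGTRAP at the faulting address,
             * as do_break() does in the process.c excerpt below. */
    }

    static struct perf_event *demo_set_watchpoint(struct task_struct *child,
                                                  unsigned long addr)
    {
            struct perf_event_attr attr;

            hw_breakpoint_init(&attr);              /* sane defaults */
            attr.bp_addr = addr;
            attr.bp_len  = HW_BREAKPOINT_LEN_8;
            attr.bp_type = HW_BREAKPOINT_W;         /* write watchpoint */

            /* Returns ERR_PTR() on failure; the caller above stores the
             * event in thread->ptrace_bps[0]. */
            return register_user_hw_breakpoint(&attr, demo_bp_triggered,
                                               NULL, child);
    }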
H A Dprocess.c81 * If we are saving the current thread's registers, and the giveup_fpu_maybe_transactional()
82 * thread is in a transactional state, set the TIF_RESTORE_TM giveup_fpu_maybe_transactional()
86 if (tsk == current && tsk->thread.regs && giveup_fpu_maybe_transactional()
87 MSR_TM_ACTIVE(tsk->thread.regs->msr) && giveup_fpu_maybe_transactional()
89 tsk->thread.tm_orig_msr = tsk->thread.regs->msr; giveup_fpu_maybe_transactional()
99 * If we are saving the current thread's registers, and the giveup_altivec_maybe_transactional()
100 * thread is in a transactional state, set the TIF_RESTORE_TM giveup_altivec_maybe_transactional()
104 if (tsk == current && tsk->thread.regs && giveup_altivec_maybe_transactional()
105 MSR_TM_ACTIVE(tsk->thread.regs->msr) && giveup_altivec_maybe_transactional()
107 tsk->thread.tm_orig_msr = tsk->thread.regs->msr; giveup_altivec_maybe_transactional()
126 if (tsk->thread.regs) { flush_fp_to_thread()
136 if (tsk->thread.regs->msr & MSR_FP) { flush_fp_to_thread()
160 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) enable_kernel_fp()
176 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) enable_kernel_altivec()
192 if (tsk->thread.regs) { flush_altivec_to_thread()
194 if (tsk->thread.regs->msr & MSR_VEC) { flush_altivec_to_thread()
212 if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) enable_kernel_vsx()
232 if (tsk->thread.regs) { flush_vsx_to_thread()
234 if (tsk->thread.regs->msr & MSR_VSX) { flush_vsx_to_thread()
253 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) enable_kernel_spe()
265 if (tsk->thread.regs) { flush_spe_to_thread()
267 if (tsk->thread.regs->msr & MSR_SPE) { flush_spe_to_thread()
271 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR); flush_spe_to_thread()
311 current->thread.trap_nr = signal_code; do_send_trap()
329 current->thread.trap_nr = TRAP_HWBKPT; do_break()
355 static void set_debug_reg_defaults(struct thread_struct *thread) set_debug_reg_defaults() argument
357 thread->debug.iac1 = thread->debug.iac2 = 0; set_debug_reg_defaults()
359 thread->debug.iac3 = thread->debug.iac4 = 0; set_debug_reg_defaults()
361 thread->debug.dac1 = thread->debug.dac2 = 0; set_debug_reg_defaults()
363 thread->debug.dvc1 = thread->debug.dvc2 = 0; set_debug_reg_defaults()
365 thread->debug.dbcr0 = 0; set_debug_reg_defaults()
370 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | set_debug_reg_defaults()
376 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; set_debug_reg_defaults()
378 thread->debug.dbcr1 = 0; set_debug_reg_defaults()
410 * If either the old or the new thread is making use of the
412 * debug registers, set them from the values stored in the new thread.
416 if ((current->thread.debug.dbcr0 & DBCR0_IDM) switch_booke_debug_regs()
423 static void set_debug_reg_defaults(struct thread_struct *thread) set_debug_reg_defaults() argument
425 thread->hw_brk.address = 0; set_debug_reg_defaults()
426 thread->hw_brk.type = 0; set_debug_reg_defaults()
427 set_breakpoint(&thread->hw_brk); set_debug_reg_defaults()
540 * the thread will no longer be transactional. tm_reclaim_thread()
561 * we need to exit this thread which calls __switch_to() which tm_reclaim_thread()
586 tm_reclaim_thread(&current->thread, current_thread_info(), cause); tm_reclaim_current()
595 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the tm_reclaim_task()
597 * (current) FPRs into oldtask->thread.transact_fpr[]. tm_reclaim_task()
601 struct thread_struct *thr = &tsk->thread; tm_reclaim_task()
609 /* Stash the original thread MSR, as giveup_fpu et al will tm_reclaim_task()
630 * This context-switches a thread's TM info SPRs. We do it here to tm_reclaim_task()
637 extern void __tm_recheckpoint(struct thread_struct *thread,
640 void tm_recheckpoint(struct thread_struct *thread, tm_recheckpoint() argument
655 tm_restore_sprs(thread); tm_recheckpoint()
657 __tm_recheckpoint(thread, orig_msr); tm_recheckpoint()
669 /* Recheckpoint the registers of the thread we're about to switch to. tm_recheckpoint_new_task()
677 if (!new->thread.regs) tm_recheckpoint_new_task()
680 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){ tm_recheckpoint_new_task()
681 tm_restore_sprs(&new->thread); tm_recheckpoint_new_task()
684 msr = new->thread.tm_orig_msr; tm_recheckpoint_new_task()
688 new->pid, new->thread.regs->msr, msr); tm_recheckpoint_new_task()
691 tm_recheckpoint(&new->thread, msr); tm_recheckpoint_new_task()
695 do_load_up_transact_fpu(&new->thread); tm_recheckpoint_new_task()
696 new->thread.regs->msr |= tm_recheckpoint_new_task()
697 (MSR_FP | new->thread.fpexc_mode); tm_recheckpoint_new_task()
701 do_load_up_transact_altivec(&new->thread); tm_recheckpoint_new_task()
702 new->thread.regs->msr |= MSR_VEC; tm_recheckpoint_new_task()
707 new->thread.regs->msr |= MSR_VSX; tm_recheckpoint_new_task()
744 msr_diff = current->thread.tm_orig_msr & ~regs->msr; restore_tm_state()
748 load_fp_state(&current->thread.fp_state); restore_tm_state()
749 regs->msr |= current->thread.fpexc_mode; restore_tm_state()
753 load_vr_state(&current->thread.vr_state); restore_tm_state()
782 save_early_sprs(&prev->thread); __switch_to()
796 if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) __switch_to()
800 * If the previous thread used altivec in the last quantum __switch_to()
810 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) __switch_to()
814 if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX)) __switch_to()
820 * If the previous thread used spe in the last quantum __switch_to()
826 if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) __switch_to()
835 if (new->thread.regs && last_task_used_altivec == new) __switch_to()
836 new->thread.regs->msr |= MSR_VEC; __switch_to()
839 if (new->thread.regs && last_task_used_vsx == new) __switch_to()
840 new->thread.regs->msr |= MSR_VSX; __switch_to()
846 if (new->thread.regs && last_task_used_spe == new) __switch_to()
847 new->thread.regs->msr |= MSR_SPE; __switch_to()
853 switch_booke_debug_regs(&new->thread.debug); __switch_to()
860 if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk))) __switch_to()
861 __set_breakpoint(&new->thread.hw_brk); __switch_to()
866 new_thread = &new->thread; __switch_to()
867 old_thread = &current->thread; __switch_to()
1074 set_debug_reg_defaults(&current->thread); flush_thread()
1085 * copy the current task into the new thread.
1123 p->thread.ksp_vsid = sp_vsid; setup_ksp_vsid()
1128 * Copy a thread..
1133 * Copy architecture-specific thread state
1148 /* kernel thread */ copy_thread()
1160 p->thread.regs = NULL; /* no user register state */ copy_thread()
1164 /* user thread */ copy_thread()
1170 p->thread.regs = childregs; copy_thread()
1197 p->thread.ksp = sp; copy_thread()
1199 p->thread.ksp_limit = (unsigned long)task_stack_page(p) + copy_thread()
1203 p->thread.ptrace_bps[0] = NULL; copy_thread()
1206 p->thread.fp_save_area = NULL; copy_thread()
1208 p->thread.vr_save_area = NULL; copy_thread()
1215 p->thread.dscr_inherit = current->thread.dscr_inherit; copy_thread()
1216 p->thread.dscr = current->thread.dscr; copy_thread()
1219 p->thread.ppr = INIT_PPR; copy_thread()
1226 * Set up a thread for executing a new program
1235 * If we exec out of a kernel thread then thread.regs will not be set; do it now. start_thread()
1238 if (!current->thread.regs) { start_thread()
1240 current->thread.regs = regs - 1; start_thread()
1253 * ptrace to examine the thread immediately after exec. start_thread()
1311 current->thread.used_vsr = 0; start_thread()
1313 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state)); start_thread()
1314 current->thread.fp_save_area = NULL; start_thread()
1316 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state)); start_thread()
1317 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ start_thread()
1318 current->thread.vr_save_area = NULL; start_thread()
1319 current->thread.vrsave = 0; start_thread()
1320 current->thread.used_vr = 0; start_thread()
1323 memset(current->thread.evr, 0, sizeof(current->thread.evr)); start_thread()
1324 current->thread.acc = 0; start_thread()
1325 current->thread.spefscr = 0; start_thread()
1326 current->thread.used_spe = 0; start_thread()
1331 current->thread.tm_tfhar = 0; start_thread()
1332 current->thread.tm_texasr = 0; start_thread()
1333 current->thread.tm_tfiar = 0; start_thread()
1343 struct pt_regs *regs = tsk->thread.regs; set_fpexc_mode()
1364 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); set_fpexc_mode()
1365 tsk->thread.fpexc_mode = val & set_fpexc_mode()
1383 tsk->thread.fpexc_mode = __pack_fe01(val); set_fpexc_mode()
1386 | tsk->thread.fpexc_mode; set_fpexc_mode()
1394 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) get_fpexc_mode()
1409 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); get_fpexc_mode()
1410 val = tsk->thread.fpexc_mode; get_fpexc_mode()
1417 val = __unpack_fe01(tsk->thread.fpexc_mode); get_fpexc_mode()
1423 struct pt_regs *regs = tsk->thread.regs; set_endian()
1444 struct pt_regs *regs = tsk->thread.regs; get_endian()
1467 tsk->thread.align_ctl = val; set_unalign_ctl()
1473 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr); get_unalign_ctl()
1522 sp = p->thread.ksp; get_wchan()
1559 sp = tsk->thread.ksp; show_stack()
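
The process.c hits above trace the lazy-facility protocol on PowerPC: __switch_to() flushes FP/Altivec/VSX/SPE state only when the outgoing task's saved MSR says the facility was used, and the incoming task gets its MSR facility bit back only if it was the last user, so no reload is needed. A minimal sketch of that protocol, assuming invented names (task_t, last_fpu_owner and fpu_save are illustrative, not kernel symbols):

/* Illustrative sketch of lazy facility ownership across a context
 * switch; task_t, last_fpu_owner, fpu_save() are invented for this
 * example, not kernel symbols. */
#include <stddef.h>

#define MSR_FP 0x2000UL

typedef struct task {
    unsigned long msr;      /* saved facility bits, e.g. MSR_FP */
    double fp_state[32];    /* where hardware FP state is parked */
} task_t;

/* The reload trap handler (not shown) sets last_fpu_owner = task
 * whenever it loads that task's state into the hardware. */
static task_t *last_fpu_owner;

static void fpu_save(task_t *t) { (void)t; /* HW regs -> t->fp_state */ }

static void switch_fpu(task_t *prev, task_t *next)
{
    if (prev->msr & MSR_FP)    /* prev really used FP this quantum */
        fpu_save(prev);        /* flush HW state into prev */

    if (last_fpu_owner == next)
        next->msr |= MSR_FP;   /* HW already holds next's state */
    else
        next->msr &= ~MSR_FP;  /* first FP use will trap and reload */
}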
H A Dsignal_32.c272 buf[i] = task->thread.TS_FPR(i); copy_fpr_to_user()
273 buf[i] = task->thread.fp_state.fpscr; copy_fpr_to_user()
286 task->thread.TS_FPR(i) = buf[i]; copy_fpr_from_user()
287 task->thread.fp_state.fpscr = buf[i]; copy_fpr_from_user()
300 buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET]; copy_vsx_to_user()
313 task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i]; copy_vsx_from_user()
326 buf[i] = task->thread.TS_TRANS_FPR(i); copy_transact_fpr_to_user()
327 buf[i] = task->thread.transact_fp.fpscr; copy_transact_fpr_to_user()
340 task->thread.TS_TRANS_FPR(i) = buf[i]; copy_transact_fpr_from_user()
341 task->thread.transact_fp.fpscr = buf[i]; copy_transact_fpr_from_user()
354 buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET]; copy_transact_vsx_to_user()
367 task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i]; copy_transact_vsx_from_user()
375 return __copy_to_user(to, task->thread.fp_state.fpr, copy_fpr_to_user()
382 return __copy_from_user(task->thread.fp_state.fpr, from, copy_fpr_from_user()
390 return __copy_to_user(to, task->thread.transact_fp.fpr, copy_transact_fpr_to_user()
397 return __copy_from_user(task->thread.transact_fp.fpr, from, copy_transact_fpr_from_user()
423 if (current->thread.used_vr) { save_user_regs()
425 if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state, save_user_regs()
441 current->thread.vrsave = mfspr(SPRN_VRSAVE); save_user_regs()
442 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32])) save_user_regs()
460 if (current->thread.used_vsr && ctx_has_vsx_region) { save_user_regs()
469 if (current->thread.used_spe) { save_user_regs()
471 if (__copy_to_user(&frame->mc_vregs, current->thread.evr, save_user_regs()
481 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG)) save_user_regs()
521 /* Remove TM bits from thread's MSR. The MSR in the sigcontext save_tm_user_regs()
532 if (save_general_regs(&current->thread.ckpt_regs, frame) save_tm_user_regs()
547 if (current->thread.used_vr) { save_tm_user_regs()
549 if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state, save_tm_user_regs()
554 &current->thread.transact_vr, save_tm_user_regs()
559 &current->thread.vr_state, save_tm_user_regs()
576 current->thread.vrsave = mfspr(SPRN_VRSAVE); save_tm_user_regs()
577 if (__put_user(current->thread.vrsave, save_tm_user_regs()
581 if (__put_user(current->thread.transact_vrsave, save_tm_user_regs()
585 if (__put_user(current->thread.vrsave, save_tm_user_regs()
608 if (current->thread.used_vsr) { save_tm_user_regs()
628 if (current->thread.used_spe) { save_tm_user_regs()
630 if (__copy_to_user(&frame->mc_vregs, current->thread.evr, save_tm_user_regs()
639 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG)) save_tm_user_regs()
691 * Do this before updating the thread state in restore_user_regs()
692 * current->thread.fpr/vr/evr. That way, if we get preempted restore_user_regs()
702 * current->thread when it next does altivec instructions restore_user_regs()
707 if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs, restore_user_regs()
710 } else if (current->thread.used_vr) restore_user_regs()
711 memset(&current->thread.vr_state, 0, restore_user_regs()
715 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) restore_user_regs()
718 mtspr(SPRN_VRSAVE, current->thread.vrsave); restore_user_regs()
726 * current->thread when it next does VSX instruction. restore_user_regs()
736 } else if (current->thread.used_vsr) restore_user_regs()
738 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; restore_user_regs()
742 * current->thread when it next does FP instructions restore_user_regs()
748 current->thread when it next does spe instructions */ restore_user_regs()
752 if (__copy_from_user(current->thread.evr, &sr->mc_vregs, restore_user_regs()
755 } else if (current->thread.used_spe) restore_user_regs()
756 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); restore_user_regs()
759 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG)) restore_user_regs()
790 err |= restore_general_regs(&current->thread.ckpt_regs, sr); restore_tm_user_regs()
792 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]); restore_tm_user_regs()
802 * Do this before updating the thread state in restore_tm_user_regs()
803 * current->thread.fpr/vr/evr. That way, if we get preempted restore_tm_user_regs()
814 if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs, restore_tm_user_regs()
816 __copy_from_user(&current->thread.transact_vr, restore_tm_user_regs()
820 } else if (current->thread.used_vr) { restore_tm_user_regs()
821 memset(&current->thread.vr_state, 0, restore_tm_user_regs()
823 memset(&current->thread.transact_vr, 0, restore_tm_user_regs()
828 if (__get_user(current->thread.vrsave, restore_tm_user_regs()
830 __get_user(current->thread.transact_vrsave, restore_tm_user_regs()
834 mtspr(SPRN_VRSAVE, current->thread.vrsave); restore_tm_user_regs()
853 } else if (current->thread.used_vsr) restore_tm_user_regs()
855 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; restore_tm_user_regs()
856 current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0; restore_tm_user_regs()
866 if (__copy_from_user(current->thread.evr, &sr->mc_vregs, restore_tm_user_regs()
869 } else if (current->thread.used_spe) restore_tm_user_regs()
870 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); restore_tm_user_regs()
873 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs restore_tm_user_regs()
893 current->thread.tm_texasr |= TEXASR_FS; restore_tm_user_regs()
895 tm_recheckpoint(&current->thread, msr); restore_tm_user_regs()
899 do_load_up_transact_fpu(&current->thread); restore_tm_user_regs()
900 regs->msr |= (MSR_FP | current->thread.fpexc_mode); restore_tm_user_regs()
904 do_load_up_transact_altivec(&current->thread); restore_tm_user_regs()
1045 current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */ handle_rt_signal32()
1215 * or if another thread unmaps the region containing the context. sys_swapcontext()
1313 unsigned long new_dbcr0 = current->thread.debug.dbcr0; sys_debug_setcontext()
1328 current->thread.debug.dbcr1)) { sys_debug_setcontext()
1363 current->thread.debug.dbcr0 = new_dbcr0; sys_debug_setcontext()
1379 * or if another thread unmaps the region containing the context. sys_debug_setcontext()
1465 current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */ handle_signal32()
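
copy_fpr_to_user() and its siblings above all follow one gather-then-copy shape: pack the discontiguous per-register state into a flat buffer, then issue a single copy into the user signal frame. A compact sketch, assuming an invented fake_copy_to_user() in place of __copy_to_user():

/* Hedged sketch of the gather-then-copy pattern in copy_fpr_to_user():
 * the buffer layout (32 regs + fpscr in the last slot) mirrors the
 * excerpt; fake_copy_to_user() is a stand-in, not a kernel call. */
#include <string.h>

#define NFPREG 33                  /* 32 registers plus fpscr */

struct fp_state {
    unsigned long long fpr[32];
    unsigned long long fpscr;
};

static int fake_copy_to_user(void *to, const void *from, size_t n)
{
    memcpy(to, from, n);           /* stands in for __copy_to_user() */
    return 0;
}

int pack_fp_to_user(void *uframe, const struct fp_state *fp)
{
    unsigned long long buf[NFPREG];
    int i;

    for (i = 0; i < 32; i++)
        buf[i] = fp->fpr[i];       /* task->thread.TS_FPR(i) */
    buf[32] = fp->fpscr;           /* fpscr rides in the last slot */

    return fake_copy_to_user(uframe, buf, sizeof(buf));
}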
H A Dsignal_64.c102 if (current->thread.used_vr) { setup_sigcontext()
105 err |= __copy_to_user(v_regs, &current->thread.vr_state, setup_sigcontext()
116 current->thread.vrsave = mfspr(SPRN_VRSAVE); setup_sigcontext()
117 err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); setup_sigcontext()
136 if (current->thread.used_vsr && ctx_has_vsx_region) { setup_sigcontext()
194 /* Remove TM bits from thread's MSR. The MSR in the sigcontext setup_tm_sigcontexts()
208 if (current->thread.used_vr) { setup_tm_sigcontexts()
211 err |= __copy_to_user(v_regs, &current->thread.vr_state, setup_tm_sigcontexts()
218 &current->thread.transact_vr, setup_tm_sigcontexts()
222 &current->thread.vr_state, setup_tm_sigcontexts()
234 current->thread.vrsave = mfspr(SPRN_VRSAVE); setup_tm_sigcontexts()
235 err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); setup_tm_sigcontexts()
237 err |= __put_user(current->thread.transact_vrsave, setup_tm_sigcontexts()
240 err |= __put_user(current->thread.vrsave, setup_tm_sigcontexts()
261 if (current->thread.used_vsr) { setup_tm_sigcontexts()
285 &current->thread.ckpt_regs, GP_REGS_SIZE); setup_tm_sigcontexts()
342 * Do this before updating the thread state in restore_sigcontext()
343 * current->thread.fpr/vr. That way, if we get preempted restore_sigcontext()
352 * This has to be done before copying stuff into current->thread.fpr/vr restore_sigcontext()
365 err |= __copy_from_user(&current->thread.vr_state, v_regs, restore_sigcontext()
367 else if (current->thread.used_vr) restore_sigcontext()
368 memset(&current->thread.vr_state, 0, 33 * sizeof(vector128)); restore_sigcontext()
371 err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); restore_sigcontext()
373 current->thread.vrsave = 0; restore_sigcontext()
375 mtspr(SPRN_VRSAVE, current->thread.vrsave); restore_sigcontext()
390 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; restore_sigcontext()
414 err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs, restore_tm_sigcontexts()
420 * Users doing anything abhorrent like thread-switching w/ signals for restore_tm_sigcontexts()
426 err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]); restore_tm_sigcontexts()
445 err |= __get_user(current->thread.ckpt_regs.ctr, restore_tm_sigcontexts()
447 err |= __get_user(current->thread.ckpt_regs.link, restore_tm_sigcontexts()
449 err |= __get_user(current->thread.ckpt_regs.xer, restore_tm_sigcontexts()
451 err |= __get_user(current->thread.ckpt_regs.ccr, restore_tm_sigcontexts()
461 * Do this before updating the thread state in restore_tm_sigcontexts()
462 * current->thread.fpr/vr. That way, if we get preempted restore_tm_sigcontexts()
471 * This has to be done before copying stuff into current->thread.fpr/vr restore_tm_sigcontexts()
488 err |= __copy_from_user(&current->thread.vr_state, v_regs, restore_tm_sigcontexts()
490 err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs, restore_tm_sigcontexts()
493 else if (current->thread.used_vr) { restore_tm_sigcontexts()
494 memset(&current->thread.vr_state, 0, 33 * sizeof(vector128)); restore_tm_sigcontexts()
495 memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128)); restore_tm_sigcontexts()
499 err |= __get_user(current->thread.vrsave, restore_tm_sigcontexts()
501 err |= __get_user(current->thread.transact_vrsave, restore_tm_sigcontexts()
505 current->thread.vrsave = 0; restore_tm_sigcontexts()
506 current->thread.transact_vrsave = 0; restore_tm_sigcontexts()
509 mtspr(SPRN_VRSAVE, current->thread.vrsave); restore_tm_sigcontexts()
527 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; restore_tm_sigcontexts()
528 current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0; restore_tm_sigcontexts()
534 current->thread.tm_texasr |= TEXASR_FS; restore_tm_sigcontexts()
536 tm_recheckpoint(&current->thread, msr); restore_tm_sigcontexts()
540 do_load_up_transact_fpu(&current->thread); restore_tm_sigcontexts()
541 regs->msr |= (MSR_FP | current->thread.fpexc_mode); restore_tm_sigcontexts()
545 do_load_up_transact_altivec(&current->thread); restore_tm_sigcontexts()
642 * or if another thread unmaps the region containing the context. sys_swapcontext()
758 current->thread.fp_state.fpscr = 0; handle_rt_signal64()
H A Didle_power7.S139 * because as soon as we do that, another thread can switch
173 andc r15,r15,r7 /* Clear thread bit */
178 * If cr0 = 0, then current thread is the last thread of the core entering
179 * sleep. Last thread needs to execute the hardware bug workaround code if
217 * Note all registers, i.e. per-core, per-subcore or per-thread, are saved
218 * here since any thread in the core might wake up first
311 * a. In the sleep/winkle enter path, the last thread is executing
313 * b. In the wake up path, another thread is executing fastsleep
326 * cr1 - 0b0100 if first thread to wakeup in subcore
327 * cr2 - 0b0100 if first thread to wakeup in core
332 or r15,r15,r7 /* Set thread bit */
336 /* Not first thread in subcore to wake up */
343 /* First thread in subcore to wakeup */
365 * Check if the thread is also the first thread in the core. If not,
373 * First thread in the core waking up from fastsleep. It needs to
420 /* Restore per thread state */
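
The idle_power7.S hits show each hardware thread clearing or setting its own bit in a shared per-core mask to learn whether it is the last thread entering sleep or the first one waking. A hedged C sketch of that accounting, with C11 atomics standing in for the lwarx/stwcx. loops (names are illustrative):

/* Sketch of per-core idle accounting: assume the mask starts with
 * every running thread's bit set. */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned long core_idle_mask; /* one bit per HW thread */

bool enter_idle_is_last(int thread_bit)
{
    unsigned long prev =
        atomic_fetch_and(&core_idle_mask, ~(1UL << thread_bit));
    /* all bits now clear: we are the last thread entering sleep */
    return (prev & ~(1UL << thread_bit)) == 0;
}

bool wake_is_first(int thread_bit)
{
    unsigned long prev =
        atomic_fetch_or(&core_idle_mask, 1UL << thread_bit);
    return prev == 0;              /* mask was empty: first to wake */
}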
/linux-4.1.27/arch/mn10300/include/asm/
H A Dswitch_to.h24 if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) { \
25 (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU; \
26 (prev)->thread.uregs->epsw &= ~EPSW_FE; \
27 fpu_save(&(prev)->thread.fpu_state); \
43 current->thread.wchan = (u_long) __builtin_return_address(0); \
44 (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
46 current->thread.wchan = 0; \
H A Dfpu.h41 : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags) set_using_fpu()
50 : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags) clear_using_fpu()
54 #define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
67 if (tsk->thread.fpu_flags & THREAD_HAS_FPU) { unlazy_fpu()
68 fpu_save(&tsk->thread.fpu_state); unlazy_fpu()
69 tsk->thread.fpu_flags &= ~THREAD_HAS_FPU; unlazy_fpu()
70 tsk->thread.uregs->epsw &= ~EPSW_FE; unlazy_fpu()
74 fpu_save(&tsk->thread.fpu_state); unlazy_fpu()
97 if (tsk->thread.fpu_flags & THREAD_HAS_FPU) { flush_fpu()
98 tsk->thread.fpu_flags &= ~THREAD_HAS_FPU; flush_fpu()
99 tsk->thread.uregs->epsw &= ~EPSW_FE; flush_fpu()
104 tsk->thread.uregs->epsw &= ~EPSW_FE; flush_fpu()
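
unlazy_fpu() above captures the other half of lazy FPU ownership: before anything reads a task's saved FPU image, the live hardware contents must be flushed back and the ownership flag dropped. A minimal sketch, assuming invented type and helper names:

/* Hedged sketch of unlazy_fpu(): make the in-memory copy current,
 * then record that the hardware no longer owns the state.  struct thr
 * and fpu_hw_save() are illustrative stand-ins. */
#define THREAD_HAS_FPU 0x1

struct thr {
    unsigned flags;
    unsigned fpu_state[64];
};

static void fpu_hw_save(unsigned *dst) { (void)dst; /* HW regs -> dst */ }

void unlazy(struct thr *t)
{
    if (t->flags & THREAD_HAS_FPU) {
        fpu_hw_save(t->fpu_state);   /* flush live state to memory */
        t->flags &= ~THREAD_HAS_FPU; /* hardware no longer owns it */
    }
}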
H A Dthread_info.h1 /* MN10300 Low-level thread information
48 mm_segment_t addr_limit; /* thread address space:
50 0-0xFFFFFFFF for kernel-thread
69 * macros/functions for gaining access to the thread information structure
90 /* how to get the thread information struct from C */
126 /* how to get the thread information struct from ASM */
135 * thread information flags
/linux-4.1.27/arch/s390/include/asm/
H A Dswitch_to.h130 if (task->thread.vxrs) save_fp_vx_regs()
131 save_vx_regs(task->thread.vxrs); save_fp_vx_regs()
133 save_fp_regs(task->thread.fp_regs.fprs); save_fp_vx_regs()
138 if (task->thread.vxrs) restore_fp_vx_regs()
139 restore_vx_regs(task->thread.vxrs); restore_fp_vx_regs()
141 restore_fp_regs(task->thread.fp_regs.fprs); restore_fp_vx_regs()
160 save_fp_ctl(&prev->thread.fp_regs.fpc); \
162 save_access_regs(&prev->thread.acrs[0]); \
163 save_ri_cb(prev->thread.ri_cb); \
167 restore_fp_ctl(&next->thread.fp_regs.fpc); \
169 restore_access_regs(&next->thread.acrs[0]); \
170 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
/linux-4.1.27/arch/mips/kernel/
H A Dasm-offsets.c110 OFFSET(THREAD_REG16, task_struct, thread.reg16); output_thread_defines()
111 OFFSET(THREAD_REG17, task_struct, thread.reg17); output_thread_defines()
112 OFFSET(THREAD_REG18, task_struct, thread.reg18); output_thread_defines()
113 OFFSET(THREAD_REG19, task_struct, thread.reg19); output_thread_defines()
114 OFFSET(THREAD_REG20, task_struct, thread.reg20); output_thread_defines()
115 OFFSET(THREAD_REG21, task_struct, thread.reg21); output_thread_defines()
116 OFFSET(THREAD_REG22, task_struct, thread.reg22); output_thread_defines()
117 OFFSET(THREAD_REG23, task_struct, thread.reg23); output_thread_defines()
118 OFFSET(THREAD_REG29, task_struct, thread.reg29); output_thread_defines()
119 OFFSET(THREAD_REG30, task_struct, thread.reg30); output_thread_defines()
120 OFFSET(THREAD_REG31, task_struct, thread.reg31); output_thread_defines()
122 thread.cp0_status); output_thread_defines()
123 OFFSET(THREAD_FPU, task_struct, thread.fpu); output_thread_defines()
126 thread.cp0_badvaddr); output_thread_defines()
128 thread.cp0_baduaddr); output_thread_defines()
130 thread.error_code); output_thread_defines()
136 OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]); output_thread_fpu_defines()
137 OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]); output_thread_fpu_defines()
138 OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]); output_thread_fpu_defines()
139 OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]); output_thread_fpu_defines()
140 OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]); output_thread_fpu_defines()
141 OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]); output_thread_fpu_defines()
142 OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]); output_thread_fpu_defines()
143 OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]); output_thread_fpu_defines()
144 OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]); output_thread_fpu_defines()
145 OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]); output_thread_fpu_defines()
146 OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]); output_thread_fpu_defines()
147 OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]); output_thread_fpu_defines()
148 OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]); output_thread_fpu_defines()
149 OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]); output_thread_fpu_defines()
150 OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]); output_thread_fpu_defines()
151 OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]); output_thread_fpu_defines()
152 OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]); output_thread_fpu_defines()
153 OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]); output_thread_fpu_defines()
154 OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]); output_thread_fpu_defines()
155 OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]); output_thread_fpu_defines()
156 OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]); output_thread_fpu_defines()
157 OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]); output_thread_fpu_defines()
158 OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]); output_thread_fpu_defines()
159 OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]); output_thread_fpu_defines()
160 OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]); output_thread_fpu_defines()
161 OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]); output_thread_fpu_defines()
162 OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]); output_thread_fpu_defines()
163 OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]); output_thread_fpu_defines()
164 OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]); output_thread_fpu_defines()
165 OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]); output_thread_fpu_defines()
166 OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); output_thread_fpu_defines()
167 OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); output_thread_fpu_defines()
169 OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); output_thread_fpu_defines()
170 OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr); output_thread_fpu_defines()
319 OFFSET(THREAD_CP2, task_struct, thread.cp2); output_octeon_cop2_state_defines()
320 OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); output_octeon_cop2_state_defines()
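
asm-offsets.c is the generator behind the THREAD_* constants used by the assembler macros: it is compiled to assembly only, and every OFFSET() plants offsetof() as an immediate on a marker line that the build then scrapes into a header for .S files. A self-contained sketch of the technique (the struct layout and _DEMO names are invented; compile with -S and inspect the output rather than assembling it):

/* Sketch of the asm-offsets technique: this file is never linked;
 * DEFINE() emits a "->SYM value" marker into the generated assembly,
 * which a sed/awk step rewrites into #defines. */
#include <stddef.h>

#define DEFINE(sym, val) \
    asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

struct task_struct_like {
    long pad[7];
    struct { unsigned long long fpr[32]; unsigned fcr31; } fpu;
};

void asm_offsets(void)
{
    OFFSET(THREAD_FPR0_DEMO, task_struct_like, fpu.fpr[0]);
    OFFSET(THREAD_FCR31_DEMO, task_struct_like, fpu.fcr31);
}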
/linux-4.1.27/arch/sh/kernel/cpu/sh4/
H A Dfpu.c88 :"0"((char *)(&tsk->thread.xstate->hardfpu.status)), save_fpu()
138 :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG) restore_fpu()
234 if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)) ieee_fpe_handler()
236 denormal_to_double(&tsk->thread.xstate->hardfpu, ieee_fpe_handler()
252 hx = tsk->thread.xstate->hardfpu.fp_regs[n]; ieee_fpe_handler()
253 hy = tsk->thread.xstate->hardfpu.fp_regs[m]; ieee_fpe_handler()
254 fpscr = tsk->thread.xstate->hardfpu.fpscr; ieee_fpe_handler()
264 | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; ieee_fpe_handler()
266 | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; ieee_fpe_handler()
268 tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; ieee_fpe_handler()
269 tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; ieee_fpe_handler()
275 tsk->thread.xstate->hardfpu.fp_regs[n] = hx; ieee_fpe_handler()
290 hx = tsk->thread.xstate->hardfpu.fp_regs[n]; ieee_fpe_handler()
291 hy = tsk->thread.xstate->hardfpu.fp_regs[m]; ieee_fpe_handler()
292 fpscr = tsk->thread.xstate->hardfpu.fpscr; ieee_fpe_handler()
302 | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; ieee_fpe_handler()
304 | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; ieee_fpe_handler()
309 tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; ieee_fpe_handler()
310 tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; ieee_fpe_handler()
319 tsk->thread.xstate->hardfpu.fp_regs[n] = hx; ieee_fpe_handler()
334 hx = tsk->thread.xstate->hardfpu.fp_regs[n]; ieee_fpe_handler()
335 hy = tsk->thread.xstate->hardfpu.fp_regs[m]; ieee_fpe_handler()
336 fpscr = tsk->thread.xstate->hardfpu.fpscr; ieee_fpe_handler()
346 | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; ieee_fpe_handler()
348 | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; ieee_fpe_handler()
352 tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; ieee_fpe_handler()
353 tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; ieee_fpe_handler()
359 tsk->thread.xstate->hardfpu.fp_regs[n] = hx; ieee_fpe_handler()
372 hx = tsk->thread.xstate->hardfpu.fp_regs[m]; ieee_fpe_handler()
374 if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR) ieee_fpe_handler()
379 llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32) ieee_fpe_handler()
380 | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; ieee_fpe_handler()
382 tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx); ieee_fpe_handler()
401 int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr); float_rounding_mode()
413 tsk->thread.xstate->hardfpu.fpscr &= BUILD_TRAP_HANDLER()
415 tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags; BUILD_TRAP_HANDLER()
418 tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10); BUILD_TRAP_HANDLER()
422 if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) & BUILD_TRAP_HANDLER()
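
The ieee_fpe_handler() hits show the emulator repeatedly gluing two consecutive 32-bit FP register slots into one 64-bit double (llx), operating on it, and splitting the result back. That pairing reduces to two helpers:

/* Hedged sketch of the register-pair handling above: SH stores a
 * double across fp_regs[n] and fp_regs[n + 1]. */
#include <stdint.h>

static uint64_t pair_to_u64(const uint32_t *fp_regs, int n)
{
    return ((uint64_t)fp_regs[n] << 32) | fp_regs[n + 1];
}

static void u64_to_pair(uint32_t *fp_regs, int n, uint64_t v)
{
    fp_regs[n]     = (uint32_t)(v >> 32);
    fp_regs[n + 1] = (uint32_t)(v & 0xffffffffu);
}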
/linux-4.1.27/arch/frv/kernel/
H A Dprocess.c140 p->thread.frame = childregs; copy_thread()
141 p->thread.curr = p; copy_thread()
142 p->thread.sp = (unsigned long) childregs; copy_thread()
143 p->thread.fp = 0; copy_thread()
144 p->thread.lr = 0; copy_thread()
145 p->thread.frame0 = childregs; copy_thread()
150 p->thread.pc = (unsigned long) ret_from_kernel_thread; copy_thread()
151 save_user_regs(p->thread.user); copy_thread()
158 p->thread.pc = (unsigned long) ret_from_fork; copy_thread()
164 save_user_regs(p->thread.user); copy_thread()
179 fp = p->thread.fp; get_wchan()
180 regs0 = p->thread.frame0; get_wchan()
200 /* Check whether the thread is blocked in resume() */ thread_saved_pc()
201 if (in_sched_functions(tsk->thread.pc)) thread_saved_pc()
202 return ((unsigned long *)tsk->thread.fp)[2]; thread_saved_pc()
204 return tsk->thread.pc; thread_saved_pc()
278 &current->thread.user->f, dump_fpu()
279 sizeof(current->thread.user->f)); dump_fpu()
H A Dptrace.c45 const struct user_int_regs *iregs = &target->thread.user->i; genregs_get()
65 struct user_int_regs *iregs = &target->thread.user->i; genregs_set()
110 const struct user_fpmedia_regs *fpregs = &target->thread.user->f; fpmregs_get()
130 struct user_fpmedia_regs *fpregs = &target->thread.user->f; fpmregs_set()
205 struct user_context *user = task->thread.user; get_reg()
219 struct user_context *user = task->thread.user; put_reg()
243 child->thread.frame0->__status |= REG__STATUS_STEP; user_enable_single_step()
248 child->thread.frame0->__status &= ~REG__STATUS_STEP; user_disable_single_step()
323 0, sizeof(child->thread.user->i), arch_ptrace()
329 0, sizeof(child->thread.user->i), arch_ptrace()
335 0, sizeof(child->thread.user->f), arch_ptrace()
341 0, sizeof(child->thread.user->f), arch_ptrace()
/linux-4.1.27/arch/powerpc/include/asm/
H A Dswitch_to.h90 t->thread.ebbrr = 0; clear_task_ebb()
91 t->thread.ebbhr = 0; clear_task_ebb()
92 t->thread.bescr = 0; clear_task_ebb()
93 t->thread.mmcr2 = 0; clear_task_ebb()
94 t->thread.mmcr0 = 0; clear_task_ebb()
95 t->thread.siar = 0; clear_task_ebb()
96 t->thread.sdar = 0; clear_task_ebb()
97 t->thread.sier = 0; clear_task_ebb()
98 t->thread.used_ebb = 0; clear_task_ebb()
/linux-4.1.27/arch/ia64/include/asm/
H A Dswitch_to.h19 * Context switch from one thread to another. If the two threads have
25 * newly created thread returns directly to
41 ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
55 * In the SMP case, we save the fph state when context-switching away from a thread that
56 * modified fph. This way, when the thread gets scheduled on another CPU, the CPU can
57 * pick up the state from task->thread.fph, avoiding the complication of having to fetch
63 (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
64 __ia64_save_fpu((prev)->thread.fph); \
68 if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \
H A Dcurrent.h12 * In kernel mode, thread pointer (r13) is used to point to the current task
/linux-4.1.27/arch/sh/include/asm/
H A Dswitch_to_64.h27 struct pt_regs *regs = next->thread.uregs; \
30 last = sh64_switch_to(prev, &prev->thread, next, \
31 &next->thread); \
H A Dswitch_to_32.h7 (!!(tsk->thread.dsp_status.status & SR_DSP))
12 (u32 *)&tsk->thread.dsp_status; \
35 (u32 *)&tsk->thread.dsp_status + 14; \
82 __ts1 = (u32 *)&prev->thread.sp; \
83 __ts2 = (u32 *)&prev->thread.pc; \
86 __ts6 = (u32 *)&next->thread.sp; \
87 __ts7 = next->thread.pc; \
/linux-4.1.27/include/uapi/linux/nfsd/
H A Dstats.h14 /* thread usage wraps every million seconds (approx one fortnight)
/linux-4.1.27/arch/cris/include/arch-v10/arch/
H A Dthread_info.h4 /* how to get the thread information struct from C */ current_thread_info()
/linux-4.1.27/mm/
H A Dmmu_context.c15 * Makes the calling kernel thread take on the specified
18 * from a kernel thread context)
47 * by the calling kernel thread
49 * from a kernel thread context)
/linux-4.1.27/arch/nios2/include/asm/
H A Dprocessor.h24 #define NIOS2_FLAG_KTHREAD 0x00000001 /* task is a kernel thread */
50 /* The Nios processor specific thread struct. */
73 /* Free all resources held by a thread. */ release_thread()
78 /* Free current thread data structures etc.. */ exit_thread()
83 /* Return saved PC of a blocked thread. */
84 #define thread_saved_pc(tsk) ((tsk)->thread.kregs->ea)
92 #define KSTK_EIP(tsk) ((tsk)->thread.kregs->ea)
93 #define KSTK_ESP(tsk) ((tsk)->thread.kregs->sp)
H A Dthread_info.h2 * NiosII low-level thread information
45 mm_segment_t addr_limit; /* thread address space:
47 0-0xFFFFFFFF for kernel-thread
53 * macros/functions for gaining access to the thread information structure
69 /* how to get the thread information struct from C */ current_thread_info()
79 * thread information flags
/linux-4.1.27/arch/sh/kernel/cpu/
H A Dfpu.c18 if (!tsk->thread.xstate) { init_fpu()
19 tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep, init_fpu()
21 if (!tsk->thread.xstate) init_fpu()
26 struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu; init_fpu()
30 struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu; init_fpu()
47 tsk->thread.fpu_counter++; __fpu_state_restore()
/linux-4.1.27/drivers/acpi/acpica/
H A Dexmutex.c55 struct acpi_thread_state *thread);
71 struct acpi_thread_state *thread = obj_desc->mutex.owner_thread; acpi_ex_unlink_mutex() local
73 if (!thread) { acpi_ex_unlink_mutex()
95 thread->acquired_mutex_list = obj_desc->mutex.next; acpi_ex_unlink_mutex()
104 * thread - Current executing thread object
114 struct acpi_thread_state *thread) acpi_ex_link_mutex()
118 list_head = thread->acquired_mutex_list; acpi_ex_link_mutex()
133 thread->acquired_mutex_list = obj_desc; acpi_ex_link_mutex()
142 * thread_id - Current thread state
147 * path that supports multiple acquires by the same thread.
172 /* Support for multiple acquires by the owning thread */ acpi_ex_acquire_mutex_object()
176 * The mutex is already owned by this thread, just increment the acpi_ex_acquire_mutex_object()
236 /* Must have a valid thread state struct */ acpi_ex_acquire_mutex()
238 if (!walk_state->thread) { acpi_ex_acquire_mutex()
240 "Cannot acquire Mutex [%4.4s], null thread info", acpi_ex_acquire_mutex()
249 if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) { acpi_ex_acquire_mutex()
253 walk_state->thread->current_sync_level)); acpi_ex_acquire_mutex()
259 walk_state->thread->thread_id); acpi_ex_acquire_mutex()
264 obj_desc->mutex.owner_thread = walk_state->thread; acpi_ex_acquire_mutex()
266 walk_state->thread->current_sync_level; acpi_ex_acquire_mutex()
267 walk_state->thread->current_sync_level = acpi_ex_acquire_mutex()
270 /* Link the mutex to the current thread for force-unlock at method exit */ acpi_ex_acquire_mutex()
272 acpi_ex_link_mutex(obj_desc, walk_state->thread); acpi_ex_acquire_mutex()
288 * previous multiple acquires) by the same thread.
380 /* Must have a valid thread ID */ acpi_ex_release_mutex()
382 if (!walk_state->thread) { acpi_ex_release_mutex()
384 "Cannot release Mutex [%4.4s], null thread info", acpi_ex_release_mutex()
390 * The Mutex is owned, but this thread must be the owner. acpi_ex_release_mutex()
391 * Special case for Global Lock, any thread can release acpi_ex_release_mutex()
393 if ((owner_thread->thread_id != walk_state->thread->thread_id) && acpi_ex_release_mutex()
396 "Thread %u cannot release Mutex [%4.4s] acquired by thread %u", acpi_ex_release_mutex()
397 (u32)walk_state->thread->thread_id, acpi_ex_release_mutex()
415 walk_state->thread->current_sync_level)); acpi_ex_release_mutex()
446 * PARAMETERS: thread - Current executing thread object
450 * DESCRIPTION: Release all mutexes held by this thread
452 * NOTE: This function is called as the thread is exiting the interpreter.
454 * only when the parent thread actually exits the interpreter. This allows one
460 void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) acpi_ex_release_all_mutexes() argument
462 union acpi_operand_object *next = thread->acquired_mutex_list; acpi_ex_release_all_mutexes()
499 thread->current_sync_level = acpi_ex_release_all_mutexes()
113 acpi_ex_link_mutex(union acpi_operand_object *obj_desc, struct acpi_thread_state *thread) acpi_ex_link_mutex() argument
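
The exmutex.c hits encode two AML mutex rules: an owner may re-acquire its own mutex (a depth count, no blocking), and a thread may never acquire a mutex whose sync level is below its current sync level; on success the thread's sync level is raised and the old value saved for release. A simplified sketch with stand-in types:

/* Hedged sketch of the acquire rules; fields are simplified stand-ins
 * for the ACPICA structs, and the OS-level blocking is elided. */
struct thr_state {
    unsigned id, current_sync_level;
};

struct aml_mutex {
    struct thr_state *owner;
    unsigned sync_level, depth, saved_sync_level;
};

int aml_mutex_acquire(struct aml_mutex *m, struct thr_state *t)
{
    if (m->owner && m->owner->id == t->id) {
        m->depth++;                        /* recursive acquire */
        return 0;
    }
    if (t->current_sync_level > m->sync_level)
        return -1;                         /* AE_AML_MUTEX_ORDER */

    /* ... block until the underlying OS mutex is free ... */
    m->owner = t;
    m->depth = 1;
    m->saved_sync_level = t->current_sync_level;
    t->current_sync_level = m->sync_level; /* raise to mutex level */
    return 0;
}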
H A Ddswstate.c437 * PARAMETERS: thread - Get current active state for this Thread
447 *thread) acpi_ds_get_current_walk_state()
451 if (!thread) { acpi_ds_get_current_walk_state()
456 thread->walk_state_list)); acpi_ds_get_current_walk_state()
458 return (thread->walk_state_list); acpi_ds_get_current_walk_state()
466 * thread - Thread state object
476 struct acpi_thread_state *thread) acpi_ds_push_walk_state()
480 walk_state->next = thread->walk_state_list; acpi_ds_push_walk_state()
481 thread->walk_state_list = walk_state; acpi_ds_push_walk_state()
490 * PARAMETERS: thread - Current thread state
492 * RETURN: A walk_state object popped from the thread's stack
500 struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread) acpi_ds_pop_walk_state() argument
506 walk_state = thread->walk_state_list; acpi_ds_pop_walk_state()
512 thread->walk_state_list = walk_state->next; acpi_ds_pop_walk_state()
531 * thread - Current thread state
546 *thread) acpi_ds_create_walk_state()
561 walk_state->thread = thread; acpi_ds_create_walk_state()
573 if (thread) { acpi_ds_create_walk_state()
574 acpi_ds_push_walk_state(walk_state, thread); acpi_ds_create_walk_state()
446 acpi_ds_get_current_walk_state(struct acpi_thread_state *thread) acpi_ds_get_current_walk_state() argument
475 acpi_ds_push_walk_state(struct acpi_walk_state *walk_state, struct acpi_thread_state *thread) acpi_ds_push_walk_state() argument
540 acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object *origin, union acpi_operand_object *method_desc, struct acpi_thread_state *thread) acpi_ds_create_walk_state() argument
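
dswstate.c keeps the active walk states as a per-thread stack implemented as a singly linked list headed in the thread state: push links the new state at the head, pop unlinks it. In miniature:

/* Hedged sketch of the per-thread walk-state stack; names are
 * simplified versions of the ACPICA ones. */
struct walk_state {
    struct walk_state *next;
    /* parser/interpreter state elided */
};

struct thread_state {
    struct walk_state *walk_state_list;    /* stack head */
};

void push_walk_state(struct thread_state *t, struct walk_state *w)
{
    w->next = t->walk_state_list;
    t->walk_state_list = w;
}

struct walk_state *pop_walk_state(struct thread_state *t)
{
    struct walk_state *w = t->walk_state_list;
    if (w)
        t->walk_state_list = w->next;
    return w;
}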
H A Ddsmethod.c314 * increments the thread count, and waits at the method semaphore
332 /* Prevent wraparound of thread count */ acpi_ds_begin_method_execution()
357 * The current_sync_level (per-thread) must be less than or equal to acpi_ds_begin_method_execution()
370 && (walk_state->thread->current_sync_level > acpi_ds_begin_method_execution()
375 walk_state->thread->current_sync_level)); acpi_ds_begin_method_execution()
386 (walk_state->thread->thread_id != acpi_ds_begin_method_execution()
405 walk_state->thread->current_sync_level; acpi_ds_begin_method_execution()
408 walk_state->thread->thread_id; acpi_ds_begin_method_execution()
409 walk_state->thread->current_sync_level = acpi_ds_begin_method_execution()
427 * Allocate an Owner ID for this method, only if this is the first thread acpi_ds_begin_method_execution()
439 * Increment the method parse tree thread count since it has been acpi_ds_begin_method_execution()
440 * reentered one more time (even if it is the same thread) acpi_ds_begin_method_execution()
459 * PARAMETERS: thread - Info for this thread
470 acpi_ds_call_control_method(struct acpi_thread_state *thread, acpi_ds_call_control_method() argument
511 NULL, obj_desc, thread); acpi_ds_call_control_method()
713 * current sync level for this thread acpi_ds_terminate_control_method()
721 walk_state->thread->current_sync_level = acpi_ds_terminate_control_method()
737 * will wait until the last thread has completed. acpi_ds_terminate_control_method()
765 /* Decrement the thread count on the method */ acpi_ds_terminate_control_method()
770 ACPI_ERROR((AE_INFO, "Invalid zero thread count in method")); acpi_ds_terminate_control_method()
778 * we immediately reuse it for the next thread executing this method acpi_ds_terminate_control_method()
781 "*** Completed execution of one thread, %u threads remaining\n", acpi_ds_terminate_control_method()
784 /* This is the only executing thread for this method */ acpi_ds_terminate_control_method()
789 * does not support multiple thread execution. The best example of this acpi_ds_terminate_control_method()
791 * thread will fail with an AE_ALREADY_EXISTS exception. acpi_ds_terminate_control_method()
793 * This code is here because we must wait until the last thread exits acpi_ds_terminate_control_method()
811 * a named object and then blocked, causing the second thread acpi_ds_terminate_control_method()
814 * thread exits here. acpi_ds_terminate_control_method()
H A Dpsparse.c438 struct acpi_thread_state *thread; acpi_ps_parse_aml() local
453 /* Create and initialize a new thread state */ acpi_ps_parse_aml()
455 thread = acpi_ut_create_thread_state(); acpi_ps_parse_aml()
456 if (!thread) { acpi_ps_parse_aml()
470 walk_state->thread = thread; acpi_ps_parse_aml()
477 walk_state->thread->current_sync_level = acpi_ps_parse_aml()
481 acpi_ds_push_walk_state(walk_state, thread); acpi_ps_parse_aml()
487 acpi_gbl_current_walk_list = thread; acpi_ps_parse_aml()
515 acpi_ds_call_control_method(thread, walk_state, acpi_ps_parse_aml()
526 walk_state = acpi_ds_get_current_walk_state(thread); acpi_ps_parse_aml()
538 /* Check for possible multi-thread reentrancy problem */ acpi_ps_parse_aml()
547 * then mark "serialized" when the last thread exits. acpi_ps_parse_aml()
556 walk_state = acpi_ds_pop_walk_state(thread); acpi_ps_parse_aml()
586 walk_state = acpi_ds_get_current_walk_state(thread); acpi_ps_parse_aml()
683 acpi_ex_release_all_mutexes(thread); acpi_ps_parse_aml()
685 (union acpi_generic_state, thread)); acpi_ps_parse_aml()
/linux-4.1.27/arch/mn10300/kernel/
H A Dfpu.c52 info.si_addr = (void *) tsk->thread.uregs->pc; fpu_exception()
57 fpcr = tsk->thread.fpu_state.fpcr; fpu_exception()
88 if (tsk->thread.fpu_flags & THREAD_HAS_FPU) { fpu_setup_sigcontext()
89 fpu_save(&tsk->thread.fpu_state); fpu_setup_sigcontext()
90 tsk->thread.uregs->epsw &= ~EPSW_FE; fpu_setup_sigcontext()
91 tsk->thread.fpu_flags &= ~THREAD_HAS_FPU; fpu_setup_sigcontext()
95 fpu_save(&tsk->thread.fpu_state); fpu_setup_sigcontext()
96 fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE; fpu_setup_sigcontext()
108 &tsk->thread.fpu_state, fpu_setup_sigcontext()
125 if (tsk->thread.fpu_flags & THREAD_HAS_FPU) { fpu_kill_state()
126 tsk->thread.uregs->epsw &= ~EPSW_FE; fpu_kill_state()
127 tsk->thread.fpu_flags &= ~THREAD_HAS_FPU; fpu_kill_state()
131 fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE; fpu_kill_state()
151 ret = copy_from_user(&tsk->thread.fpu_state, fpucontext, fpu_restore_sigcontext()
171 memcpy(fpreg, &tsk->thread.fpu_state, sizeof(*fpreg)); dump_fpu()
H A Dprocess.c40 * return saved PC of a blocked thread.
44 return ((unsigned long *) tsk->thread.sp)[3]; thread_saved_pc()
104 * free current thread data structures etc..
130 * current task into the new thread.
140 * set up the kernel stack for a new thread and copy arch-specific thread
159 p->thread.uregs = c_regs; copy_thread()
161 p->thread.a3 = (unsigned long) c_regs; copy_thread()
162 p->thread.sp = c_ksp; copy_thread()
163 p->thread.wchan = p->thread.pc; copy_thread()
164 p->thread.usp = c_usp; copy_thread()
172 p->thread.pc = (unsigned long) ret_from_kernel_thread; copy_thread()
184 p->thread.pc = (unsigned long) ret_from_fork; copy_thread()
191 return p->thread.wchan; get_wchan()
/linux-4.1.27/arch/m68k/kernel/
H A Dprocess.c42 * Return saved PC from a blocked thread
46 struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp; thread_saved_pc()
47 /* Check whether the thread is blocked in resume() */ thread_saved_pc()
105 current->thread.fs = __USER_DS; flush_thread()
142 p->thread.ksp = (unsigned long)frame; copy_thread()
143 p->thread.esp0 = (unsigned long)&frame->regs; copy_thread()
149 p->thread.fs = get_fs().seg; copy_thread()
152 /* kernel thread */ copy_thread()
158 p->thread.usp = 0; copy_thread()
165 p->thread.usp = usp ?: rdusp(); copy_thread()
173 asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory"); copy_thread()
175 if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) { copy_thread()
182 : "m" (p->thread.fp[0]), copy_thread()
183 "m" (p->thread.fpcntl[0]), copy_thread()
184 "m" (p->thread.fpcntl[1]), copy_thread()
185 "m" (p->thread.fpcntl[2]) copy_thread()
191 : "m" (p->thread.fp[0]), copy_thread()
192 "m" (p->thread.fpcntl[0]) copy_thread()
198 asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0])); copy_thread()
214 memcpy(fpu->fpcntl, current->thread.fpcntl, 12); dump_fpu()
215 memcpy(fpu->fpregs, current->thread.fp, 96); dump_fpu()
266 fp = ((struct switch_stack *)p->thread.ksp)->a6; get_wchan()
H A Dptrace.c42 /* Find the stack offset for a register, relative to thread.esp0. */
79 addr = &task->thread.usp; get_reg()
81 addr = (unsigned long *)(task->thread.esp0 + regoff[regno]); get_reg()
86 long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj)); get_reg()
104 addr = &task->thread.usp; put_reg()
106 addr = (unsigned long *)(task->thread.esp0 + regoff[regno]); put_reg()
111 long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj)); put_reg()
178 tmp = child->thread.fp[regno - 21]; arch_ptrace()
219 child->thread.fp[regno - 21] = data; arch_ptrace()
249 if (copy_to_user(datap, &child->thread.fp, arch_ptrace()
255 if (copy_from_user(&child->thread.fp, datap, arch_ptrace()
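
get_reg()/put_reg() above resolve a ptrace register number to an address inside the saved exception frame via a per-register offset table rooted at thread.esp0, with the user stack pointer special-cased because it lives in the thread struct instead. A sketch with invented offsets:

/* Hedged sketch: the offset values and PT_USP number are invented,
 * and esp0 must point at a real saved frame before the final
 * dereference means anything. */
#include <stddef.h>

#define PT_USP 15                     /* illustrative register number */

struct thr {
    unsigned long esp0;               /* base of the saved pt_regs */
    unsigned long usp;                /* user SP, kept out of the frame */
};

static const int regoff[] = { 0, 4, 8, 12 /* ... one entry per reg */ };

unsigned long demo_get_reg(struct thr *t, int regno)
{
    if (regno == PT_USP)
        return t->usp;                /* special case: thread struct */
    return *(unsigned long *)(t->esp0 + regoff[regno]);
}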
/linux-4.1.27/arch/arm/vfp/
H A Dvfpmodule.c51 * The pointer to the vfpstate structure of the thread which currently
55 * For UP, this is sufficient to tell which thread owns the VFP context.
62 * Is 'thread's most up to date state stored in this CPUs hardware?
65 static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread) vfp_state_in_hw() argument
68 if (thread->vfpstate.hard.cpu != cpu) vfp_state_in_hw()
71 return vfp_current_hw_state[cpu] == &thread->vfpstate; vfp_state_in_hw()
75 * Force a reload of the VFP context from the thread structure. We do
79 static void vfp_force_reload(unsigned int cpu, struct thread_info *thread) vfp_force_reload() argument
81 if (vfp_state_in_hw(cpu, thread)) { vfp_force_reload()
86 thread->vfpstate.hard.cpu = NR_CPUS; vfp_force_reload()
91 * Per-thread VFP initialization.
93 static void vfp_thread_flush(struct thread_info *thread) vfp_thread_flush() argument
95 union vfp_state *vfp = &thread->vfpstate; vfp_thread_flush()
121 static void vfp_thread_exit(struct thread_info *thread) vfp_thread_exit() argument
123 /* release case: Per-thread VFP cleanup. */ vfp_thread_exit()
124 union vfp_state *vfp = &thread->vfpstate; vfp_thread_exit()
132 static void vfp_thread_copy(struct thread_info *thread) vfp_thread_copy() argument
137 thread->vfpstate = parent->vfpstate; vfp_thread_copy()
139 thread->vfpstate.hard.cpu = NR_CPUS; vfp_thread_copy()
147 * - the previously running thread will not be scheduled onto another CPU.
148 * - the next thread to be run (v) will not be running on another CPU.
149 * - thread->cpu is the local CPU number
150 * - not preemptible as we're called in the middle of a thread switch
152 * - the thread (v) will be running on the local CPU, so
154 * - thread->cpu is the local CPU number at the time it is accessed,
157 * it is unsafe to use thread->cpu.
159 * - the thread (v) will be running on the local CPU, so
161 * - thread->cpu is the local CPU number at the time it is accessed,
164 * it is unsafe to use thread->cpu.
168 struct thread_info *thread = v; vfp_notifier() local
179 cpu = thread->cpu; vfp_notifier()
183 * case the thread migrates to a different CPU. The vfp_notifier()
198 vfp_thread_flush(thread); vfp_notifier()
202 vfp_thread_exit(thread); vfp_notifier()
206 vfp_thread_copy(thread); vfp_notifier()
235 current->thread.error_code = 0; vfp_raise_sigfpe()
236 current->thread.trap_no = 6; vfp_raise_sigfpe()
513 * Ensure that the VFP state stored in 'thread->vfpstate' is up to date vfp_pm_init()
516 void vfp_sync_hwstate(struct thread_info *thread) vfp_sync_hwstate() argument
520 if (vfp_state_in_hw(cpu, thread)) { vfp_sync_hwstate()
527 vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN); vfp_sync_hwstate()
534 /* Ensure that the thread reloads the hardware VFP state on the next use. */ vfp_flush_hwstate()
535 void vfp_flush_hwstate(struct thread_info *thread) vfp_flush_hwstate() argument
539 vfp_force_reload(cpu, thread); vfp_flush_hwstate()
551 struct thread_info *thread = current_thread_info(); vfp_preserve_user_clear_hwstate() local
552 struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; vfp_preserve_user_clear_hwstate()
556 vfp_sync_hwstate(thread); vfp_preserve_user_clear_hwstate()
580 vfp_flush_hwstate(thread); vfp_preserve_user_clear_hwstate()
594 struct thread_info *thread = current_thread_info(); vfp_restore_user_hwstate() local
595 struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; vfp_restore_user_hwstate()
599 /* Disable VFP to avoid corrupting the new thread state. */ vfp_restore_user_hwstate()
600 vfp_flush_hwstate(thread); vfp_restore_user_hwstate()
634 * hardware state at every thread switch. We clear our held state when
679 struct thread_info *thread = current_thread_info(); kernel_neon_begin() local
698 if (vfp_state_in_hw(cpu, thread)) kernel_neon_begin()
699 vfp_save_state(&thread->vfpstate, fpexc); kernel_neon_begin()
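
vfp_state_in_hw() makes the SMP ownership test explicit: the hardware holds a thread's VFP state only if the per-CPU owner pointer still points at that thread's state and the state itself records that it was last loaded on this CPU; vfp_force_reload() breaks both links so the next use traps and reloads. Roughly:

/* Hedged sketch of the double ownership check; both tests are needed
 * because the thread may have migrated since its state was loaded. */
#include <stdbool.h>

#define NR_CPUS 8

struct vfp_state { int cpu; };        /* CPU that last loaded it, or NR_CPUS */
struct thr { struct vfp_state vfp; };

static struct vfp_state *vfp_hw_owner[NR_CPUS];

bool state_in_hw(int cpu, struct thr *t)
{
    if (t->vfp.cpu != cpu)            /* loaded on some other CPU */
        return false;
    return vfp_hw_owner[cpu] == &t->vfp;  /* and we still own the HW */
}

void force_reload(int cpu, struct thr *t)
{
    if (state_in_hw(cpu, t)) {
        vfp_hw_owner[cpu] = NULL;     /* disown the hardware */
        t->vfp.cpu = NR_CPUS;         /* next use must reload */
    }
}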
/linux-4.1.27/arch/m68k/mm/
H A Dfault.c25 siginfo.si_signo = current->thread.signo; send_fault_sig()
26 siginfo.si_code = current->thread.code; send_fault_sig()
27 siginfo.si_addr = (void *)current->thread.faddr; send_fault_sig()
196 current->thread.signo = SIGBUS; do_page_fault()
197 current->thread.faddr = address; do_page_fault()
201 current->thread.signo = SIGBUS; do_page_fault()
202 current->thread.code = BUS_ADRERR; do_page_fault()
203 current->thread.faddr = address; do_page_fault()
207 current->thread.signo = SIGSEGV; do_page_fault()
208 current->thread.code = SEGV_MAPERR; do_page_fault()
209 current->thread.faddr = address; do_page_fault()
213 current->thread.signo = SIGSEGV; do_page_fault()
214 current->thread.code = SEGV_ACCERR; do_page_fault()
215 current->thread.faddr = address; do_page_fault()
/linux-4.1.27/arch/score/include/asm/
H A Dthread_info.h13 /* thread information allocation */
32 unsigned long tp_value; /* thread pointer */
39 * thread address space:
41 * 0-0xFFFFFFFF for kernel-thread
48 * macros/functions for gaining access to the thread information structure
63 /* How to get the thread information struct from C. */
70 * thread information flags
/linux-4.1.27/kernel/irq/
H A Dhandle.c52 "but no thread function available.", irq, action->name); warn_no_thread()
58 * In case the thread crashed and was killed we just pretend that __irq_wake_thread()
62 if (action->thread->flags & PF_EXITING) __irq_wake_thread()
66 * Wake up the handler thread for this action. If the __irq_wake_thread()
75 * irq thread. __irq_wake_thread()
97 * irq thread: __irq_wake_thread()
111 * So either the thread waits for us to clear IRQS_INPROGRESS __irq_wake_thread()
113 * released before we reach this point. The thread also checks __irq_wake_thread()
115 * threads_oneshot untouched and runs the thread another time. __irq_wake_thread()
121 * the irq thread. The irq thread decrements the counter when __irq_wake_thread()
130 wake_up_process(action->thread); __irq_wake_thread()
154 * did not set up a thread function handle_irq_event_percpu()
/linux-4.1.27/arch/c6x/kernel/
H A Dprocess.c90 * Do necessary setup to start up a newly executed thread.
107 current->thread.usp = usp; start_thread()
111 * Copy a new thread context in its stack.
125 p->thread.pc = (unsigned long) ret_from_kernel_thread; copy_thread()
133 p->thread.pc = (unsigned long) ret_from_fork; copy_thread()
137 p->thread.usp = childregs->sp; copy_thread()
139 p->thread.wchan = p->thread.pc; copy_thread()
156 return p->thread.wchan; get_wchan()
/linux-4.1.27/arch/m32r/include/asm/
H A Dthread_info.h4 /* thread_info.h: m32r low-level thread information
28 unsigned long status; /* thread-synchronous flags */
32 mm_segment_t addr_limit; /* thread address space:
33 0-0xBFFFFFFF for user-thread
34 0-0xFFFFFFFF for kernel-thread
45 * macros/functions for gaining access to the thread information structure
61 /* how to get the thread information struct from C */ current_thread_info()
93 * thread information flags
121 * ever touches our thread-synchronous status, so we don't
H A Dswitch_to.h45 "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
46 "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
/linux-4.1.27/arch/m68k/include/asm/
H A Dthread_info.h9 * On machines with 4k pages we default to an 8k thread size, though we
11 * the thread size must match the page size (which is 8k and larger here).
29 mm_segment_t addr_limit; /* thread address space */
32 unsigned long tp_value; /* thread pointer */
46 /* how to get the thread information struct from C */ current_thread_info()
H A Dprocessor.h105 * true on thread creation). We need to set this explicitly.
115 * Do necessary setup to start up a newly executed thread.
151 /* Free all resources held by a thread. */ release_thread()
157 * Free current thread data structures etc..
170 if ((tsk)->thread.esp0 > PAGE_SIZE && \
171 (virt_addr_valid((tsk)->thread.esp0))) \
172 eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
174 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
176 #define task_pt_regs(tsk) ((struct pt_regs *) ((tsk)->thread.esp0))
/linux-4.1.27/arch/arm/mach-ep93xx/
H A Dcrunch.c26 void crunch_task_release(struct thread_info *thread) crunch_task_release() argument
29 if (crunch_owner == &thread->crunchstate) crunch_task_release()
41 struct thread_info *thread = (struct thread_info *)t; crunch_do() local
45 crunch_state = &thread->crunchstate; crunch_do()
57 crunch_task_release(thread); crunch_do()
/linux-4.1.27/drivers/android/
H A Dbinder.c1214 pr_err("reply failed, target thread, %d:%d, has error code %d already\n", binder_send_failed_reply()
1230 "reply failed, no target thread at root\n"); binder_send_failed_reply()
1235 "reply failed, no target thread -- retry %d\n", binder_send_failed_reply()
1318 struct binder_thread *thread, binder_transaction()
1336 e->from_thread = thread->pid; binder_transaction()
1342 in_reply_to = thread->transaction_stack; binder_transaction()
1345 proc->pid, thread->pid); binder_transaction()
1350 if (in_reply_to->to_thread != thread) { binder_transaction()
1352 proc->pid, thread->pid, in_reply_to->debug_id, binder_transaction()
1361 thread->transaction_stack = in_reply_to->to_parent; binder_transaction()
1369 proc->pid, thread->pid, binder_transaction()
1386 proc->pid, thread->pid); binder_transaction()
1409 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { binder_transaction()
1412 tmp = thread->transaction_stack; binder_transaction()
1413 if (tmp->to_thread != thread) { binder_transaction()
1415 proc->pid, thread->pid, tmp->debug_id, binder_transaction()
1460 proc->pid, thread->pid, t->debug_id, binder_transaction()
1468 proc->pid, thread->pid, t->debug_id, binder_transaction()
1475 t->from = thread; binder_transaction()
1507 proc->pid, thread->pid); binder_transaction()
1514 proc->pid, thread->pid); binder_transaction()
1520 proc->pid, thread->pid, (u64)tr->offsets_size); binder_transaction()
1532 proc->pid, thread->pid, (u64)*offp); binder_transaction()
1554 proc->pid, thread->pid, binder_transaction()
1576 &thread->todo); binder_transaction()
1591 thread->pid, fp->handle); binder_transaction()
1639 proc->pid, thread->pid, fp->handle); binder_transaction()
1645 proc->pid, thread->pid, fp->handle); binder_transaction()
1653 proc->pid, thread->pid, fp->handle); binder_transaction()
1680 proc->pid, thread->pid, fp->type); binder_transaction()
1691 t->from_parent = thread->transaction_stack; binder_transaction()
1692 thread->transaction_stack = t; binder_transaction()
1705 list_add_tail(&tcomplete->entry, &thread->todo); binder_transaction()
1737 proc->pid, thread->pid, return_error, binder_transaction()
1747 BUG_ON(thread->return_error != BR_OK); binder_transaction()
1749 thread->return_error = BR_TRANSACTION_COMPLETE; binder_transaction()
1752 thread->return_error = return_error; binder_transaction()
1756 struct binder_thread *thread, binder_thread_write()
1765 while (ptr < end && thread->return_error == BR_OK) { binder_thread_write()
1773 thread->stats.bc[_IOC_NR(cmd)]++; binder_thread_write()
1793 proc->pid, thread->pid, binder_thread_write()
1800 proc->pid, thread->pid, target); binder_thread_write()
1824 proc->pid, thread->pid, debug_string, ref->debug_id, binder_thread_write()
1843 proc->pid, thread->pid, binder_thread_write()
1852 proc->pid, thread->pid, binder_thread_write()
1862 proc->pid, thread->pid, binder_thread_write()
1870 proc->pid, thread->pid, binder_thread_write()
1879 proc->pid, thread->pid, binder_thread_write()
1902 proc->pid, thread->pid, (u64)data_ptr); binder_thread_write()
1907 proc->pid, thread->pid, (u64)data_ptr); binder_thread_write()
1912 proc->pid, thread->pid, (u64)data_ptr, binder_thread_write()
1925 list_move_tail(buffer->target_node->async_todo.next, &thread->todo); binder_thread_write()
1940 binder_transaction(proc, thread, &tr, cmd == BC_REPLY); binder_thread_write()
1947 proc->pid, thread->pid); binder_thread_write()
1948 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { binder_thread_write()
1949 thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_thread_write()
1951 proc->pid, thread->pid); binder_thread_write()
1953 thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_thread_write()
1955 proc->pid, thread->pid); binder_thread_write()
1960 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; binder_thread_write()
1965 proc->pid, thread->pid); binder_thread_write()
1966 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { binder_thread_write()
1967 thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_thread_write()
1969 proc->pid, thread->pid); binder_thread_write()
1971 thread->looper |= BINDER_LOOPER_STATE_ENTERED; binder_thread_write()
1976 proc->pid, thread->pid); binder_thread_write()
1977 thread->looper |= BINDER_LOOPER_STATE_EXITED; binder_thread_write()
1996 proc->pid, thread->pid, binder_thread_write()
2006 proc->pid, thread->pid, binder_thread_write()
2016 proc->pid, thread->pid); binder_thread_write()
2021 thread->return_error = BR_ERROR; binder_thread_write()
2024 proc->pid, thread->pid); binder_thread_write()
2033 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { binder_thread_write()
2034 list_add_tail(&ref->death->work.entry, &thread->todo); binder_thread_write()
2043 proc->pid, thread->pid); binder_thread_write()
2049 proc->pid, thread->pid, binder_thread_write()
2057 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { binder_thread_write()
2058 list_add_tail(&death->work.entry, &thread->todo); binder_thread_write()
2088 proc->pid, thread->pid, (u64)cookie, binder_thread_write()
2092 proc->pid, thread->pid, (u64)cookie); binder_thread_write()
2099 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { binder_thread_write()
2100 list_add_tail(&death->work.entry, &thread->todo); binder_thread_write()
2110 proc->pid, thread->pid, cmd); binder_thread_write()
2119 struct binder_thread *thread, uint32_t cmd) binder_stat_br()
2125 thread->stats.br[_IOC_NR(cmd)]++; binder_stat_br()
2130 struct binder_thread *thread) binder_has_proc_work()
2133 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); binder_has_proc_work()
2136 static int binder_has_thread_work(struct binder_thread *thread) binder_has_thread_work() argument
2138 return !list_empty(&thread->todo) || thread->return_error != BR_OK || binder_has_thread_work()
2139 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); binder_has_thread_work()
2143 struct binder_thread *thread, binder_thread_read()
2161 wait_for_proc_work = thread->transaction_stack == NULL && binder_thread_read()
2162 list_empty(&thread->todo); binder_thread_read()
2164 if (thread->return_error != BR_OK && ptr < end) { binder_thread_read()
2165 if (thread->return_error2 != BR_OK) { binder_thread_read()
2166 if (put_user(thread->return_error2, (uint32_t __user *)ptr)) binder_thread_read()
2169 binder_stat_br(proc, thread, thread->return_error2); binder_thread_read()
2172 thread->return_error2 = BR_OK; binder_thread_read()
2174 if (put_user(thread->return_error, (uint32_t __user *)ptr)) binder_thread_read()
2177 binder_stat_br(proc, thread, thread->return_error); binder_thread_read()
2178 thread->return_error = BR_OK; binder_thread_read()
2183 thread->looper |= BINDER_LOOPER_STATE_WAITING; binder_thread_read()
2190 !!thread->transaction_stack, binder_thread_read()
2191 !list_empty(&thread->todo)); binder_thread_read()
2193 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | binder_thread_read()
2196 proc->pid, thread->pid, thread->looper); binder_thread_read()
2202 if (!binder_has_proc_work(proc, thread)) binder_thread_read()
2205 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); binder_thread_read()
2208 if (!binder_has_thread_work(thread)) binder_thread_read()
2211 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); binder_thread_read()
2218 thread->looper &= ~BINDER_LOOPER_STATE_WAITING; binder_thread_read()
2229 if (!list_empty(&thread->todo)) { binder_thread_read()
2230 w = list_first_entry(&thread->todo, struct binder_work, binder_thread_read()
2238 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) binder_thread_read()
2256 binder_stat_br(proc, thread, cmd); binder_thread_read()
2259 proc->pid, thread->pid); binder_thread_read()
2306 binder_stat_br(proc, thread, cmd); binder_thread_read()
2309 proc->pid, thread->pid, cmd_name, binder_thread_read()
2317 proc->pid, thread->pid, binder_thread_read()
2327 proc->pid, thread->pid, binder_thread_read()
2352 binder_stat_br(proc, thread, cmd); binder_thread_read()
2355 proc->pid, thread->pid, binder_thread_read()
2424 binder_stat_br(proc, thread, cmd); binder_thread_read()
2427 proc->pid, thread->pid, binder_thread_read()
2438 t->to_parent = thread->transaction_stack; binder_thread_read()
2439 t->to_thread = thread; binder_thread_read()
2440 thread->transaction_stack = t; binder_thread_read()
2454 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | binder_thread_read()
2456 /*spawn a new thread if we leave this out */) { binder_thread_read()
2460 proc->pid, thread->pid); binder_thread_read()
2463 binder_stat_br(proc, thread, BR_SPAWN_LOOPER); binder_thread_read()
2520 struct binder_thread *thread = NULL; binder_get_thread() local
2526 thread = rb_entry(parent, struct binder_thread, rb_node); binder_get_thread()
2528 if (current->pid < thread->pid) binder_get_thread()
2530 else if (current->pid > thread->pid) binder_get_thread()
2536 thread = kzalloc(sizeof(*thread), GFP_KERNEL); binder_get_thread()
2537 if (thread == NULL) binder_get_thread()
2540 thread->proc = proc; binder_get_thread()
2541 thread->pid = current->pid; binder_get_thread()
2542 init_waitqueue_head(&thread->wait); binder_get_thread()
2543 INIT_LIST_HEAD(&thread->todo); binder_get_thread()
2544 rb_link_node(&thread->rb_node, parent, p); binder_get_thread()
2545 rb_insert_color(&thread->rb_node, &proc->threads); binder_get_thread()
2546 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; binder_get_thread()
2547 thread->return_error = BR_OK; binder_get_thread()
2548 thread->return_error2 = BR_OK; binder_get_thread()
2550 return thread; binder_get_thread()
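The binder_get_thread() hits above show a lookup-or-create over the per-process red-black tree of threads, keyed by pid. Below is a minimal self-contained sketch of that pattern; example_thread and example_get_thread are hypothetical names, while rb_entry(), rb_link_node() and rb_insert_color() are the real <linux/rbtree.h> API the driver uses.

    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct example_thread {
            struct rb_node rb_node;
            pid_t pid;
    };

    static struct example_thread *example_get_thread(struct rb_root *root, pid_t pid)
    {
            struct rb_node **p = &root->rb_node;
            struct rb_node *parent = NULL;
            struct example_thread *t;

            while (*p) {
                    parent = *p;
                    t = rb_entry(parent, struct example_thread, rb_node);
                    if (pid < t->pid)
                            p = &(*p)->rb_left;
                    else if (pid > t->pid)
                            p = &(*p)->rb_right;
                    else
                            return t;               /* already in the tree */
            }
            t = kzalloc(sizeof(*t), GFP_KERNEL);    /* not found: create */
            if (t == NULL)
                    return NULL;
            t->pid = pid;
            rb_link_node(&t->rb_node, parent, p);   /* link at the leaf found above */
            rb_insert_color(&t->rb_node, root);     /* rebalance */
            return t;
    }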
2554 struct binder_thread *thread) binder_free_thread()
2560 rb_erase(&thread->rb_node, &proc->threads); binder_free_thread()
2561 t = thread->transaction_stack; binder_free_thread()
2562 if (t && t->to_thread == thread) binder_free_thread()
2568 proc->pid, thread->pid, binder_free_thread()
2570 (t->to_thread == thread) ? "in" : "out"); binder_free_thread()
2572 if (t->to_thread == thread) { binder_free_thread()
2580 } else if (t->from == thread) { binder_free_thread()
2588 binder_release_work(&thread->todo); binder_free_thread()
2589 kfree(thread); binder_free_thread()
2598 struct binder_thread *thread = NULL; binder_poll() local
2603 thread = binder_get_thread(proc); binder_poll()
2605 wait_for_proc_work = thread->transaction_stack == NULL && binder_poll()
2606 list_empty(&thread->todo) && thread->return_error == BR_OK; binder_poll()
2611 if (binder_has_proc_work(proc, thread)) binder_poll()
2614 if (binder_has_proc_work(proc, thread)) binder_poll()
2617 if (binder_has_thread_work(thread)) binder_poll()
2619 poll_wait(filp, &thread->wait, wait); binder_poll()
2620 if (binder_has_thread_work(thread)) binder_poll()
2628 struct binder_thread *thread) binder_ioctl_write_read()
2646 proc->pid, thread->pid, binder_ioctl_write_read()
2651 ret = binder_thread_write(proc, thread, binder_ioctl_write_read()
2664 ret = binder_thread_read(proc, thread, bwr.read_buffer, binder_ioctl_write_read()
2679 proc->pid, thread->pid, binder_ioctl_write_read()
2733 struct binder_thread *thread; binder_ioctl() local
2747 thread = binder_get_thread(proc); binder_ioctl()
2748 if (thread == NULL) { binder_ioctl()
2755 ret = binder_ioctl_write_read(filp, cmd, arg, thread); binder_ioctl()
2772 proc->pid, thread->pid); binder_ioctl()
2773 binder_free_thread(proc, thread); binder_ioctl()
2774 thread = NULL; binder_ioctl()
2796 if (thread) binder_ioctl()
2797 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; binder_ioctl()
2994 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); binder_deferred_flush() local
2996 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; binder_deferred_flush()
2997 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { binder_deferred_flush()
2998 wake_up_interruptible(&thread->wait); binder_deferred_flush()
3085 struct binder_thread *thread; binder_deferred_release() local
3087 thread = rb_entry(n, struct binder_thread, rb_node); binder_deferred_release()
3089 active_transactions += binder_free_thread(proc, thread); binder_deferred_release()
3292 struct binder_thread *thread, print_binder_thread()
3300 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); print_binder_thread()
3302 t = thread->transaction_stack; print_binder_thread()
3304 if (t->from == thread) { print_binder_thread()
3308 } else if (t->to_thread == thread) { print_binder_thread()
3317 list_for_each_entry(w, &thread->todo, entry) { print_binder_thread()
3440 "thread",
1317 binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply) binder_transaction() argument
1755 binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed) binder_thread_write() argument
2118 binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, uint32_t cmd) binder_stat_br() argument
2129 binder_has_proc_work(struct binder_proc *proc, struct binder_thread *thread) binder_has_proc_work() argument
2142 binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed, int non_block) binder_thread_read() argument
2553 binder_free_thread(struct binder_proc *proc, struct binder_thread *thread) binder_free_thread() argument
2626 binder_ioctl_write_read(struct file *filp, unsigned int cmd, unsigned long arg, struct binder_thread *thread) binder_ioctl_write_read() argument
3291 print_binder_thread(struct seq_file *m, struct binder_thread *thread, int print_always) print_binder_thread() argument
/linux-4.1.27/arch/x86/um/asm/
H A Dprocessor_32.h38 static inline void arch_flush_thread(struct arch_thread *thread) arch_flush_thread() argument
41 memset(&thread->tls_array, 0, sizeof(thread->tls_array)); arch_flush_thread()
H A Dprocessor_64.h24 static inline void arch_flush_thread(struct arch_thread *thread) arch_flush_thread() argument
/linux-4.1.27/arch/avr32/kernel/
H A Dswitch_to.S15 /* Switch thread context from "prev" to "next", returning "last"
17 * r11 : &prev->thread + 1
18 * r10 : &next->thread
H A Dprocess.c63 * Free current thread data structures etc
129 fp = tsk->thread.cpu_context.r7; show_trace_log_lvl()
186 sp = tsk->thread.cpu_context.ksp; show_stack_log_lvl()
290 p->thread.cpu_context.r0 = arg; copy_thread()
291 p->thread.cpu_context.r1 = usp; /* fn */ copy_thread()
292 p->thread.cpu_context.r2 = (unsigned long)syscall_return; copy_thread()
293 p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread; copy_thread()
300 p->thread.cpu_context.pc = (unsigned long)ret_from_fork; copy_thread()
303 p->thread.cpu_context.sr = MODE_SUPERVISOR | SR_GM; copy_thread()
304 p->thread.cpu_context.ksp = (unsigned long)childregs; copy_thread()
335 unsigned long fp = p->thread.cpu_context.r7; get_wchan()
348 unsigned long sp = p->thread.cpu_context.ksp + 16; get_wchan()
/linux-4.1.27/arch/frv/include/asm/
H A Dswitch_to.h29 (prev)->thread.sched_lr = \
31 (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
H A Dprocessor.h43 struct pt_regs *frame; /* [GR28] exception frame ptr for this thread */
44 struct task_struct *curr; /* [GR29] current pointer for this thread */
78 * do necessary setup to start up a newly executed thread.
87 /* Free all resources held by a thread. */ release_thread()
100 * Free current thread data structures etc..
107 * Return saved PC of a blocked thread.
113 #define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc)
114 #define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp)
H A Dthread_info.h35 unsigned long status; /* thread-synchronous flags */
39 mm_segment_t addr_limit; /* thread address space:
41 * 0-0xFFFFFFFF for kernel-thread
54 * macros/functions for gaining access to the thread information structure
70 /* how to get the thread information struct from C */
78 * thread information flags
112 * ever touches our thread-synchronous status, so we don't
/linux-4.1.27/arch/arm/include/asm/
H A Dthread_notify.h32 static inline void thread_notify(unsigned long rc, struct thread_info *thread) thread_notify() argument
35 atomic_notifier_call_chain(&thread_notify_head, rc, thread); thread_notify()
41 * These are the reason codes for the thread notifier.
H A Ddomain.h73 struct thread_info *thread = current_thread_info(); \
74 unsigned int domain = thread->cpu_domain; \
76 thread->cpu_domain = domain | domain_val(dom, type); \
77 set_domain(thread->cpu_domain); \
/linux-4.1.27/arch/cris/include/asm/
H A Dthread_info.h1 /* thread_info.h: CRIS low-level thread information
34 __u32 tls; /* TLS for this thread */
36 mm_segment_t addr_limit; /* thread address space:
38 0-0xFFFFFFFF for kernel-thread
46 * macros/functions for gaining access to the thread information structure
63 * thread information flags
H A Dswitch_to.h10 (int)&((struct task_struct *)0)->thread)
/linux-4.1.27/arch/openrisc/kernel/
H A Dprocess.c48 * Pointer to Current thread info structure.
108 * Copy the thread-specific (arch specific) info from the current
116 * @usp: user stack pointer or fn for kernel thread
117 * @arg: arg to fn for kernel thread; always NULL for userspace thread
119 * @regs: CPU context to copy for userspace thread; always NULL for kthread
122 * structures. The first (topmost) is the userspace context of the thread.
123 * The second is the kernelspace context of the thread.
125 * A kernel thread will not be returning to userspace, so the topmost pt_regs
127 * a kernel thread can become a userspace thread by doing a kernel_execve, in
132 * ret_from_fork. A kernel thread will need to set r20 to the address of
137 * A kernel thread 'fn' may return; this is effectively what happens when
141 * 'kernel thread' to return to userspace as a userspace thread.
169 kregs->gpr[20] = usp; /* fn, kernel thread */ copy_thread()
178 kregs->gpr[20] = 0; /* Userspace thread */ copy_thread()
194 * Set up a thread for executing a new program
/linux-4.1.27/drivers/net/wireless/cw1200/
H A Dbh.h2 * Device handling thread interface for mac80211 ST-Ericsson CW1200 drivers
23 /* Must be called from BH thread. */
/linux-4.1.27/arch/sparc/include/asm/
H A Dprocessor_32.h50 /* The Sparc processor specific thread struct. */
68 #define SPARC_FLAG_KTHREAD 0x1 /* task is a kernel thread */
76 /* Return saved PC of a blocked thread. */
79 /* Do necessary setup to start up a newly executed thread. */ start_thread()
107 /* Free all resources held by a thread. */
112 #define task_pt_regs(tsk) ((tsk)->thread.kregs)
113 #define KSTK_EIP(tsk) ((tsk)->thread.kregs->pc)
114 #define KSTK_ESP(tsk) ((tsk)->thread.kregs->u_regs[UREG_FP])
H A Dswitch_to_32.h20 fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
21 &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
23 (prv)->thread.kregs->psr &= ~PSR_EF; \
33 (nxt)->thread.kregs->psr&=~PSR_EF; \
H A Dthread_info_32.h2 * thread_info.h: sparc low-level thread information
54 * macros/functions for gaining access to the thread information structure
68 /* how to get the thread information struct from C */
73 * thread information allocation
102 * thread information flag bit numbers
/linux-4.1.27/arch/blackfin/include/asm/
H A Dprocessor.h73 /* Free all resources held by a thread. */ release_thread()
79 * Free current thread data structures etc..
86 * Return saved PC of a blocked thread.
88 #define thread_saved_pc(tsk) (tsk->thread.pc)
95 if ((tsk)->thread.esp0 > PAGE_SIZE && \
96 MAP_NR((tsk)->thread.esp0) < max_mapnr) \
97 eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
99 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
/linux-4.1.27/arch/arm/kernel/
H A Dxscale-cp0.c35 struct thread_info *thread = t; dsp_do() local
39 thread->cpu_context.extra[0] = 0; dsp_do()
40 thread->cpu_context.extra[1] = 0; dsp_do()
45 dsp_load_state(thread->cpu_context.extra); dsp_do()
60 struct thread_info *thread = t; iwmmxt_do() local
65 * flush_thread() zeroes thread->fpstate, so no need iwmmxt_do()
73 iwmmxt_task_release(thread); iwmmxt_do()
77 iwmmxt_task_switch(thread); iwmmxt_do()
H A Dprocess.c175 * Free current thread data structures etc..
184 struct thread_info *thread = current_thread_info(); flush_thread() local
189 memset(thread->used_cp, 0, sizeof(thread->used_cp)); flush_thread()
190 memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); flush_thread()
191 memset(&thread->fpstate, 0, sizeof(union fp_state)); flush_thread()
195 thread_notify(THREAD_NOTIFY_FLUSH, thread); flush_thread()
208 struct thread_info *thread = task_thread_info(p); copy_thread() local
211 memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); copy_thread()
220 thread->cpu_context.r4 = stk_sz; copy_thread()
221 thread->cpu_context.r5 = stack_start; copy_thread()
224 thread->cpu_context.pc = (unsigned long)ret_from_fork; copy_thread()
225 thread->cpu_context.sp = (unsigned long)childregs; copy_thread()
230 thread->tp_value[0] = childregs->ARM_r3; copy_thread()
231 thread->tp_value[1] = get_tpuser(); copy_thread()
233 thread_notify(THREAD_NOTIFY_COPY, thread); copy_thread()
252 struct thread_info *thread = current_thread_info(); dump_fpu() local
253 int used_math = thread->used_cp[1] | thread->used_cp[2]; dump_fpu()
256 memcpy(fp, &thread->fpstate.soft, sizeof (*fp)); dump_fpu()
H A Dptrace.c305 struct thread_info *thread = task_thread_info(tsk); ptrace_getwmmxregs() local
307 if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) ptrace_getwmmxregs()
309 iwmmxt_task_disable(thread); /* force it to ram */ ptrace_getwmmxregs()
310 return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE) ptrace_getwmmxregs()
319 struct thread_info *thread = task_thread_info(tsk); ptrace_setwmmxregs() local
321 if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT)) ptrace_setwmmxregs()
323 iwmmxt_task_release(thread); /* force a reload */ ptrace_setwmmxregs()
324 return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE) ptrace_setwmmxregs()
336 struct thread_info *thread = task_thread_info(tsk); ptrace_getcrunchregs() local
338 crunch_task_disable(thread); /* force it to ram */ ptrace_getcrunchregs()
339 return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE) ptrace_getcrunchregs()
348 struct thread_info *thread = task_thread_info(tsk); ptrace_setcrunchregs() local
350 crunch_task_release(thread); /* force a reload */ ptrace_setcrunchregs()
351 return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE) ptrace_setcrunchregs()
395 if (current->thread.debug.hbp[i] == bp) ptrace_hbptriggered()
415 memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp)); clear_ptrace_hw_breakpoint()
425 struct thread_struct *t = &tsk->thread; flush_ptrace_hw_breakpoint()
489 bp = tsk->thread.debug.hbp[idx]; ptrace_gethbpregs()
545 bp = tsk->thread.debug.hbp[idx]; ptrace_sethbpregs()
552 tsk->thread.debug.hbp[idx] = bp; ptrace_sethbpregs()
633 struct thread_info *thread = task_thread_info(target); fpa_set() local
635 thread->used_cp[1] = thread->used_cp[2] = 1; fpa_set()
638 &thread->fpstate, fpa_set()
671 struct thread_info *thread = task_thread_info(target); vfp_get() local
672 struct vfp_hard_struct const *vfp = &thread->vfpstate.hard; vfp_get()
676 vfp_sync_hwstate(thread); vfp_get()
708 struct thread_info *thread = task_thread_info(target); vfp_set() local
713 vfp_sync_hwstate(thread); vfp_set()
714 new_vfp = thread->vfpstate.hard; vfp_set()
736 thread->vfpstate.hard = new_vfp; vfp_set()
737 vfp_flush_hwstate(thread); vfp_set()
968 * current thread. This isn't a problem because it will have syscall_trace_exit()
H A Dpj4-cp0.c24 struct thread_info *thread = t; iwmmxt_do() local
29 * flush_thread() zeroes thread->fpstate, so no need iwmmxt_do()
37 iwmmxt_task_release(thread); iwmmxt_do()
41 iwmmxt_task_switch(thread); iwmmxt_do()
/linux-4.1.27/tools/perf/arch/arm/util/
H A Dunwind-libdw.c5 bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg) libdw__arch_set_initial_registers() argument
34 return dwfl_thread_state_registers(thread, 0, PERF_REG_ARM_MAX, libdw__arch_set_initial_registers()
/linux-4.1.27/arch/x86/kernel/
H A Dprocess_32.c62 * Return saved PC of a blocked thread.
66 return ((unsigned long *)tsk->thread.sp)[3]; thread_saved_pc()
139 p->thread.sp = (unsigned long) childregs; copy_thread()
140 p->thread.sp0 = (unsigned long) (childregs+1); copy_thread()
141 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); copy_thread()
144 /* kernel thread */ copy_thread()
146 p->thread.ip = (unsigned long) ret_from_kernel_thread; copy_thread()
156 p->thread.io_bitmap_ptr = NULL; copy_thread()
164 p->thread.ip = (unsigned long) ret_from_fork; copy_thread()
167 p->thread.io_bitmap_ptr = NULL; copy_thread()
172 p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, copy_thread()
174 if (!p->thread.io_bitmap_ptr) { copy_thread()
175 p->thread.io_bitmap_max = 0; copy_thread()
184 * Set a new TLS for the child thread? copy_thread()
190 if (err && p->thread.io_bitmap_ptr) { copy_thread()
191 kfree(p->thread.io_bitmap_ptr); copy_thread()
192 p->thread.io_bitmap_max = 0; copy_thread()
244 struct thread_struct *prev = &prev_p->thread, __switch_to()
245 *next = &next_p->thread; __switch_to()
267 * Load the per-thread Thread-Local Storage descriptor. __switch_to()
340 sp = p->thread.sp; get_wchan()
H A Dprocess_64.c145 struct desc_struct *desc = t->thread.tls_array; set_32bit_tls()
152 return get_desc_base(&t->thread.tls_array[tls]); read_32bit_tls()
162 p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE; copy_thread()
164 p->thread.sp = (unsigned long) childregs; copy_thread()
166 p->thread.io_bitmap_ptr = NULL; copy_thread()
168 savesegment(gs, p->thread.gsindex); copy_thread()
169 p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs; copy_thread()
170 savesegment(fs, p->thread.fsindex); copy_thread()
171 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs; copy_thread()
172 savesegment(es, p->thread.es); copy_thread()
173 savesegment(ds, p->thread.ds); copy_thread()
174 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); copy_thread()
177 /* kernel thread */ copy_thread()
196 p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr, copy_thread()
198 if (!p->thread.io_bitmap_ptr) { copy_thread()
199 p->thread.io_bitmap_max = 0; copy_thread()
206 * Set a new TLS for the child thread? copy_thread()
221 if (err && p->thread.io_bitmap_ptr) { copy_thread()
222 kfree(p->thread.io_bitmap_ptr); copy_thread()
223 p->thread.io_bitmap_max = 0; copy_thread()
276 struct thread_struct *prev = &prev_p->thread; __switch_to()
277 struct thread_struct *next = &next_p->thread; __switch_to()
354 * the base address if next thread expects it to be overridden. __switch_to()
554 sp = READ_ONCE(p->thread.sp); get_wchan()
586 load_TLS(&task->thread, cpu); do_arch_prctl()
589 task->thread.gsindex = GS_TLS_SEL; do_arch_prctl()
590 task->thread.gs = 0; do_arch_prctl()
592 task->thread.gsindex = 0; do_arch_prctl()
593 task->thread.gs = addr; do_arch_prctl()
612 load_TLS(&task->thread, cpu); do_arch_prctl()
615 task->thread.fsindex = FS_TLS_SEL; do_arch_prctl()
616 task->thread.fs = 0; do_arch_prctl()
618 task->thread.fsindex = 0; do_arch_prctl()
619 task->thread.fs = addr; do_arch_prctl()
631 if (task->thread.fsindex == FS_TLS_SEL) do_arch_prctl()
636 base = task->thread.fs; do_arch_prctl()
643 if (task->thread.gsindex == GS_TLS_SEL) do_arch_prctl()
650 base = task->thread.gs; do_arch_prctl()
652 base = task->thread.gs; do_arch_prctl()
H A Dvm86_32.c84 #define VFLAGS (*(unsigned short *)&(current->thread.v86flags))
85 #define VEFLAGS (current->thread.v86flags)
141 if (!current->thread.vm86_info) { save_v86_state()
145 set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask); save_v86_state()
146 tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs); save_v86_state()
147 tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap); save_v86_state()
154 current->thread.sp0 = current->thread.saved_sp0; save_v86_state()
155 current->thread.sysenter_cs = __KERNEL_CS; save_v86_state()
156 load_sp0(tss, &current->thread); save_v86_state()
157 current->thread.saved_sp0 = 0; save_v86_state()
162 ret->fs = current->thread.saved_fs; save_v86_state()
163 set_user_gs(ret, current->thread.saved_gs); save_v86_state()
215 if (tsk->thread.saved_sp0) SYSCALL_DEFINE1()
224 tsk->thread.vm86_info = v86; SYSCALL_DEFINE1()
259 if (tsk->thread.saved_sp0) SYSCALL_DEFINE2()
269 tsk->thread.vm86_info = (struct vm86_struct __user *)v86; SYSCALL_DEFINE2()
300 tsk->thread.v86mask = 0; do_sys_vm86()
303 tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL; do_sys_vm86()
306 tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; do_sys_vm86()
309 tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL; do_sys_vm86()
317 tsk->thread.saved_sp0 = tsk->thread.sp0; do_sys_vm86()
318 tsk->thread.saved_fs = info->regs32->fs; do_sys_vm86()
319 tsk->thread.saved_gs = get_user_gs(info->regs32); do_sys_vm86()
322 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; do_sys_vm86()
324 tsk->thread.sysenter_cs = 0; do_sys_vm86()
325 load_sp0(tss, &tsk->thread); do_sys_vm86()
328 tsk->thread.screen_bitmap = info->screen_bitmap; do_sys_vm86()
398 set_flags(VEFLAGS, flags, current->thread.v86mask); set_vflags_long()
408 set_flags(VFLAGS, flags, current->thread.v86mask); set_vflags_short()
423 return flags | (VEFLAGS & current->thread.v86mask); get_vflags()
564 current->thread.trap_nr = trapno; handle_vm86_trap()
565 current->thread.error_code = error_code; handle_vm86_trap()
H A Dptrace.c285 return task->thread.fsindex; offsetof()
291 return task->thread.gsindex; offsetof()
297 return task->thread.ds; offsetof()
303 return task->thread.es;
327 if ((value == FS_TLS_SEL && task->thread.fsindex == 0 && set_segment_reg()
328 task->thread.fs != 0) || set_segment_reg()
329 (value == 0 && task->thread.fsindex == FS_TLS_SEL && set_segment_reg()
330 task->thread.fs == 0)) set_segment_reg()
332 task->thread.fsindex = value; set_segment_reg()
334 loadsegment(fs, task->thread.fsindex); set_segment_reg()
341 if ((value == GS_TLS_SEL && task->thread.gsindex == 0 && set_segment_reg()
342 task->thread.gs != 0) || set_segment_reg()
343 (value == 0 && task->thread.gsindex == GS_TLS_SEL && set_segment_reg()
344 task->thread.gs == 0)) set_segment_reg()
346 task->thread.gsindex = value; set_segment_reg()
348 load_gs_index(task->thread.gsindex); set_segment_reg()
351 task->thread.ds = value; set_segment_reg()
353 loadsegment(ds, task->thread.ds); set_segment_reg()
356 task->thread.es = value; set_segment_reg()
358 loadsegment(es, task->thread.es); set_segment_reg()
434 * to set either thread.fs or thread.fsindex and the putreg()
437 if (child->thread.fs != value) putreg()
446 if (child->thread.gs != value) putreg()
477 unsigned int seg = task->thread.fsindex; offsetof()
478 if (task->thread.fs != 0) offsetof()
479 return task->thread.fs; offsetof()
484 return get_desc_base(&task->thread.tls_array[FS_TLS]); offsetof()
490 unsigned int seg = task->thread.gsindex; offsetof()
491 if (task->thread.gs != 0) offsetof()
492 return task->thread.gs; offsetof()
497 return get_desc_base(&task->thread.tls_array[GS_TLS]); offsetof()
563 struct thread_struct *thread = &(current->thread); ptrace_triggered() local
567 * was hit so the thread's debugger will see it. ptrace_triggered()
570 if (thread->ptrace_bps[i] == bp) ptrace_triggered()
574 thread->debugreg6 |= (DR_TRAP0 << i); ptrace_triggered()
578 * Walk through every ptrace breakpoints for this thread and
649 struct thread_struct *thread = &tsk->thread; ptrace_write_dr7() local
655 old_dr7 = ptrace_get_dr7(thread->ptrace_bps); ptrace_write_dr7()
662 struct perf_event *bp = thread->ptrace_bps[i]; ptrace_write_dr7()
675 thread->ptrace_bps[i] = bp; ptrace_write_dr7()
700 struct thread_struct *thread = &tsk->thread; ptrace_get_debugreg() local
704 struct perf_event *bp = thread->ptrace_bps[n]; ptrace_get_debugreg()
709 val = thread->debugreg6; ptrace_get_debugreg()
711 val = thread->ptrace_dr7; ptrace_get_debugreg()
719 struct thread_struct *t = &tsk->thread; ptrace_set_breakpoint_addr()
758 struct thread_struct *thread = &tsk->thread; ptrace_set_debugreg() local
765 thread->debugreg6 = val; ptrace_set_debugreg()
770 thread->ptrace_dr7 = val; ptrace_set_debugreg()
782 return target->thread.io_bitmap_max / regset->size; ioperm_active()
790 if (!target->thread.io_bitmap_ptr) ioperm_get()
794 target->thread.io_bitmap_ptr, ioperm_get()
1412 tsk->thread.trap_nr = X86_TRAP_DB; fill_sigtrap_info()
1413 tsk->thread.error_code = error_code; fill_sigtrap_info()
H A Di387.c40 * pair does nothing at all: the thread must not have fpu (so
43 * visible in the interrupted kernel thread).
46 * the thread has FPU but we are not going to set/clear TS.
277 ret = fpu_alloc(&tsk->thread.fpu); init_fpu()
281 fpu_finit(&tsk->thread.fpu); init_fpu()
319 &target->thread.fpu.state->fxsave, 0, -1); xfpregs_get()
338 &target->thread.fpu.state->fxsave, 0, -1); xfpregs_set()
343 target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask; xfpregs_set()
350 target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; xfpregs_set()
369 xsave = &target->thread.fpu.state->xsave; xstateregs_get()
373 * memory layout in the thread struct, so that we can copy the entire xstateregs_get()
399 xsave = &target->thread.fpu.state->xsave; xstateregs_set()
489 struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave; convert_from_fxsr()
509 env->fos = tsk->thread.ds; convert_from_fxsr()
527 struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave; convert_to_fxsr()
567 &target->thread.fpu.state->fsave, 0, fpregs_get()
600 &target->thread.fpu.state->fsave, 0, fpregs_set()
615 target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP; fpregs_set()
/linux-4.1.27/arch/unicore32/kernel/
H A Dprocess.c205 * Free current thread data structures etc..
213 struct thread_info *thread = current_thread_info(); flush_thread() local
216 memset(thread->used_cp, 0, sizeof(thread->used_cp)); flush_thread()
217 memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); flush_thread()
219 memset(&thread->fpstate, 0, sizeof(struct fp_state)); flush_thread()
234 struct thread_info *thread = task_thread_info(p); copy_thread() local
237 memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); copy_thread()
238 thread->cpu_context.sp = (unsigned long)childregs; copy_thread()
240 thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread; copy_thread()
241 thread->cpu_context.r4 = stack_start; copy_thread()
242 thread->cpu_context.r5 = stk_sz; copy_thread()
245 thread->cpu_context.pc = (unsigned long)ret_from_fork; copy_thread()
271 struct thread_info *thread = current_thread_info(); dump_fpu() local
272 int used_math = thread->used_cp[1] | thread->used_cp[2]; dump_fpu()
276 memcpy(fp, &thread->fpstate, sizeof(*fp)); dump_fpu()
/linux-4.1.27/arch/xtensa/kernel/
H A Dprocess.c114 * This is called when the thread calls exit().
124 * Flush thread state. This is called when a thread does an execve()
138 * copy the current task into the new thread.
150 * Copy thread.
153 * 1) Userspace thread creation,
157 * 2) Kernel thread creation,
158 * regs == NULL, usp_thread_fn is the function to run in the new thread
162 * The stack layout for the new thread looks like this:
166 * +------------------------+ <- thread.sp = sp in dummy-frame
173 * sp points to itself (thread.sp)
180 * The fun part: if we're keeping the same VM (i.e. cloning a thread,
204 p->thread.sp = (unsigned long)childregs; copy_thread()
211 p->thread.ra = MAKE_RA_FOR_CALL( copy_thread()
222 /* When sharing memory with the parent thread, the child copy_thread()
225 (Note that such a new thread is required to always create copy_thread()
227 The exception is vfork, where the new thread continues to copy_thread()
253 /* The thread pointer is passed in the '4th argument' (= a5) */ copy_thread()
257 p->thread.ra = MAKE_RA_FOR_CALL( copy_thread()
293 sp = p->thread.sp; get_wchan()
294 pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp); get_wchan()
/linux-4.1.27/arch/powerpc/math-emu/
H A Dmath.c331 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu()
332 op1 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f); do_mathemu()
333 op2 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f); do_mathemu()
337 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu()
338 op1 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f); do_mathemu()
339 op2 = (void *)&current->thread.TS_FPR((insn >> 6) & 0x1f); do_mathemu()
343 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu()
344 op1 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f); do_mathemu()
345 op2 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f); do_mathemu()
346 op3 = (void *)&current->thread.TS_FPR((insn >> 6) & 0x1f); do_mathemu()
352 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu()
362 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu()
367 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu()
371 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu()
372 op1 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f); do_mathemu()
376 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu()
377 op1 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f); do_mathemu()
382 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu()
391 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); do_mathemu()
399 op2 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f); do_mathemu()
400 op3 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f); do_mathemu()
420 op1 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f); do_mathemu()
/linux-4.1.27/arch/x86/include/asm/
H A Dfpu-internal.h75 * but if the current thread owns the FPU, it will still be saved by. finit_soft_fpu()
89 tsk->thread.fpu.last_cpu = ~0; task_disable_lazy_fpu_restore()
95 cpu == new->thread.fpu.last_cpu; fpu_lazy_restore()
305 return fpu_save_init(&tsk->thread.fpu); __save_init_fpu()
330 : : [addr] "m" (tsk->thread.fpu.has_fpu)); restore_fpu_checking()
333 return fpu_restore_checking(&tsk->thread.fpu); restore_fpu_checking()
343 return tsk->thread.fpu.has_fpu; __thread_has_fpu()
349 tsk->thread.fpu.has_fpu = 0; __thread_clear_has_fpu()
356 tsk->thread.fpu.has_fpu = 1; __thread_set_has_fpu()
387 tsk->thread.fpu_counter = 0; drop_fpu()
444 (use_eager_fpu() || new->thread.fpu_counter > 5); switch_fpu_prepare()
450 old->thread.fpu.last_cpu = cpu; switch_fpu_prepare()
453 old->thread.fpu.has_fpu = 0; switch_fpu_prepare()
457 new->thread.fpu_counter++; switch_fpu_prepare()
459 prefetch(new->thread.fpu.state); switch_fpu_prepare()
463 old->thread.fpu_counter = 0; switch_fpu_prepare()
466 new->thread.fpu_counter++; switch_fpu_prepare()
470 prefetch(new->thread.fpu.state); switch_fpu_prepare()
535 xsave_state_booting(&tsk->thread.fpu.state->xsave, -1); __save_fpu()
537 xsave_state(&tsk->thread.fpu.state->xsave, -1); __save_fpu()
539 fpu_fxsave(&tsk->thread.fpu); __save_fpu()
548 return tsk->thread.fpu.state->fxsave.cwd; get_fpu_cwd()
550 return (unsigned short)tsk->thread.fpu.state->fsave.cwd; get_fpu_cwd()
557 return tsk->thread.fpu.state->fxsave.swd; get_fpu_swd()
559 return (unsigned short)tsk->thread.fpu.state->fsave.swd; get_fpu_swd()
566 return tsk->thread.fpu.state->fxsave.mxcsr; get_fpu_mxcsr()
599 memset(&dst->thread.fpu.state->xsave, 0, xstate_size); fpu_copy()
602 struct fpu *dfpu = &dst->thread.fpu; fpu_copy()
603 struct fpu *sfpu = &src->thread.fpu; fpu_copy()
H A Dsuspend_64.h40 #define loaddebug(thread,register) \
41 set_debugreg((thread)->debugreg##register, register)
H A Dswitch_to.h55 : [prev_sp] "=m" (prev->thread.sp), \
56 [prev_ip] "=m" (prev->thread.ip), \
66 : [next_sp] "m" (next->thread.sp), \
67 [next_ip] "m" (next->thread.ip), \
123 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
/linux-4.1.27/drivers/dma/
H A Ddmatest.c176 struct dmatest_thread *thread; is_threaded_test_run() local
178 list_for_each_entry(thread, &dtc->threads, node) { is_threaded_test_run()
179 if (!thread->done) is_threaded_test_run()
402 struct dmatest_thread *thread = data; dmatest_func() local
428 info = thread->info; dmatest_func()
430 chan = thread->chan; dmatest_func()
432 if (thread->type == DMA_MEMCPY) dmatest_func()
434 else if (thread->type == DMA_XOR) { dmatest_func()
438 } else if (thread->type == DMA_PQ) { dmatest_func()
452 thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); dmatest_func()
453 if (!thread->srcs) dmatest_func()
456 thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL); dmatest_func()
457 if (!thread->srcs[i]) dmatest_func()
460 thread->srcs[i] = NULL; dmatest_func()
462 thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL); dmatest_func()
463 if (!thread->dsts) dmatest_func()
466 thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL); dmatest_func()
467 if (!thread->dsts[i]) dmatest_func()
470 thread->dsts[i] = NULL; dmatest_func()
492 if (thread->type == DMA_MEMCPY) dmatest_func()
494 else if (thread->type == DMA_XOR) dmatest_func()
496 else if (thread->type == DMA_PQ) dmatest_func()
526 dmatest_init_srcs(thread->srcs, src_off, len, dmatest_func()
528 dmatest_init_dsts(thread->dsts, dst_off, len, dmatest_func()
543 void *buf = thread->srcs[i]; dmatest_func()
563 void *buf = thread->dsts[i]; dmatest_func()
580 if (thread->type == DMA_MEMCPY) dmatest_func()
584 else if (thread->type == DMA_XOR) dmatest_func()
589 else if (thread->type == DMA_PQ) { dmatest_func()
661 error_count = dmatest_verify(thread->srcs, 0, src_off, dmatest_func()
663 error_count += dmatest_verify(thread->srcs, src_off, dmatest_func()
666 error_count += dmatest_verify(thread->srcs, src_off + len, dmatest_func()
671 error_count += dmatest_verify(thread->dsts, 0, dst_off, dmatest_func()
673 error_count += dmatest_verify(thread->dsts, dst_off, dmatest_func()
676 error_count += dmatest_verify(thread->dsts, dst_off + len, dmatest_func()
693 for (i = 0; thread->dsts[i]; i++) dmatest_func()
694 kfree(thread->dsts[i]); dmatest_func()
695 kfree(thread->dsts); dmatest_func()
698 for (i = 0; thread->srcs[i]; i++) dmatest_func()
699 kfree(thread->srcs[i]); dmatest_func()
700 kfree(thread->srcs); dmatest_func()
713 thread->done = true; dmatest_func()
721 struct dmatest_thread *thread; dmatest_cleanup_channel() local
725 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { dmatest_cleanup_channel()
726 ret = kthread_stop(thread->task); dmatest_cleanup_channel()
727 pr_debug("thread %s exited with status %d\n", dmatest_cleanup_channel()
728 thread->task->comm, ret); dmatest_cleanup_channel()
729 list_del(&thread->node); dmatest_cleanup_channel()
730 put_task_struct(thread->task); dmatest_cleanup_channel()
731 kfree(thread); dmatest_cleanup_channel()
744 struct dmatest_thread *thread; dmatest_add_threads() local
759 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); dmatest_add_threads()
760 if (!thread) { dmatest_add_threads()
765 thread->info = info; dmatest_add_threads()
766 thread->chan = dtc->chan; dmatest_add_threads()
767 thread->type = type; dmatest_add_threads()
769 thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", dmatest_add_threads()
771 if (IS_ERR(thread->task)) { dmatest_add_threads()
772 pr_warn("Failed to create thread %s-%s%u\n", dmatest_add_threads()
774 kfree(thread); dmatest_add_threads()
778 /* srcbuf and dstbuf are allocated by the thread itself */ dmatest_add_threads()
779 get_task_struct(thread->task); dmatest_add_threads()
780 list_add_tail(&thread->node, &dtc->threads); dmatest_add_threads()
781 wake_up_process(thread->task); dmatest_add_threads()
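The dmatest hits above pair kthread_create() with get_task_struct() before wake_up_process(), and kthread_stop() with put_task_struct() on teardown, so the task_struct stays valid until its exit status has been collected. A minimal sketch of just that spawn/reap pattern; spawn(), reap() and the "worker-%d" name are illustrative.

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/err.h>

    static struct task_struct *spawn(int (*fn)(void *), void *data, int id)
    {
            struct task_struct *task;

            task = kthread_create(fn, data, "worker-%d", id);
            if (IS_ERR(task))
                    return task;
            get_task_struct(task);  /* pin it across kthread_stop() */
            wake_up_process(task);  /* kthread_create() leaves it sleeping */
            return task;
    }

    static int reap(struct task_struct *task)
    {
            int ret = kthread_stop(task);

            put_task_struct(task);  /* drop the reference taken in spawn() */
            return ret;
    }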
/linux-4.1.27/drivers/media/pci/saa7134/
H A Dsaa7134-tvaudio.c318 if (dev->thread.scan1 == dev->thread.scan2 && tvaudio_sleep()
328 return dev->thread.scan1 != dev->thread.scan2; tvaudio_sleep()
491 dev->thread.scan1 = dev->thread.scan2; tvaudio_thread()
492 dprintk("tvaudio thread scan start [%d]\n",dev->thread.scan1); tvaudio_thread()
531 if (dev->thread.scan1 != dev->thread.scan2) tvaudio_thread()
607 if (UNSET == dev->thread.mode) { tvaudio_thread()
611 mode = dev->thread.mode; tvaudio_thread()
621 dev->thread.stopped = 1; tvaudio_thread()
788 dev->thread.scan1 = dev->thread.scan2; tvaudio_thread_ddep()
789 dprintk("tvaudio thread scan start [%d]\n",dev->thread.scan1); tvaudio_thread_ddep()
839 dprintk("tvaudio thread status: 0x%x [%s%s%s]\n", tvaudio_thread_ddep()
865 dev->thread.stopped = 1; tvaudio_thread_ddep()
1027 dev->thread.thread = NULL; saa7134_tvaudio_init2()
1028 dev->thread.scan1 = dev->thread.scan2 = 0; saa7134_tvaudio_init2()
1031 /* start tvaudio thread */ saa7134_tvaudio_init2()
1032 dev->thread.thread = kthread_run(my_thread, dev, "%s", dev->name); saa7134_tvaudio_init2()
1033 if (IS_ERR(dev->thread.thread)) { saa7134_tvaudio_init2()
1053 /* shutdown tvaudio thread */ saa7134_tvaudio_fini()
1054 if (dev->thread.thread && !dev->thread.stopped) saa7134_tvaudio_fini()
1055 kthread_stop(dev->thread.thread); saa7134_tvaudio_fini()
1067 } else if (dev->thread.thread) { saa7134_tvaudio_do_scan()
1068 dev->thread.mode = UNSET; saa7134_tvaudio_do_scan()
1069 dev->thread.scan2++; saa7134_tvaudio_do_scan()
1071 if (!dev->insuspend && !dev->thread.stopped) saa7134_tvaudio_do_scan()
1072 wake_up_process(dev->thread.thread); saa7134_tvaudio_do_scan()
/linux-4.1.27/drivers/staging/speakup/
H A DMakefile29 thread.o \
/linux-4.1.27/arch/parisc/include/asm/
H A Dftrace.h8 * Stack of return addresses for functions of a thread.
H A Dthread_info.h30 /* how to get the thread information struct from C */
35 /* thread information allocation */
44 * thread information flags
/linux-4.1.27/arch/xtensa/include/asm/
H A Dthread_info.h48 unsigned long status; /* thread-synchronous flags */
52 mm_segment_t addr_limit; /* thread address space */
66 * macros/functions for gaining access to the thread information structure
83 /* how to get the thread information struct from C */ current_thread_info()
94 /* how to get the thread information struct from ASM */
102 * thread information flags
127 * ever touches our thread-synchronous status, so we don't
/linux-4.1.27/arch/cris/arch-v10/kernel/
H A Dprocess.c39 * Free current thread data structures etc..
79 * Return saved PC of a blocked thread.
88 * new thread is scheduled.
90 * also setup the thread switching structure which is used to keep
91 * thread-specific data during _resumes.
114 p->thread.ksp = (unsigned long) swstack; copy_thread()
115 p->thread.usp = 0; copy_thread()
132 p->thread.usp = usp ?: rdusp(); copy_thread()
136 p->thread.ksp = (unsigned long) swstack; copy_thread()
157 esp = p->thread.esp; get_wchan()
/linux-4.1.27/tools/virtio/virtio-trace/
H A Dtrace-agent-rw.c2 * Read/write thread of a guest agent for virtio-trace
70 pr_err("Could not create pipe in rw-thread(%d)\n", cpu); rw_thread_init()
79 pr_err("Could not change pipe size in rw-thread(%d)\n", cpu); rw_thread_init()
93 /* Bind a thread to a cpu */ bind_cpu()
101 /* bind my thread to cpu_num by assigning zero to the first argument */ bind_cpu()
126 * Each thread reads the trace_pipe_raw of the cpu bound to that rw_thread_main()
127 * thread, so contention between threads does not occur. rw_thread_main()
133 pr_err("Splice_read in rw-thread(%d)\n", ts->cpu_num); rw_thread_main()
155 pr_err("Splice_write in rw-thread(%d)\n", rw_thread_main()
187 pr_err("Could not create a rw thread(%d)\n", rw_ti->cpu_num); rw_thread_run()
H A Dtrace-agent.h26 * rw_thread_info - structure managing a read/write thread a cpu
27 * @cpu_num: cpu number operating this read/write thread
55 /* for trace read/write thread */
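The rw-thread comments above describe each per-cpu thread splicing from its trace_pipe_raw through a pipe to the output descriptor. A minimal userspace sketch of that loop, assuming in_fd is the per-cpu trace_pipe_raw and out_fd the virtio port; rw_loop and the 4096-byte chunk are illustrative (splice() needs one end of each call to be a pipe).

    #define _GNU_SOURCE
    #include <sys/types.h>
    #include <fcntl.h>
    #include <unistd.h>

    static int rw_loop(int in_fd, int out_fd)
    {
            int pipefd[2];
            ssize_t n;

            if (pipe(pipefd) < 0)
                    return -1;
            for (;;) {
                    /* kernel-to-pipe, then pipe-to-output, no user copy */
                    n = splice(in_fd, NULL, pipefd[1], NULL, 4096, 0);
                    if (n <= 0)
                            break;
                    if (splice(pipefd[0], NULL, out_fd, NULL, n, 0) < 0)
                            break;
            }
            close(pipefd[0]);
            close(pipefd[1]);
            return 0;
    }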
/linux-4.1.27/drivers/net/wireless/rsi/
H A Drsi_common.h61 struct rsi_thread *thread, rsi_create_kthread()
65 init_completion(&thread->completion); rsi_create_kthread()
66 thread->task = kthread_run(func_ptr, common, "%s", name); rsi_create_kthread()
67 if (IS_ERR(thread->task)) rsi_create_kthread()
68 return (int)PTR_ERR(thread->task); rsi_create_kthread()
60 rsi_create_kthread(struct rsi_common *common, struct rsi_thread *thread, void *func_ptr, u8 *name) rsi_create_kthread() argument
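rsi_create_kthread() above couples kthread_run() with a completion. A minimal sketch of one way such a handshake is used, assuming the completion lets the creator wait for the thread's one-time setup; struct worker and all names here are hypothetical.

    #include <linux/kthread.h>
    #include <linux/completion.h>
    #include <linux/sched.h>
    #include <linux/err.h>

    struct worker {
            struct task_struct *task;
            struct completion setup_done;
    };

    static int worker_fn(void *arg)
    {
            struct worker *w = arg;

            /* ... one-time setup ... */
            complete(&w->setup_done);       /* unblock the creator */

            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);
            return 0;
    }

    static int worker_start(struct worker *w)
    {
            init_completion(&w->setup_done);
            w->task = kthread_run(worker_fn, w, "example-thread");
            if (IS_ERR(w->task))
                    return PTR_ERR(w->task);
            wait_for_completion(&w->setup_done);    /* setup finished */
            return 0;
    }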
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/
H A Dkfd_process.c59 static struct kfd_process *find_process(const struct task_struct *thread);
60 static struct kfd_process *create_process(const struct task_struct *thread);
77 struct kfd_process *kfd_create_process(const struct task_struct *thread) kfd_create_process() argument
83 if (thread->mm == NULL) kfd_create_process()
87 if (thread->group_leader->mm != thread->mm) kfd_create_process()
91 down_write(&thread->mm->mmap_sem); kfd_create_process()
101 process = find_process(thread); kfd_create_process()
106 process = create_process(thread); kfd_create_process()
110 up_write(&thread->mm->mmap_sem); kfd_create_process()
115 struct kfd_process *kfd_get_process(const struct task_struct *thread) kfd_get_process() argument
119 if (thread->mm == NULL) kfd_get_process()
123 if (thread->group_leader->mm != thread->mm) kfd_get_process()
126 process = find_process(thread); kfd_get_process()
143 static struct kfd_process *find_process(const struct task_struct *thread) find_process() argument
149 p = find_process_by_mm(thread->mm); find_process()
254 static struct kfd_process *create_process(const struct task_struct *thread) create_process() argument
275 process->mm = thread->mm; create_process()
286 process->lead_thread = thread->group_leader; create_process()
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Dstatahead.c227 * 2) All other threads, including statahead thread and ptlrpcd thread, ll_sa_entry_alloc()
235 * by the statahead thread, following the rule 2). ll_sa_entry_alloc()
717 * thread enqueues lock on parent in readdir and another ll_statahead_interpret()
755 * when statahead thread tries to enqueue lock on parent ll_statahead_interpret()
976 struct ptlrpc_thread *thread = &sai->sai_agl_thread; ll_agl_thread() local
979 thread->t_pid = current_pid(); ll_agl_thread()
980 CDEBUG(D_READA, "agl thread started: sai %p, parent %pd\n", ll_agl_thread()
986 if (thread_is_init(thread)) ll_agl_thread()
987 /* If someone else has changed the thread state ll_agl_thread()
990 thread_set_flags(thread, SVC_RUNNING); ll_agl_thread()
992 wake_up(&thread->t_ctl_waitq); ll_agl_thread()
995 l_wait_event(thread->t_ctl_waitq, ll_agl_thread()
997 !thread_is_running(thread), ll_agl_thread()
1000 if (!thread_is_running(thread)) ll_agl_thread()
1004 /* The statahead thread maybe help to process AGL entries, ll_agl_thread()
1026 thread_set_flags(thread, SVC_STOPPED); ll_agl_thread()
1028 wake_up(&thread->t_ctl_waitq); ll_agl_thread()
1030 CDEBUG(D_READA, "agl thread stopped: sai %p, parent %pd\n", ll_agl_thread()
1037 struct ptlrpc_thread *thread = &sai->sai_agl_thread; ll_start_agl() local
1042 CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n", ll_start_agl()
1049 CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task)); ll_start_agl()
1050 thread_set_flags(thread, SVC_STOPPED); ll_start_agl()
1054 l_wait_event(thread->t_ctl_waitq, ll_start_agl()
1055 thread_is_running(thread) || thread_is_stopped(thread), ll_start_agl()
1067 struct ptlrpc_thread *thread = &sai->sai_thread; ll_statahead_thread() local
1076 thread->t_pid = current_pid(); ll_statahead_thread()
1077 CDEBUG(D_READA, "statahead thread starting: sai %p, parent %pd\n", ll_statahead_thread()
1085 if (thread_is_init(thread)) ll_statahead_thread()
1086 /* If someone else has changed the thread state ll_statahead_thread()
1089 thread_set_flags(thread, SVC_RUNNING); ll_statahead_thread()
1091 wake_up(&thread->t_ctl_waitq); ll_statahead_thread()
1157 l_wait_event(thread->t_ctl_waitq, ll_statahead_thread()
1161 !thread_is_running(thread), ll_statahead_thread()
1168 if (unlikely(!thread_is_running(thread))) { ll_statahead_thread()
1190 !thread_is_running(thread))) { ll_statahead_thread()
1216 l_wait_event(thread->t_ctl_waitq, ll_statahead_thread()
1219 !thread_is_running(thread), ll_statahead_thread()
1225 if (unlikely(!thread_is_running(thread))) { ll_statahead_thread()
1237 thread_is_running(thread)) { ll_statahead_thread()
1272 CDEBUG(D_READA, "stop agl thread: sai %p pid %u\n", ll_statahead_thread()
1284 thread_set_flags(thread, SVC_STOPPING); ll_statahead_thread()
1293 thread_set_flags(thread, SVC_STOPPED); ll_statahead_thread()
1296 wake_up(&thread->t_ctl_waitq); ll_statahead_thread()
1299 CDEBUG(D_READA, "statahead thread stopped: sai %p, parent %pd\n", ll_statahead_thread()
1324 struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread; ll_stop_statahead() local
1326 if (!thread_is_stopped(thread)) { ll_stop_statahead()
1327 thread_set_flags(thread, SVC_STOPPING); ll_stop_statahead()
1329 wake_up(&thread->t_ctl_waitq); ll_stop_statahead()
1331 CDEBUG(D_READA, "stop statahead thread: sai %p pid %u\n", ll_stop_statahead()
1332 lli->lli_sai, (unsigned int)thread->t_pid); ll_stop_statahead()
1333 l_wait_event(thread->t_ctl_waitq, ll_stop_statahead()
1334 thread_is_stopped(thread), ll_stop_statahead()
1481 struct ptlrpc_thread *thread = &sai->sai_thread; ll_sai_unplug() local
1500 if (sa_low_hit(sai) && thread_is_running(thread)) { ll_sai_unplug()
1502 CDEBUG(D_READA, "Statahead for dir " DFID " hit ratio too low: hit/miss %llu/%llu, sent/replied %llu/%llu, stopping statahead thread\n", ll_sai_unplug()
1507 if (!thread_is_stopped(thread)) ll_sai_unplug()
1508 thread_set_flags(thread, SVC_STOPPING); ll_sai_unplug()
1513 if (!thread_is_stopped(thread)) ll_sai_unplug()
1514 wake_up(&thread->t_ctl_waitq); ll_sai_unplug()
1518 * Start statahead thread if this is the first dir entry.
1519 * Otherwise, if a thread is already started, wait until it is ahead of me.
1533 struct ptlrpc_thread *thread; do_statahead_enter() local
1541 thread = &sai->sai_thread; do_statahead_enter()
1542 if (unlikely(thread_is_stopped(thread) && do_statahead_enter()
1554 * thread does not skip so many hidden dentries do_statahead_enter()
1588 thread_is_stopped(thread), do_statahead_enter()
1681 CDEBUG(D_READA, "start statahead thread: sai %p, parent %pd\n", do_statahead_enter()
1686 * default reference can be dropped by another thread calling do_statahead_enter()
1695 thread = &sai->sai_thread; do_statahead_enter()
1697 CERROR("can't start ll_sa thread, rc: %d\n", rc); do_statahead_enter()
1700 thread_set_flags(thread, SVC_STOPPED); do_statahead_enter()
1710 l_wait_event(thread->t_ctl_waitq, do_statahead_enter()
1711 thread_is_running(thread) || thread_is_stopped(thread), do_statahead_enter()
/linux-4.1.27/arch/cris/arch-v32/kernel/
H A Dprocess.c32 * Free current thread data structures etc..
85 * Return saved PC of a blocked thread.
94 * It will be unnested during _resume and _ret_from_sys_call when the new thread
97 * Also setup the thread switching structure which is used to keep
98 * thread-specific data during _resumes.
123 p->thread.ksp = (unsigned long) swstack; copy_thread()
124 p->thread.usp = 0; copy_thread()
149 p->thread.usp = usp ?: rdusp(); copy_thread()
150 p->thread.ksp = (unsigned long) swstack; copy_thread()
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
H A Dsec_gc.c89 /* signal before list_del to make iteration in gc thread safe */ sptlrpc_gc_del_sec()
166 struct ptlrpc_thread *thread = (struct ptlrpc_thread *) arg; sec_gc_main() local
171 /* Record that the thread is running */ sec_gc_main()
172 thread_set_flags(thread, SVC_RUNNING); sec_gc_main()
173 wake_up(&thread->t_ctl_waitq); sec_gc_main()
178 thread_clear_flags(thread, SVC_SIGNAL); sec_gc_main()
205 l_wait_event(thread->t_ctl_waitq, sec_gc_main()
206 thread_is_stopping(thread) || sec_gc_main()
207 thread_is_signal(thread), sec_gc_main()
210 if (thread_test_and_clear_flags(thread, SVC_STOPPING)) sec_gc_main()
214 thread_set_flags(thread, SVC_STOPPED); sec_gc_main()
215 wake_up(&thread->t_ctl_waitq); sec_gc_main()
228 /* initialize thread control */ sptlrpc_gc_init()
234 CERROR("can't start gc thread: %ld\n", PTR_ERR(task)); sptlrpc_gc_init()
H A Dservice.c133 /* NB: we might allow more than one thread in the future */ ptlrpc_grow_req_bufs()
144 /* NB: another thread might have recycled enough rqbds, we ptlrpc_grow_req_bufs()
208 int hrt_id; /* thread ID */
222 /* round-robin rotor for choosing thread */
271 * Choose an hr thread to dispatch requests to.
498 * CPT affinity service could have percpt thread-pool instead ptlrpc_server_nthreads_check()
499 * of a global thread-pool, which means user might not always ptlrpc_server_nthreads_check()
550 * one thread/core because service threads are supposed to ptlrpc_server_nthreads_check()
561 /* depress thread factor for hyper-thread */ ptlrpc_server_nthreads_check()
1599 /* leave just 1 thread for normal RPCs */ ptlrpc_server_allow_high()
1626 * requests), or if there are enough idle threads that a later thread can do
1637 /* leave just 1 thread for normal RPCs */ ptlrpc_server_allow_normal()
1727 struct ptlrpc_thread *thread) ptlrpc_server_handle_req_in()
1845 req->rq_svc_thread = thread; ptlrpc_server_handle_req_in()
1869 struct ptlrpc_thread *thread) ptlrpc_server_handle_request()
1917 request->rq_session.lc_thread = thread; ptlrpc_server_handle_request()
1923 request->rq_svc_thread = thread; ptlrpc_server_handle_request()
1924 if (thread) ptlrpc_server_handle_request()
2190 ptlrpc_thread_stopping(struct ptlrpc_thread *thread) ptlrpc_thread_stopping() argument
2192 return thread_is_stopping(thread) || ptlrpc_thread_stopping()
2193 thread->t_svcpt->scp_service->srv_is_stopping; ptlrpc_thread_stopping()
2222 struct ptlrpc_thread *thread) ptlrpc_wait_event()
2229 lc_watchdog_disable(thread->t_watchdog); ptlrpc_wait_event()
2235 ptlrpc_thread_stopping(thread) || ptlrpc_wait_event()
2241 if (ptlrpc_thread_stopping(thread)) ptlrpc_wait_event()
2245 lc_watchdog_touch(thread->t_watchdog, ptlrpc_wait_event()
2252 * Main thread body for service threads.
2259 struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg; ptlrpc_main() local
2260 struct ptlrpc_service_part *svcpt = thread->t_svcpt; ptlrpc_main()
2267 thread->t_pid = current_pid(); ptlrpc_main()
2276 svc->srv_name, thread->t_name, svcpt->scp_cpt); ptlrpc_main()
2289 rc = svc->srv_ops.so_thr_init(thread); ptlrpc_main()
2305 thread->t_env = env; ptlrpc_main()
2306 env->le_ctx.lc_thread = thread; ptlrpc_main()
2328 LASSERT(thread_is_starting(thread)); ptlrpc_main()
2329 thread_clear_flags(thread, SVC_STARTING); ptlrpc_main()
2335 * to stop the service while this new thread has been dynamically ptlrpc_main()
2338 thread_add_flags(thread, SVC_RUNNING); ptlrpc_main()
2343 wake_up(&thread->t_ctl_waitq); ptlrpc_main()
2346 thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt), ptlrpc_main()
2355 CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id, ptlrpc_main()
2359 while (!ptlrpc_thread_stopping(thread)) { ptlrpc_main()
2360 if (ptlrpc_wait_event(svcpt, thread)) ptlrpc_main()
2374 ptlrpc_server_handle_req_in(svcpt, thread); ptlrpc_main()
2388 ptlrpc_server_handle_request(svcpt, thread); ptlrpc_main()
2404 lc_watchdog_delete(thread->t_watchdog); ptlrpc_main()
2405 thread->t_watchdog = NULL; ptlrpc_main()
2413 svc->srv_ops.so_thr_done(thread); ptlrpc_main()
2420 CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n", ptlrpc_main()
2421 thread, thread->t_pid, thread->t_id, rc); ptlrpc_main()
2424 if (thread_test_and_clear_flags(thread, SVC_STARTING)) ptlrpc_main()
2427 if (thread_test_and_clear_flags(thread, SVC_RUNNING)) { ptlrpc_main()
2432 thread->t_id = rc; ptlrpc_main()
2433 thread_add_flags(thread, SVC_STOPPED); ptlrpc_main()
2435 wake_up(&thread->t_ctl_waitq); ptlrpc_main()
2548 CERROR("Reply handling thread %d:%d Failed on starting: rc = %d\n", cfs_percpt_for_each()
2559 struct ptlrpc_thread *thread; ptlrpc_svcpt_stop_threads() local
2566 /* let the thread know that we would like it to stop asap */ ptlrpc_svcpt_stop_threads()
2567 list_for_each_entry(thread, &svcpt->scp_threads, t_link) { ptlrpc_svcpt_stop_threads()
2568 CDEBUG(D_INFO, "Stopping thread %s #%u\n", ptlrpc_svcpt_stop_threads()
2569 svcpt->scp_service->srv_thread_name, thread->t_id); ptlrpc_svcpt_stop_threads()
2570 thread_add_flags(thread, SVC_STOPPING); ptlrpc_svcpt_stop_threads()
2576 thread = list_entry(svcpt->scp_threads.next, ptlrpc_svcpt_stop_threads()
2578 if (thread_is_stopped(thread)) { ptlrpc_svcpt_stop_threads()
2579 list_del(&thread->t_link); ptlrpc_svcpt_stop_threads()
2580 list_add(&thread->t_link, &zombie); ptlrpc_svcpt_stop_threads()
2585 CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n", ptlrpc_svcpt_stop_threads()
2586 svcpt->scp_service->srv_thread_name, thread->t_id); ptlrpc_svcpt_stop_threads()
2587 l_wait_event(thread->t_ctl_waitq, ptlrpc_svcpt_stop_threads()
2588 thread_is_stopped(thread), &lwi); ptlrpc_svcpt_stop_threads()
2596 thread = list_entry(zombie.next, ptlrpc_svcpt_stop_threads()
2598 list_del(&thread->t_link); ptlrpc_svcpt_stop_threads()
2599 OBD_FREE_PTR(thread); ptlrpc_svcpt_stop_threads()
2642 CERROR("cannot start %s thread #%d_%d: rc %d\n", ptlrpc_start_threads()
2652 struct ptlrpc_thread *thread; ptlrpc_start_thread() local
2673 OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt); ptlrpc_start_thread()
2674 if (thread == NULL) ptlrpc_start_thread()
2676 init_waitqueue_head(&thread->t_ctl_waitq); ptlrpc_start_thread()
2681 OBD_FREE_PTR(thread); ptlrpc_start_thread()
2690 OBD_FREE_PTR(thread); ptlrpc_start_thread()
2692 CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n", ptlrpc_start_thread()
2698 CDEBUG(D_INFO, "Creating thread %s #%d race, retry later\n", ptlrpc_start_thread()
2704 thread->t_id = svcpt->scp_thr_nextid++; ptlrpc_start_thread()
2705 thread_add_flags(thread, SVC_STARTING); ptlrpc_start_thread()
2706 thread->t_svcpt = svcpt; ptlrpc_start_thread()
2708 list_add(&thread->t_link, &svcpt->scp_threads); ptlrpc_start_thread()
2712 snprintf(thread->t_name, sizeof(thread->t_name), "%s%02d_%03d", ptlrpc_start_thread()
2713 svc->srv_thread_name, svcpt->scp_cpt, thread->t_id); ptlrpc_start_thread()
2715 snprintf(thread->t_name, sizeof(thread->t_name), "%s_%04d", ptlrpc_start_thread()
2716 svc->srv_thread_name, thread->t_id); ptlrpc_start_thread()
2719 CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name); ptlrpc_start_thread()
2720 rc = PTR_ERR(kthread_run(ptlrpc_main, thread, "%s", thread->t_name)); ptlrpc_start_thread()
2722 CERROR("cannot start thread '%s': rc %d\n", ptlrpc_start_thread()
2723 thread->t_name, rc); ptlrpc_start_thread()
2726 if (thread_is_stopping(thread)) { ptlrpc_start_thread()
2730 thread_add_flags(thread, SVC_STOPPED); ptlrpc_start_thread()
2731 wake_up(&thread->t_ctl_waitq); ptlrpc_start_thread()
2734 list_del(&thread->t_link); ptlrpc_start_thread()
2736 OBD_FREE_PTR(thread); ptlrpc_start_thread()
2744 l_wait_event(thread->t_ctl_waitq, ptlrpc_start_thread()
2745 thread_is_running(thread) || thread_is_stopped(thread), ptlrpc_start_thread()
2748 rc = thread_is_stopped(thread) ? thread->t_id : 0; ptlrpc_start_thread()
2943 * thread noodling the request queue now */ ptlrpc_service_for_each_part()
1726 ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt, struct ptlrpc_thread *thread) ptlrpc_server_handle_req_in() argument
1868 ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, struct ptlrpc_thread *thread) ptlrpc_server_handle_request() argument
2221 ptlrpc_wait_event(struct ptlrpc_service_part *svcpt, struct ptlrpc_thread *thread) ptlrpc_wait_event() argument
/linux-4.1.27/arch/avr32/include/asm/
H A Dprocessor.h126 * Do necessary setup to start up a newly executed thread.
138 /* Free all resources held by a thread */
141 /* Return saved PC of a blocked thread */
142 #define thread_saved_pc(tsk) ((tsk)->thread.cpu_context.pc)
153 #define KSTK_EIP(tsk) ((tsk)->thread.cpu_context.pc)
154 #define KSTK_ESP(tsk) ((tsk)->thread.cpu_context.ksp)
H A Dswitch_to.h41 last = __switch_to(prev, &prev->thread.cpu_context + 1, \
42 &next->thread.cpu_context); \
/linux-4.1.27/tools/testing/selftests/timers/
H A Dthreadtest.c89 /* The shared thread shares a global list
90 * that each thread fills while holding the lock.
112 /* Each independent thread fills in its own
141 void *(*thread)(void *) = shared_thread; main()
156 thread = independent_thread; main()
181 pthread_create(&pth[i], 0, thread, 0); main()
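The test above selects between a shared worker (all threads update global state under one lock) and an independent worker (per-thread state). A minimal sketch of the shared mode, using a counter in place of the global list; shared_worker and the loop counts are illustrative.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static long shared_total;

    static void *shared_worker(void *arg)
    {
            for (int i = 0; i < 1000; i++) {
                    pthread_mutex_lock(&lock);      /* serialize updates */
                    shared_total++;
                    pthread_mutex_unlock(&lock);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t pth[4];

            for (int i = 0; i < 4; i++)
                    pthread_create(&pth[i], 0, shared_worker, 0);
            for (int i = 0; i < 4; i++)
                    pthread_join(pth[i], NULL);
            printf("total: %ld\n", shared_total);
            return 0;
    }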
/linux-4.1.27/kernel/
H A Dkthread.c1 /* Kernel thread helper functions.
92 * Similar to kthread_should_stop(), but this keeps the thread alive
93 * and in a park position. kthread_unpark() "restarts" the thread and
94 * calls the thread function again.
250 * @namefmt: printf-style name for the thread.
253 * thread. The thread will be stopped: use wake_up_process() to start
256 * If thread is going to be bound on a particular cpu, give its node
258 * When woken, the thread will run @threadfn() with @data as its
260 * standalone thread for which no one will call kthread_stop(), or
292 * new kernel thread. kthread_create_on_node()
296 * If I was SIGKILLed before kthreadd (or new kernel thread) kthread_create_on_node()
298 * that thread. kthread_create_on_node()
303 * kthreadd (or new kernel thread) will call complete() kthread_create_on_node()
318 * The kernel thread should not inherit these properties. kthread_create_on_node()
342 * @p: thread created by kthread_create().
346 * except that @cpu doesn't need to be online, and the thread must be
359 * @cpu: The cpu on which the thread should be bound,
360 * @namefmt: printf-style name for the thread. Format is restricted
363 * Description: This helper function creates and names a kernel thread
364 * The thread will be woken and put into park mode.
378 /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */ kthread_create_on_cpu()
400 * kthread_unpark - unpark a thread created by kthread_create().
401 * @k: thread created by kthread_create().
404 * waits for it to return. If the thread is marked percpu then its
416 * kthread_park - park a thread created by kthread_create().
417 * @k: thread created by kthread_create().
421 * instead of calling wake_up_process(): the thread will park without
424 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
446 * kthread_stop - stop a thread created by kthread_create().
447 * @k: thread created by kthread_create().
451 * instead of calling wake_up_process(): the thread will exit without
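Read together, the kthread.c comments above describe the full kthread lifecycle: create (stopped), wake with wake_up_process(), optionally park/unpark, then stop. A minimal sketch of a caller driving that lifecycle, assuming only the API documented in the hits (hypothetical module and thread names):

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>

/* hypothetical worker: loops until kthread_stop(), parks on request */
static int demo_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme(); /* sleep here until kthread_unpark() */
		msleep(100);              /* periodic work would go here */
	}
	return 0; /* this value is what kthread_stop() returns */
}

static struct task_struct *demo_task;

static int __init demo_init(void)
{
	demo_task = kthread_create(demo_thread_fn, NULL, "demo-thread");
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);
	wake_up_process(demo_task); /* created threads start stopped */
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task); /* wakes the thread and waits for it to exit */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");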
H A Dsmpboot.c44 * idle_init - Initialize the idle thread for a cpu
45 * @cpu: The cpu for which the idle thread should be initialized
47 * Creates the thread if it does not exist.
96 * smpboot_thread_fn - percpu hotplug thread loop function
97 * @data: thread data pointer
99 * Checks for thread stop and park conditions. Calls the necessary
101 * thread.
103 * Returns 1 when the thread should exit, 0 otherwise.
194 * callback. At least the migration thread callback __smpboot_create_thread()
274 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
275 * @plug_thread: Hotplug thread descriptor
303 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
304 * @plug_thread: Hotplug thread descriptor
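The smpboot.c hits describe per-cpu hotplug threads whose loop is smpboot_thread_fn(). A sketch of how a client registers one, modeled on in-tree users such as ksoftirqd (the struct smp_hotplug_thread field names are assumptions from memory of that API; demo names are hypothetical):

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);
static DEFINE_PER_CPU(bool, demo_pending);

/* polled by smpboot_thread_fn() to decide whether to call thread_fn */
static int demo_should_run(unsigned int cpu)
{
	return per_cpu(demo_pending, cpu);
}

/* the actual per-cpu work, run in the hotplug thread's context */
static void demo_fn(unsigned int cpu)
{
	per_cpu(demo_pending, cpu) = false;
	/* per-cpu work goes here */
}

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_fn,
	.thread_comm		= "demo/%u", /* %u becomes the cpu number */
};

static int __init demo_smpboot_init(void)
{
	/* creates, parks and unparks the thread on every online cpu */
	return smpboot_register_percpu_thread(&demo_threads);
}

The registration call handles hotplug for you: new CPUs get a thread created and unparked, offlined CPUs get theirs parked.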
/linux-4.1.27/arch/microblaze/include/asm/
H A Dprocessor.h30 /* Do necessary setup to start up a newly executed thread. */
68 /* Free all resources held by a thread. */ release_thread()
73 /* Free all resources held by a thread. */ exit_thread()
125 /* Free all resources held by a thread. */ release_thread()
130 /* Free current thread data structures etc. */ exit_thread()
135 /* Return saved (kernel) PC of a blocked thread. */
137 ((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0)
H A Dthread_info.h69 unsigned long status; /* thread-synchronous flags */
72 mm_segment_t addr_limit; /* thread address space */
78 * macros/functions for gaining access to the thread information structure
92 /* how to get the thread information struct from C */ current_thread_info()
100 /* thread information allocation */
104 * thread information flags
146 * ever touches our thread-synchronous status, so we don't
/linux-4.1.27/arch/mips/cavium-octeon/
H A Dcpu.c29 prefetch(&current->thread.cp2); cnmips_cu2_call()
34 octeon_cop2_restore(&(current->thread.cp2)); cnmips_cu2_call()
/linux-4.1.27/arch/mips/netlogic/common/
H A Dsmpboot.S109 andi t2, t0, 0x3 /* thread num */
118 bnez t2, 1f /* skip thread programming */
119 nop /* for thread id != 0 */
122 * XLR MMU setup only for first thread in core
126 li t2, 6 /* XLR thread mode mask */
128 and t2, t1, t2 /* t2 - current thread mode */
130 lw v1, BOOT_THREAD_MODE(v0) /* v1 - new thread mode */
135 and t2, t1, t3 /* mask out old thread mode */
/linux-4.1.27/samples/pktgen/
H A Dpktgen.conf-1-120 # thread config
21 # Each CPU has its own thread. One CPU example. We add eth1.
H A Dpktgen.conf-1-1-flows20 # thread config
21 # Each CPU has its own thread. One CPU example. We add eth1.
H A Dpktgen.conf-1-1-ip620 # thread config
21 # Each CPU has its own thread. One CPU example. We add eth1.
H A Dpktgen.conf-1-1-ip6-rdos20 # thread config
21 # Each CPU has its own thread. One CPU example. We add eth1.
H A Dpktgen.conf-1-1-rdos20 # thread config
21 # Each CPU has its own thread. One CPU example. We add eth1.
H A Dpktgen.conf-1-220 # thread config
21 # One CPU means one thread. One CPU example. We add eth1, eth2 respectively.

H A Dpktgen.conf-2-120 # thread config
21 # Each CPU has its own thread. Two CPU example. We add eth1 to the first
30 # We need to remove old config since we don't use this thread. We can only
/linux-4.1.27/tools/perf/arch/x86/util/
H A Dunwind-libdw.c5 bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg) libdw__arch_set_initial_registers() argument
50 return dwfl_thread_state_registers(thread, 0, nregs, dwarf_regs); libdw__arch_set_initial_registers()
/linux-4.1.27/drivers/media/usb/pvrusb2/
H A Dpvrusb2-dvb.h25 struct task_struct *thread; member in struct:pvr2_dvb_adapter
/linux-4.1.27/arch/metag/tbx/
H A Dtbiroot.S11 * interrupt and background processing on the current thread.
56 * Return identifier of the current thread in TBI segment or signal format with
57 * secondary mask to indicate privilege and interrupt level of thread
69 MOV D0Re0,TXENABLE /* Which thread are we? */
H A Dtbisoft.S43 * the calling thread being saved in the rpSaveCtx location with a drop-thru
64 * A0StP is then saved as the base of the TBICTX of the thread
80 * of a thread will restore.
84 SETD [D1Ar3],A0StP /* Record pCtx of this thread */
85 MOVT D0Re0,#TBICTX_SOFT_BIT /* Only soft thread state */
89 MOV D1Re0,#0 /* resume of the thread */
166 * Generate a new soft thread context ready for its first outing.
168 * D1Ar1 - Region of memory to be used as the new soft thread stack
169 * D0Ar2 - Main line routine for new soft thread
171 * The routine returns the initial PTBICTX value for the new thread
197 MOV D0Re0,A0.2 /* Return pCtx for new thread */
204 MOVT D0Ar2,#TBICTX_SOFT_BIT /* Only soft thread state */
/linux-4.1.27/init/
H A Dinit_task.c22 * Initial thread structure. Alignment of this is handled by a special
/linux-4.1.27/drivers/staging/unisys/uislib/
H A Duisthread.c41 /* used to stop the thread */ uisthread_start()
59 return; /* thread not running */ uisthread_stop()
62 /* give up if the thread has NOT died in 1 minute */ uisthread_stop()
/linux-4.1.27/arch/alpha/include/asm/
H A Dprocessor.h42 /* Return saved PC of a blocked thread. */
46 /* Do necessary setup to start up a newly executed thread. */
50 /* Free all resources held by a thread. */
/linux-4.1.27/include/linux/iio/
H A Dtrigger_consumer.h24 * @thread: threaded interrupt part
36 irqreturn_t (*thread)(int irq, void *p); member in struct:iio_poll_func
46 irqreturn_t (*thread)(int irq, void *p),
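The trigger_consumer.h hits show struct iio_poll_func pairing a hard-irq top half with a threaded bottom half. A sketch of how a driver might allocate one at probe time, assuming the iio_alloc_pollfunc() prototype visible in the hit (the handler and setup function are hypothetical):

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>

/* hypothetical threaded half: runs in process context after the top half */
static irqreturn_t demo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;

	/* read the device and push samples to the buffer here */
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

static int demo_setup_pollfunc(struct iio_dev *indio_dev)
{
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &demo_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	return indio_dev->pollfunc ? 0 : -ENOMEM;
}

Here iio_pollfunc_store_time serves as the non-threaded half (it timestamps the sample), and the threaded half does the sleepable bus I/O.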
/linux-4.1.27/arch/sh/kernel/cpu/sh2a/
H A Dfpu.c55 : "0" ((char *)(&tsk->thread.xstate->hardfpu.status)), save_fpu()
87 : "0" (tsk->thread.xstate), "r" (FPSCR_RCHG) restore_fpu()
459 if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) { ieee_fpe_handler()
461 denormal_to_double (&tsk->thread.xstate->hardfpu, ieee_fpe_handler()
476 hx = tsk->thread.xstate->hardfpu.fp_regs[n]; ieee_fpe_handler()
477 hy = tsk->thread.xstate->hardfpu.fp_regs[m]; ieee_fpe_handler()
478 fpscr = tsk->thread.xstate->hardfpu.fpscr; ieee_fpe_handler()
488 | tsk->thread.xstate->hardfpu.fp_regs[n+1]; ieee_fpe_handler()
490 | tsk->thread.xstate->hardfpu.fp_regs[m+1]; ieee_fpe_handler()
495 tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; ieee_fpe_handler()
496 tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff; ieee_fpe_handler()
505 tsk->thread.xstate->hardfpu.fp_regs[n] = hx; ieee_fpe_handler()
519 hx = tsk->thread.xstate->hardfpu.fp_regs[n]; ieee_fpe_handler()
520 hy = tsk->thread.xstate->hardfpu.fp_regs[m]; ieee_fpe_handler()
521 fpscr = tsk->thread.xstate->hardfpu.fpscr; ieee_fpe_handler()
531 | tsk->thread.xstate->hardfpu.fp_regs[n+1]; ieee_fpe_handler()
533 | tsk->thread.xstate->hardfpu.fp_regs[m+1]; ieee_fpe_handler()
538 tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; ieee_fpe_handler()
539 tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff; ieee_fpe_handler()
548 tsk->thread.xstate->hardfpu.fp_regs[n] = hx; ieee_fpe_handler()
566 tsk->thread.xstate->hardfpu.fpscr &= BUILD_TRAP_HANDLER()
/linux-4.1.27/tools/usb/
H A Dffs-test.c286 struct thread;
288 static ssize_t read_wrap(struct thread *t, void *buf, size_t nbytes);
289 static ssize_t write_wrap(struct thread *t, const void *buf, size_t nbytes);
290 static ssize_t ep0_consume(struct thread *t, const void *buf, size_t nbytes);
291 static ssize_t fill_in_buf(struct thread *t, void *buf, size_t nbytes);
292 static ssize_t empty_out_buf(struct thread *t, const void *buf, size_t nbytes);
295 static struct thread { struct
299 ssize_t (*in)(struct thread *, void *, size_t);
302 ssize_t (*out)(struct thread *, const void *, size_t);
331 static void init_thread(struct thread *t) init_thread()
342 struct thread *t = arg; cleanup_thread()
374 struct thread *t = arg; start_thread_helper()
416 static void start_thread(struct thread *t) start_thread()
424 static void join_thread(struct thread *t) join_thread()
429 err("%s: joining thread", t->filename); join_thread()
435 static ssize_t read_wrap(struct thread *t, void *buf, size_t nbytes) read_wrap()
440 static ssize_t write_wrap(struct thread *t, const void *buf, size_t nbytes) write_wrap()
453 fill_in_buf(struct thread *ignore, void *buf, size_t nbytes) fill_in_buf()
478 empty_out_buf(struct thread *ignore, const void *buf, size_t nbytes) empty_out_buf()
540 ep0_consume(struct thread *ignore, const void *buf, size_t nbytes) ep0_consume()
578 static void ep0_init(struct thread *t, bool legacy_descriptors) ep0_init()
/linux-4.1.27/arch/arm64/kernel/
H A Dprocess.c210 * Free current thread data structures etc..
221 current->thread.tp_value = 0; tls_thread_flush()
257 unsigned long tls = p->thread.tp_value; copy_thread()
259 memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); copy_thread()
282 * for the new thread. copy_thread()
289 p->thread.cpu_context.x19 = stack_start; copy_thread()
290 p->thread.cpu_context.x20 = stk_sz; copy_thread()
292 p->thread.cpu_context.pc = (unsigned long)ret_from_fork; copy_thread()
293 p->thread.cpu_context.sp = (unsigned long)childregs; copy_thread()
294 p->thread.tp_value = tls; copy_thread()
307 current->thread.tp_value = tpidr; tls_thread_switch()
312 tpidrro = next->thread.tp_value; tls_thread_switch()
314 tpidr = next->thread.tp_value; tls_thread_switch()
339 * the thread migrates to a different CPU. __switch_to()
343 /* the actual thread switch */ __switch_to()
/linux-4.1.27/arch/tile/kernel/
H A Dprocess.c48 * one thread per core and we want to get threads out of futex waits fast.
89 * thread in a process dies, we will reclaim all user arch_release_thread_info()
123 p->thread.ksp = ksp; copy_thread()
126 p->thread.creator_pid = current->pid; copy_thread()
129 /* kernel thread */ copy_thread()
136 p->thread.pc = (unsigned long) ret_from_kernel_thread; copy_thread()
141 * Start new thread in ret_from_fork so it schedules properly copy_thread()
144 p->thread.pc = (unsigned long) ret_from_fork; copy_thread()
147 * Do not clone step state from the parent; each thread copy_thread()
154 * Do not clone unalign jit fixup from the parent; each thread copy_thread()
172 p->thread.usp0 = childregs->sp; copy_thread()
184 * No DMA in the new thread. We model this on the fact that copy_thread()
187 memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state)); copy_thread()
188 memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb)); copy_thread()
191 /* New thread has its miscellaneous processor state bits clear. */ copy_thread()
192 p->thread.proc_status = 0; copy_thread()
195 /* New thread does not own any networks. */ copy_thread()
196 memset(&p->thread.hardwall[0], 0, copy_thread()
202 * Start the new thread with the current architecture state copy_thread()
205 save_arch_state(&p->thread); copy_thread()
245 (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS)); sim_notify_fork()
413 struct tile_dma_state *dma = &current->thread.tile_dma_state; _prepare_arch_switch()
424 save_arch_state(&prev->thread); _switch_to()
433 if (next->thread.tile_dma_state.enabled) { _switch_to()
434 restore_tile_dma_state(&next->thread); _switch_to()
442 restore_arch_state(&next->thread); _switch_to()
464 * re-disable interrupts, reload the thread flags, and call back
527 /* Flush thread state. */ flush_thread()
534 * Free current thread data structures etc..
/linux-4.1.27/fs/jffs2/
H A Dbackground.c34 /* This must only ever be called when no GC thread is currently running */ jffs2_start_garbage_collect_thread()
47 pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n", jffs2_start_garbage_collect_thread()
53 jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid); jffs2_start_garbage_collect_thread()
113 * the GC thread get there first. */ jffs2_garbage_collect_thread()
159 pr_notice("No space for garbage collection. Aborting GC thread\n"); jffs2_garbage_collect_thread()
/linux-4.1.27/arch/openrisc/include/asm/
H A Dthread_info.h55 mm_segment_t addr_limit; /* thread address space:
57 0-0xFFFFFFFF for kernel-thread
67 * macros/functions for gaining access to the thread information structure
84 /* how to get the thread information struct from C */
94 * thread information flags
/linux-4.1.27/arch/arm64/include/asm/
H A Dthread_info.h70 * how to get the thread information struct from C
81 ((unsigned long)(tsk->thread.cpu_context.pc))
83 ((unsigned long)(tsk->thread.cpu_context.sp))
85 ((unsigned long)(tsk->thread.cpu_context.fp))
90 * thread information flags:
/linux-4.1.27/drivers/staging/rts5208/
H A Drtsx.h110 struct task_struct *ctl_thread; /* the control thread */
111 struct task_struct *polling_thread; /* the polling thread */
114 struct completion cmnd_ready; /* to sleep thread on */
115 struct completion control_exit; /* control thread exit */
116 struct completion polling_exit; /* polling thread exit */
117 struct completion notify; /* thread begin/end */
118 struct completion scanning_done; /* wait for scan thread */
/linux-4.1.27/arch/microblaze/kernel/
H A Dprocess.c61 /* if we're creating a new kernel thread then we just zero all copy_thread()
62 * the registers. That's OK for a brand new thread. */ copy_thread()
110 * r21 is the thread reg, r10 is 6th arg to clone copy_thread()
121 * Return saved PC of a blocked thread.
128 /* Check whether the thread is blocked in resume() */ thread_saved_pc()
142 /* Set up a thread for executing a new program */ start_thread()
157 * Set up a thread for executing a new program
/linux-4.1.27/drivers/vfio/
H A Dvirqfd.c55 virqfd->thread) virqfd_wakeup()
103 if (virqfd->thread) virqfd_inject()
104 virqfd->thread(virqfd->opaque, virqfd->data); virqfd_inject()
109 void (*thread)(void *, void *), vfio_virqfd_enable()
125 virqfd->thread = thread; vfio_virqfd_enable()
176 if ((!handler || handler(opaque, data)) && thread) vfio_virqfd_enable()
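The virqfd.c hits show an eventfd wakeup that is either handled atomically or, when handler() returns nonzero (or is absent), punted to a threaded callback via virqfd_inject(). A sketch of enabling one (the vfio_virqfd_enable() parameter order here is an assumption inferred from the hits; the handlers are hypothetical):

#include <linux/vfio.h>

/* atomic half: runs from the eventfd wakeup */
static int demo_handler(void *opaque, void *data)
{
	return 1; /* can't finish atomically; punt to the thread */
}

/* threaded half: sleepable work triggered by the eventfd */
static void demo_thread(void *opaque, void *data)
{
	/* deferred work goes here */
}

static struct virqfd *demo_virqfd;

static int demo_enable(void *opaque, int eventfd)
{
	return vfio_virqfd_enable(opaque, demo_handler, demo_thread,
				  NULL, &demo_virqfd, eventfd);
}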
/linux-4.1.27/arch/m32r/kernel/
H A Dprocess.c40 * Return saved PC of a blocked thread.
44 return tsk->thread.lr; thread_saved_pc()
105 * Free current thread data structures etc..
116 memset(&current->thread.debug_trap, 0, sizeof(struct debug_trap)); flush_thread()
143 tsk->thread.lr = (unsigned long)ret_from_kernel_thread; copy_thread()
150 tsk->thread.lr = (unsigned long)ret_from_fork; copy_thread()
152 tsk->thread.sp = (unsigned long)childregs; copy_thread()
/linux-4.1.27/drivers/base/
H A Ddevtmpfs.c29 static struct task_struct *thread; variable in typeref:struct:task_struct
87 if (!thread) devtmpfs_create_node()
113 wake_up_process(thread); devtmpfs_create_node()
126 if (!thread) devtmpfs_delete_node()
143 wake_up_process(thread); devtmpfs_delete_node()
163 d_inode(dentry)->i_private = &thread; dev_mkdir()
223 d_inode(dentry)->i_private = &thread; handle_create()
239 if (d_inode(dentry)->i_private == &thread) dev_rmdir()
280 if (inode->i_private != &thread) dev_mynode()
354 if (!thread) devtmpfs_mount()
427 thread = kthread_run(devtmpfsd, &err, "kdevtmpfs"); devtmpfs_init()
428 if (!IS_ERR(thread)) { devtmpfs_init()
431 err = PTR_ERR(thread); devtmpfs_init()
432 thread = NULL; devtmpfs_init()
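The devtmpfs_init() hit shows the standard kthread_run() error idiom: the return value is either a task pointer or an ERR_PTR-encoded errno, never NULL. The same pattern in isolation (hypothetical names):

#include <linux/err.h>
#include <linux/kthread.h>

static int demo_fn(void *data) { return 0; } /* hypothetical thread body */

static struct task_struct *worker;

static int demo_start(void)
{
	/* kthread_run() = kthread_create() + wake_up_process() */
	worker = kthread_run(demo_fn, NULL, "kdemo");
	if (IS_ERR(worker)) {
		int err = PTR_ERR(worker); /* decode the errno */

		worker = NULL;             /* as devtmpfs does on failure */
		return err;
	}
	return 0;
}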
/linux-4.1.27/tools/perf/bench/
H A Dnuma.c160 OPT_STRING('T', "mb_thread" , &p0.mb_thread_str, "MB", "thread memory (MBs)"),
176 OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs, "perturb thread 0/0 every X secs, to test convergence stability"),
184 OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
404 * threads of this process, or only be accessed by this thread:
1049 set_taskname("thread %d/%d", process_nr, thread_nr); worker_thread()
1066 printf("# thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n", worker_thread()
1224 * Pick up the memory policy and the CPU binding of our first thread, worker_process()
1278 printf(" # %5dx %5ldMB thread local mem operations\n", print_summary()
1469 /* This mutex is locked - the last started thread will wake us: */ __bench_numa()
1532 "secs,", "runtime-max/thread", "secs slowest (max) thread-runtime"); __bench_numa()
1535 "secs,", "runtime-min/thread", "secs fastest (min) thread-runtime"); __bench_numa()
1538 "secs,", "runtime-avg/thread", "secs average thread-runtime"); __bench_numa()
1542 "%,", "spread-runtime/thread", "% difference between max/avg runtime"); __bench_numa()
1545 "GB,", "data/thread", "GB data processed, per thread"); __bench_numa()
1551 "nsecs,", "runtime/byte/thread","nsecs/byte/thread runtime"); __bench_numa()
1554 "GB/sec,", "thread-speed", "GB/sec/thread speed"); __bench_numa()
1683 /* Various NUMA process/thread layout bandwidth measurements: */
1692 { " 4x1-bw-thread,", "mem", "-p", "1", "-t", "4", "-T", "256", OPT_BW },
1693 { " 8x1-bw-thread,", "mem", "-p", "1", "-t", "8", "-T", "256", OPT_BW },
1694 { "16x1-bw-thread,", "mem", "-p", "1", "-t", "16", "-T", "128", OPT_BW },
1695 { "32x1-bw-thread,", "mem", "-p", "1", "-t", "32", "-T", "64", OPT_BW },
1697 { " 2x3-bw-thread,", "mem", "-p", "2", "-t", "3", "-P", "512", OPT_BW },
1698 { " 4x4-bw-thread,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_BW },
1699 { " 4x6-bw-thread,", "mem", "-p", "4", "-t", "6", "-P", "512", OPT_BW },
1700 { " 4x8-bw-thread,", "mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW },
1701 { " 4x8-bw-thread-NOTHP,",
1703 { " 3x3-bw-thread,", "mem", "-p", "3", "-t", "3", "-P", "512", OPT_BW },
1704 { " 5x5-bw-thread,", "mem", "-p", "5", "-t", "5", "-P", "512", OPT_BW },
1706 { "2x16-bw-thread,", "mem", "-p", "2", "-t", "16", "-P", "512", OPT_BW },
1707 { "1x32-bw-thread,", "mem", "-p", "1", "-t", "32", "-P", "2048", OPT_BW },
1711 { "numa01-bw-thread,", "mem", "-p", "2", "-t", "16", "-T", "192", OPT_BW },
1712 { "numa01-bw-thread-NOTHP,",
