Searched refs:threads (Results 1 - 200 of 705) sorted by relevance

/linux-4.1.27/tools/perf/util/
H A Dthread_map.c25 struct thread_map *threads; thread_map__new_by_pid() local
36 threads = malloc(sizeof(*threads) + sizeof(pid_t) * items); thread_map__new_by_pid()
37 if (threads != NULL) { thread_map__new_by_pid()
39 threads->map[i] = atoi(namelist[i]->d_name); thread_map__new_by_pid()
40 threads->nr = items; thread_map__new_by_pid()
47 return threads; thread_map__new_by_pid()
52 struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t)); thread_map__new_by_tid() local
54 if (threads != NULL) { thread_map__new_by_tid()
55 threads->map[0] = tid; thread_map__new_by_tid()
56 threads->nr = 1; thread_map__new_by_tid()
59 return threads; thread_map__new_by_tid()
68 struct thread_map *threads = malloc(sizeof(*threads) + thread_map__new_by_uid() local
70 if (threads == NULL) thread_map__new_by_uid()
77 threads->nr = 0; thread_map__new_by_uid()
101 while (threads->nr + items >= max_threads) { thread_map__new_by_uid()
109 tmp = realloc(threads, (sizeof(*threads) + thread_map__new_by_uid()
114 threads = tmp; thread_map__new_by_uid()
118 threads->map[threads->nr + i] = atoi(namelist[i]->d_name); thread_map__new_by_uid()
124 threads->nr += items; thread_map__new_by_uid()
130 return threads; thread_map__new_by_uid()
133 free(threads); thread_map__new_by_uid()
142 zfree(&threads); thread_map__new_by_uid()
159 struct thread_map *threads = NULL, *nt; thread_map__new_by_pid_str() local
188 nt = realloc(threads, (sizeof(*threads) + strlist__for_each()
193 threads = nt; strlist__for_each()
196 threads->map[j++] = atoi(namelist[i]->d_name); strlist__for_each()
199 threads->nr = total_tasks; strlist__for_each()
205 return threads;
213 zfree(&threads);
219 struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t)); thread_map__new_dummy() local
221 if (threads != NULL) { thread_map__new_dummy()
222 threads->map[0] = -1; thread_map__new_dummy()
223 threads->nr = 1; thread_map__new_dummy()
225 return threads; thread_map__new_dummy()
230 struct thread_map *threads = NULL, *nt; thread_map__new_by_tid_str() local
237 /* perf-stat expects threads to be generated even if tid not given */ thread_map__new_by_tid_str()
256 nt = realloc(threads, sizeof(*threads) + sizeof(pid_t) * ntasks); strlist__for_each()
261 threads = nt; strlist__for_each()
262 threads->map[ntasks - 1] = tid; strlist__for_each()
263 threads->nr = ntasks; strlist__for_each()
266 return threads;
269 zfree(&threads);
285 void thread_map__delete(struct thread_map *threads) thread_map__delete() argument
287 free(threads); thread_map__delete()
290 size_t thread_map__fprintf(struct thread_map *threads, FILE *fp) thread_map__fprintf() argument
294 threads->nr, threads->nr > 1 ? "s" : ""); thread_map__fprintf()
295 for (i = 0; i < threads->nr; ++i) thread_map__fprintf()
296 printed += fprintf(fp, "%s%d", i ? ", " : "", threads->map[i]); thread_map__fprintf()
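
Every constructor above in thread_map.c uses the same allocation idiom: struct thread_map ends in a flexible pid_t array, so a single malloc() of sizeof(*threads) + sizeof(pid_t) * n covers both the header and the map, and thread_map__delete() is a single free(). A minimal userspace sketch of the idiom, assuming a layout modeled on these snippets rather than copied from the real header:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Modeled on perf's struct thread_map: a count plus a flexible
     * array, living in one allocation. */
    struct thread_map {
            int nr;
            pid_t map[];
    };

    static struct thread_map *thread_map_new(int nr)
    {
            /* one malloc covers the header and nr pid slots */
            struct thread_map *threads =
                    malloc(sizeof(*threads) + sizeof(pid_t) * nr);

            if (threads != NULL)
                    threads->nr = nr;
            return threads;
    }

    int main(void)
    {
            struct thread_map *threads = thread_map_new(2);
            int i;

            if (threads == NULL)
                    return 1;
            threads->map[0] = getpid();
            threads->map[1] = -1;   /* -1 = "all threads", as in the dummy map */
            for (i = 0; i < threads->nr; i++)
                    printf("%s%d", i ? ", " : "", threads->map[i]);
            putchar('\n');
            free(threads);          /* a single free, as in thread_map__delete() */
            return 0;
    }
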
H A Dthread_map.h21 void thread_map__delete(struct thread_map *threads);
23 size_t thread_map__fprintf(struct thread_map *threads, FILE *fp);
25 static inline int thread_map__nr(struct thread_map *threads) thread_map__nr() argument
27 return threads ? threads->nr : 1; thread_map__nr()
H A Dvalues.c13 die("failed to allocate read_values threads arrays"); perf_read_values_init()
14 values->threads = 0; perf_read_values_init()
33 for (i = 0; i < values->threads; i++) perf_read_values_destroy()
54 die("failed to enlarge read_values threads arrays"); perf_read_values__enlarge_threads()
62 for (i = 0; i < values->threads; i++) perf_read_values__findnew_thread()
66 if (values->threads == values->threads_max) perf_read_values__findnew_thread()
69 i = values->threads++; perf_read_values__findnew_thread()
91 for (i = 0; i < values->threads; i++) { perf_read_values__enlarge_counters()
144 for (i = 0; i < values->threads; i++) { perf_read_values__display_pretty()
165 for (i = 0; i < values->threads; i++) { perf_read_values__display_pretty()
188 for (i = 0; i < values->threads; i++) { perf_read_values__display_raw()
204 for (i = 0; i < values->threads; i++) { perf_read_values__display_raw()
216 for (i = 0; i < values->threads; i++) perf_read_values__display_raw()
H A Dvalues.h7 int threads; member in struct:perf_read_values
H A Devlist.c36 struct thread_map *threads) perf_evlist__init()
43 perf_evlist__set_maps(evlist, cpus, threads); perf_evlist__init()
118 thread_map__delete(evlist->threads); perf_evlist__delete()
120 evlist->threads = NULL; perf_evlist__delete()
280 return thread_map__nr(evlist->threads); perf_evlist__nr_threads()
413 int nr_threads = thread_map__nr(evlist->threads); perf_evlist__alloc_pollfd()
541 if (!evsel->system_wide && evlist->threads && thread >= 0) perf_evlist__set_sid_idx()
542 sid->tid = evlist->threads->map[thread]; perf_evlist__set_sid_idx()
754 evlist->nr_mmaps = thread_map__nr(evlist->threads); perf_evlist__alloc_mmap()
850 int nr_threads = thread_map__nr(evlist->threads); perf_evlist__mmap_per_cpu()
875 int nr_threads = thread_map__nr(evlist->threads); perf_evlist__mmap_per_thread()
1000 const struct thread_map *threads = evlist->threads; perf_evlist__mmap() local
1019 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0) evlist__for_each()
1031 evlist->threads = thread_map__new_str(target->pid, target->tid, perf_evlist__create_maps()
1034 if (evlist->threads == NULL) perf_evlist__create_maps()
1048 thread_map__delete(evlist->threads); perf_evlist__create_maps()
1049 evlist->threads = NULL; perf_evlist__create_maps()
1058 nthreads = thread_map__nr(evlist->threads); perf_evlist__apply_filters()
1079 nthreads = thread_map__nr(evlist->threads); perf_evlist__set_filter()
1247 int nthreads = thread_map__nr(evlist->threads); perf_evlist__close()
1273 evlist->threads = thread_map__new_dummy(); perf_evlist__create_syswide_maps()
1274 if (evlist->threads == NULL) perf_evlist__create_syswide_maps()
1292 * Default: one fd per CPU, all threads, aka systemwide perf_evlist__open()
1295 if (evlist->threads == NULL && evlist->cpus == NULL) { perf_evlist__open()
1304 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads); evlist__for_each()
1398 if (evlist->threads == NULL) { perf_evlist__prepare_workload()
1399 fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n", perf_evlist__prepare_workload()
1403 evlist->threads->map[0] = evlist->workload.pid; perf_evlist__prepare_workload()
35 perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, struct thread_map *threads) perf_evlist__init() argument
H A Dpython.c435 struct thread_map *threads; member in struct:pyrf_thread_map
448 pthreads->threads = thread_map__new(pid, tid, uid); pyrf_thread_map__init()
449 if (pthreads->threads == NULL) pyrf_thread_map__init()
456 thread_map__delete(pthreads->threads); pyrf_thread_map__delete()
464 return pthreads->threads->nr; pyrf_thread_map__length()
471 if (i >= pthreads->threads->nr) pyrf_thread_map__item()
474 return Py_BuildValue("i", pthreads->threads->map[i]); pyrf_thread_map__item()
623 struct thread_map *threads = NULL; pyrf_evsel__open() local
626 static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL }; pyrf_evsel__open()
633 threads = ((struct pyrf_thread_map *)pthreads)->threads; pyrf_evsel__open()
643 if (perf_evsel__open(evsel, cpus, threads) < 0) { pyrf_evsel__open()
692 struct thread_map *threads; pyrf_evlist__init() local
697 threads = ((struct pyrf_thread_map *)pthreads)->threads; pyrf_evlist__init()
699 perf_evlist__init(&pevlist->evlist, cpus, threads); pyrf_evlist__init()
H A Devlist.h51 struct thread_map *threads; member in struct:perf_evlist
65 struct thread_map *threads);
148 struct thread_map *threads) perf_evlist__set_maps()
151 evlist->threads = threads; perf_evlist__set_maps()
146 perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, struct thread_map *threads) perf_evlist__set_maps() argument
H A Dmachine.h32 struct rb_root threads; member in struct:machine
213 struct target *target, struct thread_map *threads,
217 struct thread_map *threads, bool data_mmap) machine__synthesize_threads()
219 return __machine__synthesize_threads(machine, NULL, target, threads, machine__synthesize_threads()
216 machine__synthesize_threads(struct machine *machine, struct target *target, struct thread_map *threads, bool data_mmap) machine__synthesize_threads() argument
H A Devent.c188 * threads set parent pid to main thread. i.e., assume main thread perf_event__synthesize_fork()
189 * spawns all threads in a process perf_event__synthesize_fork()
453 struct thread_map *threads, perf_event__synthesize_thread_map()
474 for (thread = 0; thread < threads->nr; ++thread) { perf_event__synthesize_thread_map()
477 threads->map[thread], 0, perf_event__synthesize_thread_map()
488 if ((int) comm_event->comm.pid != threads->map[thread]) { perf_event__synthesize_thread_map()
492 for (j = 0; j < threads->nr; ++j) { perf_event__synthesize_thread_map()
493 if ((int) comm_event->comm.pid == threads->map[j]) { perf_event__synthesize_thread_map()
452 perf_event__synthesize_thread_map(struct perf_tool *tool, struct thread_map *threads, perf_event__handler_t process, struct machine *machine, bool mmap_data) perf_event__synthesize_thread_map() argument
H A Dtarget.c2 * Helper functions for handling target threads/cpus
H A Dmachine.c30 machine->threads = RB_ROOT; machine__init()
94 struct rb_node *nd = rb_first(&machine->threads); machine__delete_threads()
343 struct rb_node **p = &machine->threads.rb_node; __machine__findnew_thread()
384 rb_insert_color(&th->rb_node, &machine->threads); __machine__findnew_thread()
395 rb_erase(&th->rb_node, &machine->threads); __machine__findnew_thread()
578 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { machine__fprintf()
1262 rb_erase(&th->rb_node, &machine->threads); machine__remove_thread()
1757 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { machine__for_each_thread()
1773 struct target *target, struct thread_map *threads, __machine__synthesize_threads()
1777 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap); __machine__synthesize_threads()
1772 __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, struct target *target, struct thread_map *threads, perf_event__handler_t process, bool data_mmap) __machine__synthesize_threads() argument
/linux-4.1.27/tools/perf/tests/
H A Dopen-syscall.c11 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); test__open_syscall_event() local
14 if (threads == NULL) { test__open_syscall_event()
30 if (perf_evsel__open_per_thread(evsel, threads) < 0) { test__open_syscall_event()
55 perf_evsel__close_fd(evsel, 1, threads->nr); test__open_syscall_event()
59 thread_map__delete(threads); test__open_syscall_event()
H A Dmmap-thread-lookup.c26 static struct thread_data threads[THREADS]; variable in typeref:struct:thread_data
75 struct thread_data *td = &threads[i]; thread_create()
95 struct thread_data *td0 = &threads[0]; threads_create()
112 struct thread_data *td0 = &threads[0]; threads_destroy()
121 err = pthread_join(threads[i].pt, NULL); threads_destroy()
157 * The threads_create will not return before all threads mmap_events()
163 TEST_ASSERT_VAL("failed to create threads", !threads_create()); mmap_events()
174 TEST_ASSERT_VAL("failed to destroy threads", !threads_destroy()); mmap_events()
182 struct thread_data *td = &threads[i]; mmap_events()
209 * This test creates 'THREADS' number of threads (including
212 * When threads are created, we synthesize them with both
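
mmap-thread-lookup.c keeps its workers in a static threads[] array and tears them down with pthread_join(), as the snippets above show. A self-contained sketch of that create/join pattern, with an illustrative THREADS count and thread body:

    #include <pthread.h>
    #include <stdio.h>

    #define THREADS 4               /* illustrative; the test uses its own count */

    static pthread_t threads[THREADS];

    static void *thread_fn(void *arg)
    {
            printf("thread %ld running\n", (long)arg);
            return NULL;
    }

    int main(void)
    {
            long i;

            for (i = 0; i < THREADS; i++)
                    if (pthread_create(&threads[i], NULL, thread_fn, (void *)i))
                            return 1;

            /* as in threads_destroy(): join every worker before exiting */
            for (i = 0; i < THREADS; i++)
                    if (pthread_join(threads[i], NULL))
                            return 1;
            return 0;
    }
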
H A Dthread-mg-share.c23 * with several threads and checks they properly share and test__thread_mg_share()
33 /* create process with 4 threads */ test__thread_mg_share()
42 TEST_ASSERT_VAL("failed to create threads", test__thread_mg_share()
86 * because we've already released all the threads. test__thread_mg_share()
H A Dmmap-basic.c22 struct thread_map *threads; test__basic_mmap() local
36 threads = thread_map__new(-1, getpid(), UINT_MAX); test__basic_mmap()
37 if (threads == NULL) { test__basic_mmap()
63 perf_evlist__set_maps(evlist, cpus, threads); test__basic_mmap()
80 if (perf_evsel__open(evsels[i], cpus, threads) < 0) { test__basic_mmap()
143 threads = NULL;
147 thread_map__delete(threads);
H A Dkeep-tracking.c63 struct thread_map *threads = NULL; test__keep_tracking() local
70 threads = thread_map__new(-1, getpid(), UINT_MAX); test__keep_tracking()
71 CHECK_NOT_NULL__(threads); test__keep_tracking()
79 perf_evlist__set_maps(evlist, cpus, threads); test__keep_tracking()
148 thread_map__delete(threads); test__keep_tracking()
H A Dopen-syscall-all-cpus.c14 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); test__open_syscall_event_on_all_cpus() local
17 if (threads == NULL) { test__open_syscall_event_on_all_cpus()
41 if (perf_evsel__open(evsel, cpus, threads) < 0) { test__open_syscall_event_on_all_cpus()
109 perf_evsel__close_fd(evsel, 1, threads->nr); test__open_syscall_event_on_all_cpus()
113 thread_map__delete(threads); test__open_syscall_event_on_all_cpus()
H A Dtask-exit.c56 * Create maps of threads and cpus to monitor. In this case test__task_exit()
57 * we start with all threads and cpus (-1, -1) but then in test__task_exit()
62 evlist->threads = thread_map__new_by_tid(-1); test__task_exit()
63 if (!evlist->cpus || !evlist->threads) { test__task_exit()
H A Dperf-time-to-tsc.c48 struct thread_map *threads = NULL; test__perf_time_to_tsc() local
60 threads = thread_map__new(-1, getpid(), UINT_MAX); test__perf_time_to_tsc()
61 CHECK_NOT_NULL__(threads); test__perf_time_to_tsc()
69 perf_evlist__set_maps(evlist, cpus, threads); test__perf_time_to_tsc()
H A Dsw-clock.c54 evlist->threads = thread_map__new_by_tid(getpid()); __test__sw_clock_freq()
55 if (!evlist->cpus || !evlist->threads) { __test__sw_clock_freq()
H A Dcode-reading.c402 struct thread_map *threads = NULL; do_test_code_reading() local
444 threads = thread_map__new_by_tid(pid); do_test_code_reading()
445 if (!threads) { do_test_code_reading()
450 ret = perf_event__synthesize_thread_map(NULL, threads, do_test_code_reading()
478 perf_evlist__set_maps(evlist, cpus, threads); do_test_code_reading()
543 thread_map__delete(threads); do_test_code_reading()
H A Dopen-syscall-tp-fields.c48 evlist->threads->map[0] = getpid(); test__syscall_open_tp_fields()
H A Dswitch-tracking.c321 struct thread_map *threads = NULL; test__switch_tracking() local
329 threads = thread_map__new(-1, getpid(), UINT_MAX); test__switch_tracking()
330 if (!threads) { test__switch_tracking()
347 perf_evlist__set_maps(evlist, cpus, threads); test__switch_tracking()
564 thread_map__delete(threads);
H A Dperf-record.c70 * Create maps of threads and cpus to monitor. In this case test__PERF_RECORD()
71 * we start with all threads and cpus (-1, -1) but then in test__PERF_RECORD()
/linux-4.1.27/arch/sparc/include/asm/
H A Dcpudata.h6 #include <linux/threads.h>
H A Dsmp_64.h9 #include <linux/threads.h>
H A Dptrace.h9 #include <linux/threads.h>
H A Dbackoff.h7 * When multiple threads compete on an atomic operation, it is
H A Dsmp_32.h9 #include <linux/threads.h>
/linux-4.1.27/arch/sh/include/asm/
H A Dhardirq.h4 #include <linux/threads.h>
H A Dmmu.h37 #include <linux/threads.h>
H A Dfixmap.h17 #include <linux/threads.h>
/linux-4.1.27/arch/powerpc/include/asm/
H A Dcputhreads.h7 * Mapping of threads to cores
10 * threads per core and the same number for each core in the system
11 * (though it would work if some processors had fewer threads as long
34 * @threads: a cpumask of threads
42 static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads) cpu_thread_mask_to_cores() argument
50 if (cpumask_intersects(threads, &tmp)) cpu_thread_mask_to_cores()
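
cputhreads.h works because the number of threads per core is assumed uniform, so a cpumask of hardware threads can be collapsed onto cores with plain integer arithmetic. A userspace sketch of that arithmetic; the helper names imitate the real header (which provides cpu_thread_in_core() and relatives), but the fixed threads_per_core here is an assumption:

    #include <stdio.h>

    /* Assumes a fixed, uniform threads_per_core, as the comment requires. */
    static int threads_per_core = 8;

    static int cpu_core_index_of_thread(int cpu)
    {
            return cpu / threads_per_core;          /* which core owns this thread */
    }

    static int cpu_first_thread_of_core(int core)
    {
            return core * threads_per_core;         /* thread 0 of that core */
    }

    static int cpu_thread_in_core(int cpu)
    {
            return cpu % threads_per_core;          /* position within the core */
    }

    int main(void)
    {
            int cpu = 21;   /* hw thread 21 on an 8-thread/core machine */

            printf("cpu %d = core %d, thread %d (first sibling: cpu %d)\n",
                   cpu, cpu_core_index_of_thread(cpu), cpu_thread_in_core(cpu),
                   cpu_first_thread_of_core(cpu_core_index_of_thread(cpu)));
            return 0;
    }
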
H A Dhardirq.h4 #include <linux/threads.h>
H A Ddbell.h16 #include <linux/threads.h>
H A Dfixmap.h21 #include <linux/threads.h>
H A Dirq.h13 #include <linux/threads.h>
H A Dpgalloc-32.h4 #include <linux/threads.h>
H A Dpaca.h114 /* Shared by all threads of a core -- points to tcd of first thread */
156 /* Per-core mask tracking idle threads and a lock bit-[L][TTTTTTTT] */
161 /* Mask to denote subcore sibling threads */
/linux-4.1.27/tools/virtio/virtio-trace/
H A Dtrace-agent-ctl.c2 * Controller of read/write threads for virtio-trace
65 /* Wakes rw-threads when they are sleeping */ wait_order()
89 * control read/write threads by handling global_run_operation
117 * this controller wakes all read/write threads. rw_ctl_loop()
121 pr_debug("Wake up all read/write threads\n"); rw_ctl_loop()
125 * threads will wait for notification from Host. rw_ctl_loop()
128 pr_debug("Stop all read/write threads\n"); rw_ctl_loop()
H A Dtrace-agent.h15 * @rw_ti: structure managing information of read/write threads
43 /* use for stopping rw threads */
51 /* for controller of read/write threads */
H A Dtrace-agent.c65 /* read/write threads init */ agent_info_new()
158 /* init read/write threads */ agent_info_init()
178 /* init controller of read/write threads */ agent_info_init()
225 /* Start all read/write threads */ agent_main_loop()
231 /* Finish all read/write threads */ agent_main_loop()
/linux-4.1.27/tools/perf/bench/
H A Dfutex-wake.c4 * futex-wake: Block a bunch of threads on a futex and wake'em up, N at a time.
24 /* all threads will block on the same futex */
42 OPT_UINTEGER('t', "threads", &nthreads, "Specify number of threads"),
43 OPT_UINTEGER('w', "nwakes", &nwakes, "Specify number of threads to wake at once"),
73 printf("Woke up %d of %d threads in %.4f ms (+-%.2f%%)\n", print_summary()
88 /* create and block all threads */ block_threads()
138 printf("Run summary [PID %d]: blocking on %d threads (at [%s] futex %p), " bench_futex_wake()
153 /* create, launch & block all threads */ bench_futex_wake()
156 /* make sure all threads are already blocked */ bench_futex_wake()
165 /* Ok, all threads are patiently blocked, start waking folks up */ bench_futex_wake()
176 printf("[Run %d]: Wokeup %d of %d threads in %.4f ms\n", bench_futex_wake()
H A Dsched-pipe.c46 OPT_BOOLEAN('T', "threaded", &threaded, "Specify threads/process based task setup"),
80 struct thread_data threads[2], *td; bench_sched_pipe() local
103 td = threads + t; bench_sched_pipe()
120 td = threads + t; bench_sched_pipe()
127 td = threads + t; bench_sched_pipe()
138 worker_thread(threads + 0); bench_sched_pipe()
141 worker_thread(threads + 1); bench_sched_pipe()
154 loops, threaded ? "threads" : "processes"); bench_sched_pipe()
H A Dfutex-requeue.c4 * futex-requeue: Block a bunch of threads on futex1 and requeue them
41 OPT_UINTEGER('t', "threads", &nthreads, "Specify number of threads"),
42 OPT_UINTEGER('q', "nrequeue", &nrequeue, "Specify number of threads to requeue at once"),
59 printf("Requeued %d of %d threads in %.4f ms (+-%.2f%%)\n", print_summary()
87 /* create and block all threads */ block_threads()
138 printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), " bench_futex_requeue()
153 /* create, launch & block all threads */ bench_futex_requeue()
156 /* make sure all threads are already blocked */ bench_futex_requeue()
165 /* Ok, all threads are patiently blocked, start requeueing */ bench_futex_requeue()
183 printf("[Run %d]: Requeued %d of %d threads in %.4f ms\n", bench_futex_requeue()
H A Dfutex-hash.c8 * many threads and futexes as possible.
45 OPT_UINTEGER('t', "threads", &nthreads, "Specify number of threads"),
47 OPT_UINTEGER('f', "futexes", &nfutexes, "Specify number of futexes per thread"),
93 /* inform all threads that we're done for the day */ toggle_done()
141 printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n", bench_futex_hash()
H A Dnuma.c119 /* Global, read-writable area, accessible to all processes and threads: */
135 struct thread_data *threads; member in struct:global_info
155 OPT_INTEGER('t', "nr_threads" , &p0.nr_threads, "number of threads per process"),
404 * threads of this process, or only be accessed by this thread:
521 td = g->threads + t; parse_setup_cpu_list()
643 td = g->threads + t; parse_setup_node_list()
802 g->threads[task_nr].curr_cpu = cpu; update_curr_cpu()
809 * Count the number of nodes a process's threads
828 td = g->threads + task_nr; count_process_nodes()
846 * Count the number of distinct process-threads a node contains.
864 td = g->threads + task_nr; count_node_processes()
897 /* Strong convergence: all threads compress on a single node: */ calc_convergence_compression()
931 struct thread_data *td = g->threads + t; calc_convergence()
936 /* Not all threads have written it yet: */ calc_convergence()
1157 this_cpu = g->threads[task_nr].curr_cpu; worker_thread()
1208 * A worker process starts a couple of threads:
1228 td = g->threads + task_nr; worker_process()
1243 td = g->threads + task_nr; worker_process()
1288 ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; init_thread_data()
1291 g->threads = zalloc_shared_data(size); init_thread_data()
1294 struct thread_data *td = g->threads + t; init_thread_data()
1309 ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; deinit_thread_data()
1311 free_data(g->threads, size); deinit_thread_data()
1431 /* All threads try to acquire it, this way we can wait for them to start up: */ __bench_numa()
1455 /* Wait for all the threads to start up: */ __bench_numa()
1466 /* This will start all threads: */ __bench_numa()
1480 tprintf(" threads initialized in %.6f seconds.\n", startup_sec); __bench_numa()
1503 u64 thread_runtime_ns = g->threads[t].runtime_ns; __bench_numa()
H A Dsched-messaging.c309 num_fds, thread_mode ? "threads" : "processes"); bench_sched_messaging()
312 thread_mode ? "threads" : "processes"); bench_sched_messaging()
/linux-4.1.27/arch/ia64/include/asm/
H A Dhardirq.h17 #include <linux/threads.h>
H A Dpercpu.h16 #include <linux/threads.h>
H A Dnuma.h21 #include <linux/threads.h>
H A Dpgalloc.h20 #include <linux/threads.h>
H A Dswitch_to.h19 * Context switch from one thread to another. If the two threads have
H A Dsmp.h14 #include <linux/threads.h>
/linux-4.1.27/arch/ia64/kernel/
H A Dnr-irqs.c11 #include <linux/threads.h>
/linux-4.1.27/arch/m68k/include/asm/
H A Dhardirq.h4 #include <linux/threads.h>
/linux-4.1.27/net/rds/
H A DMakefile3 recv.o send.o stats.o sysctl.o threads.o transport.o \
/linux-4.1.27/tools/perf/python/
H A Dtwatch.py20 threads = perf.thread_map()
25 evsel.open(cpus = cpus, threads = threads);
26 evlist = perf.evlist(cpus, threads)
/linux-4.1.27/include/asm-generic/
H A Dhardirq.h5 #include <linux/threads.h>
/linux-4.1.27/include/linux/
H A Dthreads.h6 * The default limit for the nr of threads is now in
7 * /proc/sys/kernel/threads-max.
H A Dsignal.h303 * Kernel threads handle their own signals. Let the signal code allow_signal()
330 * terminate - kill the process, i.e. all threads in the group,
333 * coredump - write a core dump file describing all threads using
334 * the same mm and then kill all those threads
335 * stop - stop all the threads in the group, i.e. TASK_STOPPED state
384 * When SIGCONT is sent, it resumes the process (all threads in the group)
H A Dpid_namespace.h8 #include <linux/threads.h>
H A Dkernel_stat.h5 #include <linux/threads.h>
H A Dpercpu_counter.h12 #include <linux/threads.h>
H A Dtorture.h29 #include <linux/threads.h>
H A Dcompletion.h17 * Completions currently use a FIFO to queue threads that have to wait for
H A Dkthread.h3 /* Simple interface for creating and stopping kernel threads without mess. */
H A Dirqdesc.h38 * @threads_oneshot: bitfield to handle shared oneshot threads
39 * @threads_active: number of irqaction threads currently running
/linux-4.1.27/include/uapi/linux/
H A Dwait.h11 #define __WNOTHREAD 0x20000000 /* Don't wait on children of other threads in this group */
/linux-4.1.27/arch/arm/include/asm/
H A Dhardirq.h5 #include <linux/threads.h>
H A Dsmp.h13 #include <linux/threads.h>
/linux-4.1.27/arch/metag/mm/
H A Dl2cache.c105 * Prevent other threads writing during the writeback, otherwise the meta_l2c_disable()
131 * threads are safe to continue executing, however we must not init the meta_l2c_enable()
133 * this operation should still be atomic with other threads. meta_l2c_enable()
157 * atomic with other threads. meta_l2c_pf_enable()
175 * Prevent other threads writing during the writeback. This also meta_l2c_flush()
/linux-4.1.27/tools/testing/selftests/timers/
H A Dthreadtest.c67 /* flag other threads */ checklist()
157 printf("using independent threads\n"); main()
162 printf(" -n: number of threads\n"); main()
163 printf(" -i: use independent threads\n"); main()
177 printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime); main()
/linux-4.1.27/kernel/locking/
H A Drtmutex-tester.c36 static struct task_struct *threads[MAX_RT_TEST_THREADS]; variable in typeref:struct:task_struct
133 * Schedule replacement for rtsem_down(). Only called for threads with
146 if (threads[tid] == current) schedule_rt_mutex_test()
306 ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar); sysfs_test_command()
314 ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar); sysfs_test_command()
320 send_sig(SIGHUP, threads[tid], 0); sysfs_test_command()
328 wake_up_process(threads[tid]); sysfs_test_command()
348 tsk = threads[td->dev.id]; sysfs_test_status()
383 threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id); init_test_thread()
384 if (IS_ERR(threads[id])) init_test_thread()
385 return PTR_ERR(threads[id]); init_test_thread()
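
rtmutex-tester starts its workers with kthread_run() and tests the result with IS_ERR(), because kthread_run() reports failure with an ERR_PTR rather than NULL. A minimal module-sized sketch of that start/stop pattern (the worker body is a placeholder):

    #include <linux/module.h>
    #include <linux/kthread.h>
    #include <linux/delay.h>
    #include <linux/err.h>

    static struct task_struct *worker;

    static int worker_fn(void *data)
    {
            while (!kthread_should_stop())
                    msleep(100);            /* real work would go here */
            return 0;
    }

    static int __init demo_init(void)
    {
            worker = kthread_run(worker_fn, NULL, "demo-worker-%d", 0);
            if (IS_ERR(worker))             /* ERR_PTR, not NULL, on failure */
                    return PTR_ERR(worker);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            kthread_stop(worker);           /* wakes the thread, waits for exit */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
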
H A Drtmutex_common.h21 * threads to provoke lock stealing and testing of complex boosting scenarios.
/linux-4.1.27/block/
H A Dblk-mq-cpumap.c7 #include <linux/threads.h>
72 * threads per core. Map sibling threads to the same for_each_possible_cpu()
/linux-4.1.27/arch/alpha/include/asm/
H A Dtopology.h5 #include <linux/threads.h>
H A Dsmp.h4 #include <linux/threads.h>
/linux-4.1.27/fs/nfsd/
H A Dstats.h23 unsigned int th_cnt; /* number of available threads */
25 * of available threads were in use */
H A Dstats.c13 * th <threads> <fullcnt> <10%-20%> <20%-30%> ... <90%-100%> <100%>
15 * and number of times that all threads were in use
H A Dnfsctl.c405 * write_threads - Start NFSD, or report the current number of running threads
413 * running NFSD threads;
422 * number of NFSD threads to start
428 * running NFSD threads;
455 * write_pool_threads - Set or report the current number of threads per pool
467 * threads to start in each pool
472 * number of NFSD threads in each pool;
478 /* if size > 0, look for an array of number of threads per node write_pool_threads()
479 * and apply them then write out number of threads per node as reply write_pool_threads()
494 * writing to the threads file but NOT the pool_threads write_pool_threads()
495 * file, sorry. Report zero threads. write_pool_threads()
1138 [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR}, nfsd_fill_super()
H A Dnfssvc.c38 * of nfsd threads must exist and each must be listed in ->sp_all_threads in each
221 * threads is modified after nfsd start.) nfsd_startup_generic()
315 * any threads--if we get shut down before any threads are nfsd_last_thread()
474 /* enforce a global maximum number of threads */ nfsd_set_nrthreads()
513 * Adjust the number of threads and return the new number of threads.
H A Dnetns.h108 * to '0', which means that it bases this on the number of threads.
/linux-4.1.27/tools/usb/
H A Dffs-test.c309 } threads[] = { variable in typeref:struct:thread
351 if (t != threads) { cleanup_thread()
618 init_thread(threads); main()
619 ep0_init(threads, legacy_descriptors); main()
621 for (i = 1; i < sizeof threads / sizeof *threads; ++i) main()
622 init_thread(threads + i); main()
624 for (i = 1; i < sizeof threads / sizeof *threads; ++i) main()
625 start_thread(threads + i); main()
627 start_thread_helper(threads); main()
629 for (i = 1; i < sizeof threads / sizeof *threads; ++i) main()
630 join_thread(threads + i); main()
/linux-4.1.27/mm/
H A Dvmacache.c9 * Flush vma caches for threads that share a given mm.
12 * exclusively and other threads accessing the vma cache will
26 * to worry about other threads' seqnum. Current's vmacache_flush_all()
H A Dballoon_compaction.c148 * compaction threads can race against page migration functions balloon_page_isolate()
160 * Prevent concurrent compaction threads from isolating balloon_page_isolate()
180 * concurrent isolation threads attempting to re-isolate it. balloon_page_putback()
/linux-4.1.27/arch/tile/include/asm/
H A Dhardirq.h18 #include <linux/threads.h>
H A Dswitch_to.h53 * Kernel threads can check to see if they need to migrate their
55 * threads, we defer until they are returning to user-space.
H A Dhighmem.h25 #include <linux/threads.h>
H A Dkgdb.h40 * Longer buffer is needed to list all threads.
H A Dfixmap.h24 #include <linux/threads.h>
/linux-4.1.27/arch/x86/include/asm/
H A Dhardirq.h4 #include <linux/threads.h>
H A Dpgtable_32.h18 #include <linux/threads.h>
H A Dkgdb.h12 * Longer buffer is needed to list all threads
H A Dhighmem.h24 #include <linux/threads.h>
H A Dpgtable_64.h15 #include <linux/threads.h>
/linux-4.1.27/arch/mn10300/mm/
H A Dcache.c14 #include <linux/threads.h>
H A Dcache-smp.c14 #include <linux/threads.h>
/linux-4.1.27/arch/parisc/include/asm/
H A Dhardirq.h11 #include <linux/threads.h>
H A Dsmp.h15 #include <linux/threads.h> /* for NR_CPUS */
H A Dtlbflush.h55 /* Except for very small threads, flushing the whole TLB is flush_tlb_mm()
/linux-4.1.27/arch/powerpc/kernel/
H A Ddbell.c15 #include <linux/threads.h>
H A Depapr_hcalls.S10 #include <linux/threads.h>
H A Didle_power4.S10 #include <linux/threads.h>
H A Dmachine_kexec_64.c222 * the device tree and assume primary threads are online and query secondary
223 * threads via RTAS to online them if required. If we don't online primary
224 * threads, they will be stuck. However, we also online secondary threads as we
226 * threads as offline -- and again, these CPUs will be stuck.
228 * So, we online all CPUs that should be running, including secondary threads.
H A Didle_book3e.S12 #include <linux/threads.h>
H A Didle_e500.S13 #include <linux/threads.h>
H A Didle_power7.S10 #include <linux/threads.h>
55 * Used by threads when the lock bit of core_idle_state is set.
193 common_enter: /* common code for all the threads entering sleep or winkle */
411 * Common to all threads.
H A Dpaca.c141 * On systems with hardware multi-threading, there are two threads allocate_slb_shadows()
177 /* For now -- if we have threads this will be adjusted later */ initialise_paca()
/linux-4.1.27/arch/blackfin/include/asm/
H A Dsmp.h12 #include <linux/threads.h>
H A Dcplbinit.h14 #include <linux/threads.h>
H A Dkgdb.h16 * Longer buffer is needed to list all threads.
/linux-4.1.27/arch/cris/include/asm/
H A Dpgalloc.h4 #include <linux/threads.h>
/linux-4.1.27/kernel/power/
H A Dprocess.c157 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
159 * On success, returns 0. On failure, -errno and only the kernel threads are
203 /* No other threads should have PF_SUSPEND_TASK set */ for_each_process_thread()
224 pr_info("Restarting kernel threads ... "); thaw_kernel_threads()
H A Dswap.c428 /* Maximum number of threads for compression/decompression. */
489 unsigned run_threads; /* nr current threads */
593 * We'll limit the number of threads for compression to limit memory save_image_lzo()
624 * Start the compression threads. save_image_lzo()
636 "PM: Cannot start compression threads\n"); save_image_lzo()
1084 * We'll limit the number of threads for decompression to limit memory load_image_lzo()
1115 * Start the decompression threads. load_image_lzo()
1127 "PM: Cannot start decompression threads\n"); load_image_lzo()
H A Duser.c279 * It is necessary to thaw kernel threads here, because snapshot_ioctl()
281 * SNAPSHOT_FREE. In that case, if kernel threads were not snapshot_ioctl()
/linux-4.1.27/lib/
H A Dsmp_processor_id.c22 * Kernel threads bound to a single CPU can safely use check_preemption_disabled()
/linux-4.1.27/arch/powerpc/platforms/powernv/
H A Dsubcore.c44 * threads is as follows:
86 * unsplit while all other threads NAP.
89 * the hardware that if all threads except 0 are napping, the hardware should
92 * Non-zero threads are sent to a NAP loop, they don't exit the loop until they
95 * Core 0 spins waiting for the hardware to see all the other threads napping
98 * Once thread 0 sees the unsplit, it IPIs the secondary threads to wake them
113 * To begin with secondary threads are sent to an assembly routine. There they
414 * We need all threads in a core to be present to split/unsplit so subcore_init()
/linux-4.1.27/tools/perf/scripts/python/
H A Dexport-to-postgresql.py108 do_query(query, 'CREATE TABLE threads ('
205 '(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
206 '(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
299 copy_output_file(thread_file, "threads")
327 do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
339 do_query(query, 'ALTER TABLE threads '
341 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
344 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
352 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
363 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
H A Dsched-migration.py29 threads = { 0 : "idle"} variable
32 return "%s:%d" % (threads[pid], pid)
342 threads[prev_pid] = prev_comm
343 threads[next_pid] = next_comm
/linux-4.1.27/drivers/staging/lustre/lustre/include/
H A Dlustre_net.h144 * Constants determine how threads are created for ptlrpc service.
146 * ?_NTHRS_INIT # threads to create for each service partition on
149 * threads for the service while initializing.
150 * ?_NTHRS_BASE # threads should be created at least for each
152 * It's the low-water mark of threads upper-limit
154 * ?_THR_FACTOR # threads can be added on threads upper-limit for
158 * ?_NTHRS_MAX # overall threads can be created for a service,
162 * has ?_NTHRS_BASE threads, which means total threads
176 * partition has 4 cores, then actual number of service threads on each
180 * Total number of threads for the service is:
186 * partition has 8 cores, then actual number of service threads on each
190 * Total number of threads for the service is:
196 * partition has 12 cores, then actual number of service threads on each
200 * Total number of threads for the service is:
204 * as upper limit of threads number for each partition:
213 * to guarantee that each partition has at least MDS_NTHRS_BASE(64) threads
214 * to keep service healthy, so total number of threads will just be 2048.
224 * b) bind service threads on a few partitions, see modparameters of
234 * LDLM threads constants:
236 * Given 8 as factor and 24 as base threads number
239 * On 4-core machine we will have 24 + 8 * 4 = 56 threads.
243 * threads for each partition and total threads number will be 112.
247 * threads for each partition to keep service healthy, so total threads
250 * So with these constants, threads number will be at the similar level
1797 * List of active threads in svc->srv_threads
1895 * notifies wakes one of the service threads to process new incoming request.
1977 /** threads # should be created for each partition on initializing */
1979 /** limit of threads number for each partition */
2050 /** # of starting threads */
2052 /** # of stopping threads, reserved for shrinking threads */
2054 /** # running threads */
2056 /** service threads list */
2062 * threads starting & stopping are also protected by this lock.
2082 * all threads sleep on this. This wait-queue is signalled when new
2470 /* threads increasing factor for each CPU */
2472 /* service threads # to start on each partition while initializing */
2475 * low water of threads # upper-limit on each partition while running,
2476 * service availability may be impacted if threads number is lower
2481 /* "soft" limit for total threads number */
2483 /* user specified threads number, it will be validated due to
2486 /* set NUMA node affinity for service threads */
2905 /* all ptlrpcd threads are free mode */
2907 /* all ptlrpcd threads are bound mode */
2913 * If kernel supports NUMA, ptlrpcd threads are bound and
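
The sizing rule described in the lustre_net.h comments reduces to: per-partition threads = base + factor * cores, clamped by an overall maximum; the LDLM constants quoted above (base 24, factor 8) give 24 + 8 * 4 = 56 threads on a 4-core machine. A tiny worked sketch with those example numbers (the function name and the 1024 cap are illustrative):

    #include <stdio.h>

    /* threads = base + factor * cores, capped; constants follow the
     * LDLM example in the comments above, the cap is illustrative */
    static int svc_nthreads(int base, int factor, int ncores, int nthrs_max)
    {
            int nthrs = base + factor * ncores;

            return nthrs > nthrs_max ? nthrs_max : nthrs;
    }

    int main(void)
    {
            /* 4-core machine: 24 + 8 * 4 = 56 threads, as quoted */
            printf("%d\n", svc_nthreads(24, 8, 4, 1024));
            return 0;
    }
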
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/linux/
H A Dlinux-prim.c51 * waiting threads, which is not always desirable because all threads will
54 * be polluted by different threads.
/linux-4.1.27/arch/s390/appldata/
H A Dappldata_os.c69 u32 nr_running; /* number of runnable threads */
70 u32 nr_threads; /* number of threads */
75 u32 nr_iowait; /* number of blocked threads
/linux-4.1.27/include/drm/ttm/
H A Dttm_execbuf_util.h77 * If the function detects a deadlock due to multiple threads trying to
78 * reserve the same buffers in reverse order, all threads except one will
80 * CPU write reservations to be cleared, and for other threads to
/linux-4.1.27/arch/mips/netlogic/common/
H A Dreset.S80 * in this region. Called from all HW threads.
90 * L1D cache has to be flushed before enabling threads in XLP.
212 * Wake up sibling threads from the initial thread in a core.
215 /* core L1D flush before enable threads */
220 /* Enable hw threads by writing to MAP_THREADMODE of the core */
240 * when running 4 threads per core
H A Dsmpboot.S57 /* Called by the boot cpu to wake up its sibling threads */
59 /* CPU register contents lost when enabling threads, save them first */
/linux-4.1.27/arch/um/os-Linux/
H A Dutil.c79 * has no effect within UML's kernel threads.
98 * UML helper threads must not handle SIGWINCH/INT/TERM
/linux-4.1.27/arch/metag/kernel/perf/
H A Dperf_event.h25 * itself; each counter can be assigned to multiple hardware threads at any
28 * threads' events, regardless of the thread selected.
/linux-4.1.27/arch/mips/include/asm/
H A Dcdmm.h37 * Any CPU pinned threads/timers should be disabled.
39 * CPU pinned threads/timers can be restarted.
H A Dfixmap.h19 #include <linux/threads.h>
H A Dsmp.h17 #include <linux/threads.h>
/linux-4.1.27/arch/powerpc/platforms/cell/
H A Dsmp.c63 * At boot time, there is nothing to do for primary threads which were
150 /* Mark threads which are still spinning in hold loops. */ smp_init_cell()
/linux-4.1.27/drivers/oprofile/
H A Doprofile_stats.c13 #include <linux/threads.h>
/linux-4.1.27/arch/x86/kernel/apic/
H A Dprobe_64.c11 #include <linux/threads.h>
H A Dx2apic_phys.c1 #include <linux/threads.h>
H A Dapic_noop.c12 #include <linux/threads.h>
H A Dbigsmp_32.c6 #include <linux/threads.h>
/linux-4.1.27/arch/um/include/asm/
H A Dfixmap.h8 #include <linux/threads.h>
/linux-4.1.27/arch/um/kernel/
H A Dtime.c10 #include <linux/threads.h>
/linux-4.1.27/arch/metag/include/asm/
H A Dtlbflush.h35 /* flush TLB entries for all hardware threads */ __flush_tlb()
H A Dfixmap.h18 #include <linux/threads.h>
H A Dpgalloc.h4 #include <linux/threads.h>
H A Dbarrier.h61 * external reordering of writes before the fence on other threads with writes
H A Dglobal_lock.h85 * This immediately allows other hardware threads to continue executing and one
/linux-4.1.27/arch/microblaze/include/asm/
H A Dfixmap.h25 #include <linux/threads.h>
H A Dtlbflush.h17 #include <linux/threads.h>
/linux-4.1.27/include/linux/sched/
H A Dprio.h16 * user-space. This allows kernel threads to set their
/linux-4.1.27/arch/mn10300/include/asm/
H A Dhardirq.h15 #include <linux/threads.h>
H A Dkgdb.h18 * Longer buffer is needed to list all threads
H A Dpgalloc.h15 #include <linux/threads.h>
H A Dsmp.h25 #include <linux/threads.h>
/linux-4.1.27/arch/mn10300/kernel/
H A Dmn10300-watchdog-low.S19 #include <linux/threads.h>
/linux-4.1.27/arch/arm/vfp/
H A Dentry.S23 @ r10 = this threads thread_info structure
/linux-4.1.27/arch/arm64/include/asm/
H A Dhardirq.h20 #include <linux/threads.h>
H A Dsmp.h19 #include <linux/threads.h>
H A Dcpu_ops.h20 #include <linux/threads.h>
/linux-4.1.27/arch/c6x/include/asm/
H A Dirq.h17 #include <linux/threads.h>
/linux-4.1.27/arch/arm/mach-shmobile/
H A Dheadsmp.S15 #include <linux/threads.h>
/linux-4.1.27/include/linux/sunrpc/
H A Dsvc.h39 * Pool of threads and temporary sockets. Generally there is only
49 unsigned int sp_nrthreads; /* # of threads in pool */
50 struct list_head sp_all_threads; /* all server threads */
63 * a list of idle threads waiting for input.
71 unsigned int sv_nrthreads; /* # of server threads */
74 * on number of threads. */
96 * adding threads */
97 svc_thread_fn sv_function; /* main function for threads */
112 * change the number of threads. Horrible, but there it is.
223 struct list_head rq_all; /* all threads list */
/linux-4.1.27/arch/powerpc/mm/
H A Dicswx.c109 * If this is a threaded process then there might be other threads use_cop()
149 * If this is a threaded process then there might be other threads drop_cop()
235 * the threads (see smp_call_function(sync_cop, mm, 1)), but acop_handle_fault()
237 * of threads. acop_handle_fault()
239 * Given the number of threads on some of these systems, acop_handle_fault()
/linux-4.1.27/drivers/crypto/qat/qat_dh895xcc/
H A Dadf_hw_arbiter.c115 /* Map worker threads to service arbiters */ adf_init_arb()
152 /* Unmap worker threads to service arbiters */ adf_exit_arb()
/linux-4.1.27/tools/power/cpupower/utils/helpers/
H A Dhelpers.h106 /* Amount of CPU cores, packages and threads per core in the system */
109 unsigned int threads; /* per core */ member in struct:cpupower_topology
/linux-4.1.27/arch/xtensa/include/asm/
H A Dfixmap.h18 #include <linux/threads.h>
/linux-4.1.27/arch/xtensa/kernel/
H A Dprocess.c160 * childregs are not used for the kernel threads.
174 * a2, a3 are unused for userspace threads,
175 * a2 points to thread_fn, a3 holds thread_fn arg for kernel threads.
183 * the two threads (parent and child) will overflow the same frames onto the
/linux-4.1.27/arch/sh/mm/
H A Dcache-sh3.c13 #include <linux/threads.h>
/linux-4.1.27/arch/tile/include/uapi/asm/
H A Dcachectl.h24 * then allow arbitrary other threads in the same address space to see
/linux-4.1.27/arch/microblaze/kernel/
H A Dprom.c21 #include <linux/threads.h>
/linux-4.1.27/drivers/usb/usbip/
H A Dstub_dev.c53 * is used to transfer usbip requests by kernel threads. -1 is a magic number
171 /* 1. stop threads */ stub_shutdown_connection()
184 * tcp_socket is freed after threads are killed so that usbip_xmit does stub_shutdown_connection()
439 * NOTE: rx/tx threads are invoked for each usb_device. stub_disconnect()
/linux-4.1.27/arch/score/include/asm/
H A Dprocessor.h5 #include <linux/threads.h>
/linux-4.1.27/arch/openrisc/include/asm/
H A Dpgalloc.h23 #include <linux/threads.h>
/linux-4.1.27/arch/powerpc/boot/
H A Dps3-head.S34 * The PS3 has a single processor with two threads.
/linux-4.1.27/arch/m32r/include/asm/
H A Dsmp.h9 #include <linux/threads.h>
/linux-4.1.27/kernel/
H A Dfork.c92 * Minimum number of threads to boot the kernel
97 * Maximum number of threads
105 int nr_threads; /* The idle threads do not count.. */
272 u64 threads; set_max_threads() local
275 * The number of threads shall be limited such that the thread set_max_threads()
279 threads = MAX_THREADS; set_max_threads()
281 threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE, set_max_threads()
284 if (threads > max_threads_suggested) set_max_threads()
285 threads = max_threads_suggested; set_max_threads()
287 max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); set_max_threads()
1167 * all threads in the group. Holding cred_guard_mutex is not copy_seccomp()
1256 * Thread groups must share signals as well, and detached threads copy_process()
1323 * If multiple threads are within copy_process(), then this check copy_process()
2067 int threads = max_threads; sysctl_max_threads() local
2072 t.data = &threads; sysctl_max_threads()
2080 set_max_threads(threads); sysctl_max_threads()
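
set_max_threads() sizes the global thread limit from total RAM so that thread structures can only ever consume a bounded fraction of memory, then clamps the result between MIN_THREADS and MAX_THREADS. A userspace sketch of that computation; the divisor is an assumption (the expression is truncated in the snippet above), and the constants are plausible x86-64 values:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE   4096ULL
    #define THREAD_SIZE 16384ULL            /* typical x86-64 kernel stack */
    #define MIN_THREADS 20ULL
    #define MAX_THREADS 0x3fffffffULL       /* FUTEX_TID_MASK, per fork.c */

    static uint64_t clamp_u64(uint64_t v, uint64_t lo, uint64_t hi)
    {
            return v < lo ? lo : v > hi ? hi : v;
    }

    int main(void)
    {
            uint64_t totalram_pages = 4ULL << 20;   /* 4 Mi pages = 16 GiB */
            /* assumed divisor: charge each thread a few stack-sized chunks */
            uint64_t threads = totalram_pages * PAGE_SIZE / (8 * THREAD_SIZE);

            printf("max_threads = %llu\n", (unsigned long long)
                   clamp_u64(threads, MIN_THREADS, MAX_THREADS));
            return 0;
    }
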
H A Dsmpboot.c63 * idle_threads_init - Initialize idle threads for all cpus
261 /* We need to destroy also the parked threads of offline cpus */ for_each_possible_cpu()
277 * Creates and starts the threads on all online cpus.
306 * Stops all threads on all possible cpus.
/linux-4.1.27/arch/metag/tbx/
H A Dtbisoft.S10 * Support for soft threads and soft context switches
59 * Software syncronous context switch between soft threads, save only the
63 * A1GbP is global to all soft threads so not virtualised
/linux-4.1.27/arch/mips/netlogic/xlp/
H A Dwakeup.c36 #include <linux/threads.h>
193 /* spin until the hw threads sets their ready */ xlp_enable_secondary_cores()
204 * first wakeup core 0 threads xlp_wakeup_secondary_cpus()
/linux-4.1.27/arch/powerpc/kvm/
H A Dbook3s_hv_builtin.c207 /* On POWER8 for IPIs to threads in the same core, use msgsnd */ kvmhv_rm_send_ipi()
243 /* Set our bit in the threads-exiting-guest map in the 0xff00 kvmhv_commence_exit()
255 * Trigger the other threads in this vcore to exit the guest. kvmhv_commence_exit()
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
H A Dservice.c216 /* # of started threads */
218 /* # of stopped threads */
224 /* total number of threads on this partition */
226 /* threads table */
297 * dedicated reply handling threads.
497 * Common code for estimating & validating threads number. ptlrpc_server_nthreads_check()
500 * get the threads number they give it in conf::tc_nthrs_user ptlrpc_server_nthreads_check()
501 * even they did set. It's because we need to validate threads ptlrpc_server_nthreads_check()
503 * threads to keep the service healthy. ptlrpc_server_nthreads_check()
514 * threads, we give a less strict check here, it can ptlrpc_server_nthreads_check()
524 /* don't care about base threads number per partition, ptlrpc_server_nthreads_check()
548 * User wants to increase number of threads with for ptlrpc_server_nthreads_check()
550 * one thread/core because service threads are supposed to ptlrpc_server_nthreads_check()
556 * have too many threads no matter how many cores/HTs ptlrpc_server_nthreads_check()
582 CDEBUG(D_OTHER, "%s: This service may have more threads (%d) than the given soft limit (%d)\n", ptlrpc_server_nthreads_check()
679 * This includes starting serving threads , allocating and posting rqbds and
814 CERROR("Failed to start threads for service %s: %d\n", ptlrpc_register_service()
1625 * already being processed (i.e. those threads can service more high-priority
1626 * requests), or if there are enough idle threads that a later thread can do
2051 * HRT threads and further commit callbacks by checking rs_committed ptlrpc_handle_rs()
2167 * allowed to create more threads
2180 * too many requests and allowed to create more threads
2252 * Main thread body for service threads.
2255 * is woken up and one of the threads will handle it.
2270 /* NB: we will call cfs_cpt_bind() for all threads, because we ptlrpc_main()
2562 CDEBUG(D_INFO, "Stopping threads for service %s\n", ptlrpc_svcpt_stop_threads()
2604 * Stops all threads of a particular service \a svc
2624 /* We require 2 threads min, see note in ptlrpc_server_handle_request */ ptlrpc_start_threads()
2635 /* We have enough threads, don't start more. b=15759 */ ptlrpc_start_threads()
2942 * all unlinked) and no service threads, so I'm the only ptlrpc_service_for_each_part()
H A Dptlrpcd.c84 MODULE_PARM_DESC(ptlrpcd_bind_policy, "Ptlrpcd threads binding mode.");
328 * work from our partner threads. */ ptlrpcd_check()
453 * ptlrpcd threads. We also want to reduce the ptlrpcd overhead caused by
455 * CPU core. But binding all ptlrpcd threads maybe cause response delay
467 * compromise: divide the ptlrpcd threads pool into two parts. One part is
469 * core. The other part is for free mode, all the ptlrpcd threads in the
553 * setup partnership only with ptlrpcd threads ptlrpcd_bind()
/linux-4.1.27/kernel/sched/
H A Dwait.c80 * __wake_up - wake up threads blocked on a waitqueue.
82 * @mode: which threads
83 * @nr_exclusive: how many wake-one or wake-many threads to wake up
116 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
118 * @mode: which threads
119 * @nr_exclusive: how many wake-one or wake-many threads to wake up
124 * be migrated to another CPU - ie. the two threads are 'synchronized'
H A Dcompletion.c6 * interface also makes it easy to 'complete' multiple waiting threads,
41 * complete_all: - signals all threads waiting on this completion
44 * This will wake up all threads waiting on this particular completion event.
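
Note complete_all()'s other useful property: a completion remembers that it was completed, so a waiter arriving after the complete_all() call returns immediately instead of missing the wakeup. A small module sketch, assuming a context where spawning a few kthreads is acceptable:

    #include <linux/module.h>
    #include <linux/completion.h>
    #include <linux/kthread.h>

    static DECLARE_COMPLETION(setup_done);

    static int waiter_fn(void *data)
    {
            wait_for_completion(&setup_done);       /* sleep until completed */
            pr_info("waiter %ld released\n", (long)data);
            return 0;
    }

    static int __init demo_init(void)
    {
            long i;

            for (i = 0; i < 3; i++)
                    kthread_run(waiter_fn, (void *)i, "cwait-%ld", i);
            /* one call releases all three; a waiter that has not reached
             * wait_for_completion() yet will sail straight through, because
             * the completion stays completed */
            complete_all(&setup_done);
            return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
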
/linux-4.1.27/drivers/md/
H A Ddm-bufio.h38 * - Each other threads can hold at most one buffer.
/linux-4.1.27/drivers/dma/
H A Ddmatest.c42 "Number of threads to start per channel (default: 1)");
82 * @threads_per_chan: number of threads to start per channel
165 struct list_head threads; member in struct:dmatest_chan
178 list_for_each_entry(thread, &dtc->threads, node) { is_threaded_test_run()
388 * kthread_stop(). There may be multiple threads running this function
725 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { dmatest_cleanup_channel()
780 list_add_tail(&thread->node, &dtc->threads); dmatest_add_threads()
802 INIT_LIST_HEAD(&dtc->threads); dmatest_add_channel()
817 pr_info("Started %u threads using %s\n", dmatest_add_channel()
/linux-4.1.27/arch/sparc/kernel/
H A Ddevices.c11 #include <linux/threads.h>
H A Dwindows.c108 /* Try to push the windows in a threads window buffer to the
/linux-4.1.27/arch/tile/kernel/
H A Dproc.c17 #include <linux/threads.h>
/linux-4.1.27/arch/s390/include/asm/
H A Dpgalloc.h14 #include <linux/threads.h>
/linux-4.1.27/arch/mips/kernel/
H A Dprocess.c613 /* Prevent any threads from obtaining live FP context */ mips_set_process_fp_mode()
618 * If there are multiple online CPUs then wait until all threads whose mips_set_process_fp_mode()
646 * There are now no threads of the process with live FP context, so it
665 /* Allow threads to use FP again */
/linux-4.1.27/arch/mips/netlogic/xlr/
H A Dwakeup.c36 #include <linux/threads.h>
/linux-4.1.27/net/sunrpc/
H A Dsvc.c8 * Multiple threads pools and NUMAisation
542 printk("svc_destroy: no threads for serv=%p!\n", serv); svc_destroy()
698 * Create or destroy enough new threads to make the number
699 * of threads the given number. If `pool' is non-NULL, applies
700 * only to threads in that pool, otherwise round-robins between
704 * Destroying threads relies on the service threads filling in
730 /* create new threads */ svc_set_num_threads()
759 /* destroy old threads */ svc_set_num_threads()
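
svc_set_num_threads() converges the pool on a requested size: spawn workers while below the target, signal the surplus to exit when above it. A rough pthread analogue of that converge-on-target idea; it is deliberately simplified (workers poll a target instead of sleeping on a request queue, and resizes are not serialized), unlike real nfsd threads:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int target;       /* desired number of live workers */
    static atomic_int nthreads;     /* current number of live workers */

    static void *worker(void *arg)
    {
            int id = (int)(long)arg;

            pthread_detach(pthread_self());
            /* serve until the pool is shrunk below this worker's slot */
            while (id < atomic_load(&target))
                    usleep(1000);   /* placeholder for handling one request */
            atomic_fetch_sub(&nthreads, 1);
            return NULL;
    }

    static void set_num_threads(int n)
    {
            pthread_t t;
            int cur;

            atomic_store(&target, n);
            /* grow: create the missing workers; shrink: surplus workers exit */
            for (cur = atomic_load(&nthreads); cur < n; cur++) {
                    if (pthread_create(&t, NULL, worker, (void *)(long)cur) == 0)
                            atomic_fetch_add(&nthreads, 1);
            }
    }

    int main(void)
    {
            set_num_threads(4);
            sleep(1);
            set_num_threads(2);     /* workers 2 and 3 notice and exit */
            sleep(1);
            printf("live workers: %d\n", atomic_load(&nthreads));
            return 0;
    }
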
H A Dsvc_xprt.c565 * on the number of threads
580 "number of threads"); svc_check_conn_limits()
684 /* As there is a shortage of threads and this request svc_get_next_xprt()
990 * We expect svc_close_xprt() to work even when no threads are svc_close_xprt()
992 * any threads), so if the transport isn't busy, we delete svc_close_xprt()
1050 * Server threads may still be running (especially in the case where the
1055 * the close. In the case there are no such other threads,
1056 * threads running, svc_clean_up_xprts() does a simple version of a
1058 * threads, we may need to wait a little while and then check again to
1339 seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n"); svc_pool_stats_show()
/linux-4.1.27/samples/kprobes/
H A Dkretprobe_example.c44 return 1; /* Skip kernel threads */ entry_handler()
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/
H A Do2iblnd_modparams.c55 /* Number of threads in each scheduler pool which is percpt,
59 MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
/linux-4.1.27/arch/um/drivers/
H A Dchan_user.c195 * These are synchronization calls between various UML threads on the winch_thread()
196 * host - since they are not different kernel threads, we cannot use winch_thread()
/linux-4.1.27/include/uapi/linux/android/
H A Dbinder.h247 * threads waiting to service incoming transactions. When a process
328 * of looping threads it has available.
/linux-4.1.27/arch/powerpc/platforms/pseries/
H A Dsmp.c89 * At boot time, there is nothing to do for primary threads which were
236 * Mark threads which are still spinning in hold loops smp_init_pseries()
/linux-4.1.27/arch/arm64/kernel/
H A Dtopology.c81 pr_err("%s: Core has both threads and CPU\n", parse_core()
255 /* Multiprocessor system : Multi-threads per core */ store_cpu_topology()
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/socklnd/
H A Dsocklnd.h76 int ksi_nthreads_max; /* max allowed threads */
77 int ksi_nthreads; /* number of threads */
99 /* # scheduler threads in each pool while starting */
153 int ksnd_nthreads; /* # live threads */
154 int ksnd_shuttingdown; /* tell threads to exit */
/linux-4.1.27/arch/x86/kernel/cpu/
H A Dperf_event.h120 * Used to coordinate shared registers between HT threads or
125 int refcnt; /* per-core: #HT threads */
131 INTEL_EXCL_SHARED = 1, /* counter can be used by both threads */
151 int refcnt; /* per-core: #HT threads */
400 * a PMU and sometimes between PMU of sibling HT threads.
/linux-4.1.27/drivers/acpi/acpica/
H A Ddsmethod.c736 * 2) There are other threads executing the method, in which case we acpi_ds_terminate_control_method()
773 /* Are there any other threads currently executing this method? */ acpi_ds_terminate_control_method()
777 * Additional threads. Do not release the owner_id in this case, acpi_ds_terminate_control_method()
781 "*** Completed execution of one thread, %u threads remaining\n", acpi_ds_terminate_control_method()
824 /* No more threads, we can free the owner_id */ acpi_ds_terminate_control_method()
