/linux-4.1.27/tools/perf/util/ |
H A D | thread_map.c | 25 struct thread_map *threads; thread_map__new_by_pid() local 36 threads = malloc(sizeof(*threads) + sizeof(pid_t) * items); thread_map__new_by_pid() 37 if (threads != NULL) { thread_map__new_by_pid() 39 threads->map[i] = atoi(namelist[i]->d_name); thread_map__new_by_pid() 40 threads->nr = items; thread_map__new_by_pid() 47 return threads; thread_map__new_by_pid() 52 struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t)); thread_map__new_by_tid() local 54 if (threads != NULL) { thread_map__new_by_tid() 55 threads->map[0] = tid; thread_map__new_by_tid() 56 threads->nr = 1; thread_map__new_by_tid() 59 return threads; thread_map__new_by_tid() 68 struct thread_map *threads = malloc(sizeof(*threads) + thread_map__new_by_uid() local 70 if (threads == NULL) thread_map__new_by_uid() 77 threads->nr = 0; thread_map__new_by_uid() 101 while (threads->nr + items >= max_threads) { thread_map__new_by_uid() 109 tmp = realloc(threads, (sizeof(*threads) + thread_map__new_by_uid() 114 threads = tmp; thread_map__new_by_uid() 118 threads->map[threads->nr + i] = atoi(namelist[i]->d_name); thread_map__new_by_uid() 124 threads->nr += items; thread_map__new_by_uid() 130 return threads; thread_map__new_by_uid() 133 free(threads); thread_map__new_by_uid() 142 zfree(&threads); thread_map__new_by_uid() 159 struct thread_map *threads = NULL, *nt; thread_map__new_by_pid_str() local 188 nt = realloc(threads, (sizeof(*threads) + strlist__for_each() 193 threads = nt; strlist__for_each() 196 threads->map[j++] = atoi(namelist[i]->d_name); strlist__for_each() 199 threads->nr = total_tasks; strlist__for_each() 205 return threads; 213 zfree(&threads); 219 struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t)); thread_map__new_dummy() local 221 if (threads != NULL) { thread_map__new_dummy() 222 threads->map[0] = -1; thread_map__new_dummy() 223 threads->nr = 1; thread_map__new_dummy() 225 return threads; thread_map__new_dummy() 230 struct thread_map *threads = NULL, *nt; thread_map__new_by_tid_str() local 237 /* perf-stat expects threads to be generated even if tid not given */ thread_map__new_by_tid_str() 256 nt = realloc(threads, sizeof(*threads) + sizeof(pid_t) * ntasks); strlist__for_each() 261 threads = nt; strlist__for_each() 262 threads->map[ntasks - 1] = tid; strlist__for_each() 263 threads->nr = ntasks; strlist__for_each() 266 return threads; 269 zfree(&threads); 285 void thread_map__delete(struct thread_map *threads) thread_map__delete() argument 287 free(threads); thread_map__delete() 290 size_t thread_map__fprintf(struct thread_map *threads, FILE *fp) thread_map__fprintf() argument 294 threads->nr, threads->nr > 1 ? "s" : ""); thread_map__fprintf() 295 for (i = 0; i < threads->nr; ++i) thread_map__fprintf() 296 printed += fprintf(fp, "%s%d", i ? ", " : "", threads->map[i]); thread_map__fprintf()
|
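The allocation pattern these thread_map snippets share — a fixed header plus a flexible array member, sized in a single malloc() — can be sketched in isolation as below. The struct layout is inferred from the accesses shown (threads->nr, threads->map[i]); the real definition lives in thread_map.h.

    #include <stdlib.h>
    #include <sys/types.h>

    struct thread_map {
            int nr;                 /* number of valid entries in map[] */
            pid_t map[];            /* flexible array member */
    };

    /* One allocation covers the header and all nr slots, mirroring
     * malloc(sizeof(*threads) + sizeof(pid_t) * items) above. */
    static struct thread_map *thread_map__alloc(int nr)
    {
            struct thread_map *threads;

            threads = malloc(sizeof(*threads) + sizeof(pid_t) * nr);
            if (threads != NULL)
                    threads->nr = nr;
            return threads;
    }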
H A D | thread_map.h | 21 void thread_map__delete(struct thread_map *threads); 23 size_t thread_map__fprintf(struct thread_map *threads, FILE *fp); 25 static inline int thread_map__nr(struct thread_map *threads) thread_map__nr() argument 27 return threads ? threads->nr : 1; thread_map__nr()
|
H A D | values.c | 13 die("failed to allocate read_values threads arrays"); perf_read_values_init() 14 values->threads = 0; perf_read_values_init() 33 for (i = 0; i < values->threads; i++) perf_read_values_destroy() 54 die("failed to enlarge read_values threads arrays"); perf_read_values__enlarge_threads() 62 for (i = 0; i < values->threads; i++) perf_read_values__findnew_thread() 66 if (values->threads == values->threads_max) perf_read_values__findnew_thread() 69 i = values->threads++; perf_read_values__findnew_thread() 91 for (i = 0; i < values->threads; i++) { perf_read_values__enlarge_counters() 144 for (i = 0; i < values->threads; i++) { perf_read_values__display_pretty() 165 for (i = 0; i < values->threads; i++) { perf_read_values__display_pretty() 188 for (i = 0; i < values->threads; i++) { perf_read_values__display_raw() 204 for (i = 0; i < values->threads; i++) { perf_read_values__display_raw() 216 for (i = 0; i < values->threads; i++) perf_read_values__display_raw()
|
H A D | values.h | 7 int threads; member in struct:perf_read_values
|
H A D | evlist.c | 36 struct thread_map *threads) perf_evlist__init() 43 perf_evlist__set_maps(evlist, cpus, threads); perf_evlist__init() 118 thread_map__delete(evlist->threads); perf_evlist__delete() 120 evlist->threads = NULL; perf_evlist__delete() 280 return thread_map__nr(evlist->threads); perf_evlist__nr_threads() 413 int nr_threads = thread_map__nr(evlist->threads); perf_evlist__alloc_pollfd() 541 if (!evsel->system_wide && evlist->threads && thread >= 0) perf_evlist__set_sid_idx() 542 sid->tid = evlist->threads->map[thread]; perf_evlist__set_sid_idx() 754 evlist->nr_mmaps = thread_map__nr(evlist->threads); perf_evlist__alloc_mmap() 850 int nr_threads = thread_map__nr(evlist->threads); perf_evlist__mmap_per_cpu() 875 int nr_threads = thread_map__nr(evlist->threads); perf_evlist__mmap_per_thread() 1000 const struct thread_map *threads = evlist->threads; perf_evlist__mmap() local 1019 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0) evlist__for_each() 1031 evlist->threads = thread_map__new_str(target->pid, target->tid, perf_evlist__create_maps() 1034 if (evlist->threads == NULL) perf_evlist__create_maps() 1048 thread_map__delete(evlist->threads); perf_evlist__create_maps() 1049 evlist->threads = NULL; perf_evlist__create_maps() 1058 nthreads = thread_map__nr(evlist->threads); perf_evlist__apply_filters() 1079 nthreads = thread_map__nr(evlist->threads); perf_evlist__set_filter() 1247 int nthreads = thread_map__nr(evlist->threads); perf_evlist__close() 1273 evlist->threads = thread_map__new_dummy(); perf_evlist__create_syswide_maps() 1274 if (evlist->threads == NULL) perf_evlist__create_syswide_maps() 1292 * Default: one fd per CPU, all threads, aka systemwide perf_evlist__open() 1295 if (evlist->threads == NULL && evlist->cpus == NULL) { perf_evlist__open() 1304 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads); evlist__for_each() 1398 if (evlist->threads == NULL) { perf_evlist__prepare_workload() 1399 fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n", perf_evlist__prepare_workload() 1403 evlist->threads->map[0] = evlist->workload.pid; perf_evlist__prepare_workload() 35 perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, struct thread_map *threads) perf_evlist__init() argument
|
H A D | python.c | 435 struct thread_map *threads; member in struct:pyrf_thread_map 448 pthreads->threads = thread_map__new(pid, tid, uid); pyrf_thread_map__init() 449 if (pthreads->threads == NULL) pyrf_thread_map__init() 456 thread_map__delete(pthreads->threads); pyrf_thread_map__delete() 464 return pthreads->threads->nr; pyrf_thread_map__length() 471 if (i >= pthreads->threads->nr) pyrf_thread_map__item() 474 return Py_BuildValue("i", pthreads->threads->map[i]); pyrf_thread_map__item() 623 struct thread_map *threads = NULL; pyrf_evsel__open() local 626 static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL }; pyrf_evsel__open() 633 threads = ((struct pyrf_thread_map *)pthreads)->threads; pyrf_evsel__open() 643 if (perf_evsel__open(evsel, cpus, threads) < 0) { pyrf_evsel__open() 692 struct thread_map *threads; pyrf_evlist__init() local 697 threads = ((struct pyrf_thread_map *)pthreads)->threads; pyrf_evlist__init() 699 perf_evlist__init(&pevlist->evlist, cpus, threads); pyrf_evlist__init()
|
H A D | evlist.h | 51 struct thread_map *threads; member in struct:perf_evlist 65 struct thread_map *threads); 148 struct thread_map *threads) perf_evlist__set_maps() 151 evlist->threads = threads; perf_evlist__set_maps() 146 perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, struct thread_map *threads) perf_evlist__set_maps() argument
|
H A D | machine.h | 32 struct rb_root threads; member in struct:machine 213 struct target *target, struct thread_map *threads, 217 struct thread_map *threads, bool data_mmap) machine__synthesize_threads() 219 return __machine__synthesize_threads(machine, NULL, target, threads, machine__synthesize_threads() 216 machine__synthesize_threads(struct machine *machine, struct target *target, struct thread_map *threads, bool data_mmap) machine__synthesize_threads() argument
|
H A D | event.c | 188 * threads set parent pid to main thread. ie., assume main thread perf_event__synthesize_fork() 189 * spawns all threads in a process perf_event__synthesize_fork() 453 struct thread_map *threads, perf_event__synthesize_thread_map() 474 for (thread = 0; thread < threads->nr; ++thread) { perf_event__synthesize_thread_map() 477 threads->map[thread], 0, perf_event__synthesize_thread_map() 488 if ((int) comm_event->comm.pid != threads->map[thread]) { perf_event__synthesize_thread_map() 492 for (j = 0; j < threads->nr; ++j) { perf_event__synthesize_thread_map() 493 if ((int) comm_event->comm.pid == threads->map[j]) { perf_event__synthesize_thread_map() 452 perf_event__synthesize_thread_map(struct perf_tool *tool, struct thread_map *threads, perf_event__handler_t process, struct machine *machine, bool mmap_data) perf_event__synthesize_thread_map() argument
|
H A D | target.c | 2 * Helper functions for handling target threads/cpus
|
H A D | machine.c | 30 machine->threads = RB_ROOT; machine__init() 94 struct rb_node *nd = rb_first(&machine->threads); machine__delete_threads() 343 struct rb_node **p = &machine->threads.rb_node; __machine__findnew_thread() 384 rb_insert_color(&th->rb_node, &machine->threads); __machine__findnew_thread() 395 rb_erase(&th->rb_node, &machine->threads); __machine__findnew_thread() 578 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { machine__fprintf() 1262 rb_erase(&th->rb_node, &machine->threads); machine__remove_thread() 1757 for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) { machine__for_each_thread() 1773 struct target *target, struct thread_map *threads, __machine__synthesize_threads() 1777 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap); __machine__synthesize_threads() 1772 __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, struct target *target, struct thread_map *threads, perf_event__handler_t process, bool data_mmap) __machine__synthesize_threads() argument
|
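For context, machine->threads is an rb_root keyed by tid, and the snippets walk it with rb_first()/rb_next(). A kernel-style sketch of that traversal, with a minimal assumed struct thread, looks like this:

    #include <linux/rbtree.h>

    struct thread {
            struct rb_node rb_node;         /* linked into machine->threads */
            pid_t tid;
    };

    /* Same traversal shape as machine__fprintf()/machine__for_each_thread():
     * rb_first()/rb_next() walk the tree, rb_entry() recovers the thread. */
    static void walk_threads(struct rb_root *threads)
    {
            struct rb_node *nd;

            for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
                    struct thread *th = rb_entry(nd, struct thread, rb_node);

                    (void)th;               /* visit th here */
            }
    }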
/linux-4.1.27/tools/perf/tests/ |
H A D | open-syscall.c | 11 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); test__open_syscall_event() local 14 if (threads == NULL) { test__open_syscall_event() 30 if (perf_evsel__open_per_thread(evsel, threads) < 0) { test__open_syscall_event() 55 perf_evsel__close_fd(evsel, 1, threads->nr); test__open_syscall_event() 59 thread_map__delete(threads); test__open_syscall_event()
|
H A D | mmap-thread-lookup.c | 26 static struct thread_data threads[THREADS]; variable in typeref:struct:thread_data 75 struct thread_data *td = &threads[i]; thread_create() 95 struct thread_data *td0 = &threads[0]; threads_create() 112 struct thread_data *td0 = &threads[0]; threads_destroy() 121 err = pthread_join(threads[i].pt, NULL); threads_destroy() 157 * The threads_create will not return before all threads mmap_events() 163 TEST_ASSERT_VAL("failed to create threads", !threads_create()); mmap_events() 174 TEST_ASSERT_VAL("failed to destroy threads", !threads_destroy()); mmap_events() 182 struct thread_data *td = &threads[i]; mmap_events() 209 * This test creates 'THREADS' number of threads (including 212 * When threads are created, we synthesize them with both
|
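The create/join pattern this test relies on — a fixed array of threads, one pthread_join() per entry — reduces to the following self-contained sketch (worker body and THREADS count are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    #define THREADS 8

    static pthread_t threads[THREADS];

    static void *worker(void *arg)
    {
            printf("thread %ld running\n", (long)arg);
            return NULL;
    }

    int main(void)
    {
            long i;

            for (i = 0; i < THREADS; i++)
                    if (pthread_create(&threads[i], NULL, worker, (void *)i))
                            return 1;
            for (i = 0; i < THREADS; i++)
                    pthread_join(threads[i], NULL);  /* reap in creation order */
            return 0;
    }

Build with -pthread.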
H A D | thread-mg-share.c | 23 * with several threads and checks they properly share and test__thread_mg_share() 33 /* create process with 4 threads */ test__thread_mg_share() 42 TEST_ASSERT_VAL("failed to create threads", test__thread_mg_share() 86 * because we've already released all the threads. test__thread_mg_share()
|
H A D | mmap-basic.c | 22 struct thread_map *threads; test__basic_mmap() local 36 threads = thread_map__new(-1, getpid(), UINT_MAX); test__basic_mmap() 37 if (threads == NULL) { test__basic_mmap() 63 perf_evlist__set_maps(evlist, cpus, threads); test__basic_mmap() 80 if (perf_evsel__open(evsels[i], cpus, threads) < 0) { test__basic_mmap() 143 threads = NULL; 147 thread_map__delete(threads);
|
H A D | keep-tracking.c | 63 struct thread_map *threads = NULL; test__keep_tracking() local 70 threads = thread_map__new(-1, getpid(), UINT_MAX); test__keep_tracking() 71 CHECK_NOT_NULL__(threads); test__keep_tracking() 79 perf_evlist__set_maps(evlist, cpus, threads); test__keep_tracking() 148 thread_map__delete(threads); test__keep_tracking()
|
H A D | open-syscall-all-cpus.c | 14 struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX); test__open_syscall_event_on_all_cpus() local 17 if (threads == NULL) { test__open_syscall_event_on_all_cpus() 41 if (perf_evsel__open(evsel, cpus, threads) < 0) { test__open_syscall_event_on_all_cpus() 109 perf_evsel__close_fd(evsel, 1, threads->nr); test__open_syscall_event_on_all_cpus() 113 thread_map__delete(threads); test__open_syscall_event_on_all_cpus()
|
H A D | task-exit.c | 56 * Create maps of threads and cpus to monitor. In this case test__task_exit() 57 * we start with all threads and cpus (-1, -1) but then in test__task_exit() 62 evlist->threads = thread_map__new_by_tid(-1); test__task_exit() 63 if (!evlist->cpus || !evlist->threads) { test__task_exit()
|
H A D | perf-time-to-tsc.c | 48 struct thread_map *threads = NULL; test__perf_time_to_tsc() local 60 threads = thread_map__new(-1, getpid(), UINT_MAX); test__perf_time_to_tsc() 61 CHECK_NOT_NULL__(threads); test__perf_time_to_tsc() 69 perf_evlist__set_maps(evlist, cpus, threads); test__perf_time_to_tsc()
|
H A D | sw-clock.c | 54 evlist->threads = thread_map__new_by_tid(getpid()); __test__sw_clock_freq() 55 if (!evlist->cpus || !evlist->threads) { __test__sw_clock_freq()
|
H A D | code-reading.c | 402 struct thread_map *threads = NULL; do_test_code_reading() local 444 threads = thread_map__new_by_tid(pid); do_test_code_reading() 445 if (!threads) { do_test_code_reading() 450 ret = perf_event__synthesize_thread_map(NULL, threads, do_test_code_reading() 478 perf_evlist__set_maps(evlist, cpus, threads); do_test_code_reading() 543 thread_map__delete(threads); do_test_code_reading()
|
H A D | open-syscall-tp-fields.c | 48 evlist->threads->map[0] = getpid(); test__syscall_open_tp_fields()
|
H A D | switch-tracking.c | 321 struct thread_map *threads = NULL; test__switch_tracking() local 329 threads = thread_map__new(-1, getpid(), UINT_MAX); test__switch_tracking() 330 if (!threads) { test__switch_tracking() 347 perf_evlist__set_maps(evlist, cpus, threads); test__switch_tracking() 564 thread_map__delete(threads);
|
H A D | perf-record.c | 70 * Create maps of threads and cpus to monitor. In this case test__PERF_RECORD() 71 * we start with all threads and cpus (-1, -1) but then in test__PERF_RECORD()
|
/linux-4.1.27/arch/sparc/include/asm/ |
H A D | cpudata.h | 6 #include <linux/threads.h>
|
H A D | smp_64.h | 9 #include <linux/threads.h>
|
H A D | ptrace.h | 9 #include <linux/threads.h>
|
H A D | backoff.h | 7 * When multiple threads compete on an atomic operation, it is
|
H A D | smp_32.h | 9 #include <linux/threads.h>
|
/linux-4.1.27/arch/sh/include/asm/ |
H A D | hardirq.h | 4 #include <linux/threads.h>
|
H A D | mmu.h | 37 #include <linux/threads.h>
|
H A D | fixmap.h | 17 #include <linux/threads.h>
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | cputhreads.h | 7 * Mapping of threads to cores 10 * threads per core and the same number for each core in the system 11 * (though it would work if some processors had fewer threads as long 34 * @threads: a cpumask of threads 42 static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads) cpu_thread_mask_to_cores() argument 50 if (cpumask_intersects(threads, &tmp)) cpu_thread_mask_to_cores()
|
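A much-simplified, kernel-style sketch of the idea behind cpu_thread_mask_to_cores() follows: scan one core's worth of thread bits at a time and mark the core if any of its threads is set. A constant threads_per_core is assumed, as the header's comment requires; the real helper differs in detail.

    #include <linux/cpumask.h>

    static void thread_mask_to_cores(const struct cpumask *threads,
                                     struct cpumask *cores,
                                     int threads_per_core)
    {
            int base, cpu;

            cpumask_clear(cores);
            for (base = 0; base < nr_cpu_ids; base += threads_per_core) {
                    for (cpu = base; cpu < base + threads_per_core; cpu++) {
                            if (cpu < nr_cpu_ids &&
                                cpumask_test_cpu(cpu, threads)) {
                                    cpumask_set_cpu(base, cores);
                                    break;
                            }
                    }
            }
    }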
H A D | hardirq.h | 4 #include <linux/threads.h>
|
H A D | dbell.h | 16 #include <linux/threads.h>
|
H A D | fixmap.h | 21 #include <linux/threads.h>
|
H A D | irq.h | 13 #include <linux/threads.h>
|
H A D | pgalloc-32.h | 4 #include <linux/threads.h>
|
H A D | paca.h | 114 /* Shared by all threads of a core -- points to tcd of first thread */ 156 /* Per-core mask tracking idle threads and a lock bit-[L][TTTTTTTT] */ 161 /* Mask to denote subcore sibling threads */
|
/linux-4.1.27/tools/virtio/virtio-trace/ |
H A D | trace-agent-ctl.c | 2 * Controller of read/write threads for virtio-trace 65 /* Wakes rw-threads when they are sleeping */ wait_order() 89 * control read/write threads by handling global_run_operation 117 * this controller wakes all read/write threads. rw_ctl_loop() 121 pr_debug("Wake up all read/write threads\n"); rw_ctl_loop() 125 * threads will wait for notification from Host. rw_ctl_loop() 128 pr_debug("Stop all read/write threads\n"); rw_ctl_loop()
|
H A D | trace-agent.h | 15 * @rw_ti: structure managing information of read/write threads 43 /* use for stopping rw threads */ 51 /* for controller of read/write threads */
|
H A D | trace-agent.c | 65 /* read/write threads init */ agent_info_new() 158 /* init read/write threads */ agent_info_init() 178 /* init controller of read/write threads */ agent_info_init() 225 /* Start all read/write threads */ agent_main_loop() 231 /* Finish all read/write threads */ agent_main_loop()
|
/linux-4.1.27/tools/perf/bench/ |
H A D | futex-wake.c | 4 * futex-wake: Block a bunch of threads on a futex and wake'em up, N at a time. 24 /* all threads will block on the same futex */ 42 OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"), 43 OPT_UINTEGER('w', "nwakes", &nwakes, "Specify amount of threads to wake at once"), 73 printf("Wokeup %d of %d threads in %.4f ms (+-%.2f%%)\n", print_summary() 88 /* create and block all threads */ block_threads() 138 printf("Run summary [PID %d]: blocking on %d threads (at [%s] futex %p), " bench_futex_wake() 153 /* create, launch & block all threads */ bench_futex_wake() 156 /* make sure all threads are already blocked */ bench_futex_wake() 165 /* Ok, all threads are patiently blocked, start waking folks up */ bench_futex_wake() 176 printf("[Run %d]: Wokeup %d of %d threads in %.4f ms\n", bench_futex_wake()
|
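The block/wake primitive this benchmark measures has no glibc wrapper, so the usual pattern is a raw syscall. A minimal sketch, assuming Linux:

    #include <stdint.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Blocks while *uaddr still equals expected. */
    static long futex_wait(uint32_t *uaddr, uint32_t expected)
    {
            return syscall(SYS_futex, uaddr, FUTEX_WAIT, expected,
                           NULL, NULL, 0);
    }

    /* Wakes up to nwake waiters; returns how many were woken. */
    static long futex_wake(uint32_t *uaddr, int nwake)
    {
            return syscall(SYS_futex, uaddr, FUTEX_WAKE, nwake,
                           NULL, NULL, 0);
    }

Waking N threads at a time, as futex-wake does, is repeated futex_wake() calls until the woken count reaches the number of blocked threads.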
H A D | sched-pipe.c | 46 OPT_BOOLEAN('T', "threaded", &threaded, "Specify threads/process based task setup"), 80 struct thread_data threads[2], *td; bench_sched_pipe() local 103 td = threads + t; bench_sched_pipe() 120 td = threads + t; bench_sched_pipe() 127 td = threads + t; bench_sched_pipe() 138 worker_thread(threads + 0); bench_sched_pipe() 141 worker_thread(threads + 1); bench_sched_pipe() 154 loops, threaded ? "threads" : "processes"); bench_sched_pipe()
|
H A D | futex-requeue.c | 4 * futex-requeue: Block a bunch of threads on futex1 and requeue them 41 OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"), 42 OPT_UINTEGER('q', "nrequeue", &nrequeue, "Specify amount of threads to requeue at once"), 59 printf("Requeued %d of %d threads in %.4f ms (+-%.2f%%)\n", print_summary() 87 /* create and block all threads */ block_threads() 138 printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), " bench_futex_requeue() 153 /* create, launch & block all threads */ bench_futex_requeue() 156 /* make sure all threads are already blocked */ bench_futex_requeue() 165 /* Ok, all threads are patiently blocked, start requeueing */ bench_futex_requeue() 183 printf("[Run %d]: Requeued %d of %d threads in %.4f ms\n", bench_futex_requeue()
|
H A D | futex-hash.c | 8 * many threads and futexes as possible. 45 OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"), 47 OPT_UINTEGER('f', "futexes", &nfutexes, "Specify amount of futexes per threads"), 93 /* inform all threads that we're done for the day */ toggle_done() 141 printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n", bench_futex_hash()
|
H A D | numa.c | 119 /* Global, read-writable area, accessible to all processes and threads: */ 135 struct thread_data *threads; member in struct:global_info 155 OPT_INTEGER('t', "nr_threads" , &p0.nr_threads, "number of threads per process"), 404 * threads of this process, or only be accessed by this thread: 521 td = g->threads + t; parse_setup_cpu_list() 643 td = g->threads + t; parse_setup_node_list() 802 g->threads[task_nr].curr_cpu = cpu; update_curr_cpu() 809 * Count the number of nodes a process's threads 828 td = g->threads + task_nr; count_process_nodes() 846 * Count the number of distinct process-threads a node contains. 864 td = g->threads + task_nr; count_node_processes() 897 /* Strong convergence: all threads compress on a single node: */ calc_convergence_compression() 931 struct thread_data *td = g->threads + t; calc_convergence() 936 /* Not all threads have written it yet: */ calc_convergence() 1157 this_cpu = g->threads[task_nr].curr_cpu; worker_thread() 1208 * A worker process starts a couple of threads: 1228 td = g->threads + task_nr; worker_process() 1243 td = g->threads + task_nr; worker_process() 1288 ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; init_thread_data() 1291 g->threads = zalloc_shared_data(size); init_thread_data() 1294 struct thread_data *td = g->threads + t; init_thread_data() 1309 ssize_t size = sizeof(*g->threads)*g->p.nr_tasks; deinit_thread_data() 1311 free_data(g->threads, size); deinit_thread_data() 1431 /* All threads try to acquire it, this way we can wait for them to start up: */ __bench_numa() 1455 /* Wait for all the threads to start up: */ __bench_numa() 1466 /* This will start all threads: */ __bench_numa() 1480 tprintf(" threads initialized in %.6f seconds.\n", startup_sec); __bench_numa() 1503 u64 thread_runtime_ns = g->threads[t].runtime_ns; __bench_numa()
|
H A D | sched-messaging.c | 309 num_fds, thread_mode ? "threads" : "processes"); bench_sched_messaging() 312 thread_mode ? "threads" : "processes"); bench_sched_messaging()
|
/linux-4.1.27/arch/ia64/include/asm/ |
H A D | hardirq.h | 17 #include <linux/threads.h>
|
H A D | percpu.h | 16 #include <linux/threads.h>
|
H A D | numa.h | 21 #include <linux/threads.h>
|
H A D | pgalloc.h | 20 #include <linux/threads.h>
|
H A D | switch_to.h | 19 * Context switch from one thread to another. If the two threads have
|
H A D | smp.h | 14 #include <linux/threads.h>
|
/linux-4.1.27/arch/ia64/kernel/ |
H A D | nr-irqs.c | 11 #include <linux/threads.h>
|
/linux-4.1.27/arch/m68k/include/asm/ |
H A D | hardirq.h | 4 #include <linux/threads.h>
|
/linux-4.1.27/net/rds/ |
H A D | Makefile | 3 recv.o send.o stats.o sysctl.o threads.o transport.o \
|
/linux-4.1.27/tools/perf/python/ |
H A D | twatch.py | 20 threads = perf.thread_map() 25 evsel.open(cpus = cpus, threads = threads); 26 evlist = perf.evlist(cpus, threads)
|
/linux-4.1.27/include/asm-generic/ |
H A D | hardirq.h | 5 #include <linux/threads.h>
|
/linux-4.1.27/include/linux/ |
H A D | threads.h | 6 * The default limit for the nr of threads is now in 7 * /proc/sys/kernel/threads-max.
|
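The runtime limit this header points at can be read back from procfs; a minimal userspace sketch:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/threads-max", "r");
            long max;

            if (f && fscanf(f, "%ld", &max) == 1)
                    printf("threads-max: %ld\n", max);
            if (f)
                    fclose(f);
            return 0;
    }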
H A D | signal.h | 303 * Kernel threads handle their own signals. Let the signal code allow_signal() 330 * terminate - kill the process, i.e. all threads in the group, 333 * coredump - write a core dump file describing all threads using 334 * the same mm and then kill all those threads 335 * stop - stop all the threads in the group, i.e. TASK_STOPPED state 384 * When SIGCONT is sent, it resumes the process (all threads in the group)
|
H A D | pid_namespace.h | 8 #include <linux/threads.h>
|
H A D | kernel_stat.h | 5 #include <linux/threads.h>
|
H A D | percpu_counter.h | 12 #include <linux/threads.h>
|
H A D | torture.h | 29 #include <linux/threads.h>
|
H A D | completion.h | 17 * Completions currently use a FIFO to queue threads that have to wait for
|
H A D | kthread.h | 3 /* Simple interface for creating and stopping kernel threads without mess. */
|
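The "simple interface" the header advertises pairs kthread_run() with kthread_stop(). A kernel-style sketch (names and sleep interval are illustrative):

    #include <linux/kthread.h>
    #include <linux/delay.h>
    #include <linux/err.h>

    static struct task_struct *worker;

    static int worker_fn(void *data)
    {
            while (!kthread_should_stop())
                    msleep(100);    /* poll until kthread_stop() is called */
            return 0;
    }

    static int start_worker(void)
    {
            worker = kthread_run(worker_fn, NULL, "example-worker");
            return IS_ERR(worker) ? PTR_ERR(worker) : 0;
    }

    static void stop_worker(void)
    {
            kthread_stop(worker);   /* blocks until worker_fn() returns */
    }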
H A D | irqdesc.h | 38 * @threads_oneshot: bitfield to handle shared oneshot threads 39 * @threads_active: number of irqaction threads currently running
|
/linux-4.1.27/include/uapi/linux/ |
H A D | wait.h | 11 #define __WNOTHREAD 0x20000000 /* Don't wait on children of other threads in this group */
|
/linux-4.1.27/arch/arm/include/asm/ |
H A D | hardirq.h | 5 #include <linux/threads.h>
|
H A D | smp.h | 13 #include <linux/threads.h>
|
/linux-4.1.27/arch/metag/mm/ |
H A D | l2cache.c | 105 * Prevent other threads writing during the writeback, otherwise the meta_l2c_disable() 131 * threads are safe to continue executing, however we must not init the meta_l2c_enable() 133 * this operation should still be atomic with other threads. meta_l2c_enable() 157 * atomic with other threads. meta_l2c_pf_enable() 175 * Prevent other threads writing during the writeback. This also meta_l2c_flush()
|
/linux-4.1.27/tools/testing/selftests/timers/ |
H A D | threadtest.c | 67 /* flag other threads */ checklist() 157 printf("using independent threads\n"); main() 162 printf(" -n: number of threads\n"); main() 163 printf(" -i: use independent threads\n"); main() 177 printf("Testing consistency with %i threads for %ld seconds: ", thread_count, runtime); main()
|
/linux-4.1.27/kernel/locking/ |
H A D | rtmutex-tester.c | 36 static struct task_struct *threads[MAX_RT_TEST_THREADS]; variable in typeref:struct:task_struct 133 * Schedule replacement for rtsem_down(). Only called for threads with 146 if (threads[tid] == current) schedule_rt_mutex_test() 306 ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar); sysfs_test_command() 314 ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar); sysfs_test_command() 320 send_sig(SIGHUP, threads[tid], 0); sysfs_test_command() 328 wake_up_process(threads[tid]); sysfs_test_command() 348 tsk = threads[td->dev.id]; sysfs_test_status() 383 threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id); init_test_thread() 384 if (IS_ERR(threads[id])) init_test_thread() 385 return PTR_ERR(threads[id]); init_test_thread()
|
H A D | rtmutex_common.h | 21 * threads to provoke lock stealing and testing of complex boosting scenarios.
|
/linux-4.1.27/block/ |
H A D | blk-mq-cpumap.c | 7 #include <linux/threads.h> 72 * threads per cores. Map sibling threads to the same for_each_possible_cpu()
|
/linux-4.1.27/arch/alpha/include/asm/ |
H A D | topology.h | 5 #include <linux/threads.h>
|
H A D | smp.h | 4 #include <linux/threads.h>
|
/linux-4.1.27/fs/nfsd/ |
H A D | stats.h | 23 unsigned int th_cnt; /* number of available threads */ 25 * of available threads were in use */
|
H A D | stats.c | 13 * th <threads> <fullcnt> <10%-20%> <20%-30%> ... <90%-100%> <100%> 15 * and number of times that all threads were in use
|
H A D | nfsctl.c | 405 * write_threads - Start NFSD, or report the current number of running threads 413 * running NFSD threads; 422 * number of NFSD threads to start 428 * running NFSD threads; 455 * write_pool_threads - Set or report the current number of threads per pool 467 * threads to start in each pool 472 * number of NFSD threads in each pool; 478 /* if size > 0, look for an array of number of threads per node write_pool_threads() 479 * and apply them then write out number of threads per node as reply write_pool_threads() 494 * writing to the threads file but NOT the pool_threads write_pool_threads() 495 * file, sorry. Report zero threads. write_pool_threads() 1138 [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR}, nfsd_fill_super()
|
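From userspace, write_threads is driven by writing an ASCII count to the nfsd threads file. A hedged sketch, assuming the conventional /proc/fs/nfsd mount point and root privileges:

    #include <stdio.h>

    static int set_nfsd_threads(int n)
    {
            FILE *f = fopen("/proc/fs/nfsd/threads", "w");

            if (!f)
                    return -1;
            fprintf(f, "%d\n", n);
            return fclose(f);       /* 0 on success */
    }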
H A D | nfssvc.c | 38 * of nfsd threads must exist and each must listed in ->sp_all_threads in each 221 * threads is modified after nfsd start.) nfsd_startup_generic() 315 * any threads--if we get shut down before any threads are nfsd_last_thread() 474 /* enforce a global maximum number of threads */ nfsd_set_nrthreads() 513 * Adjust the number of threads and return the new number of threads.
|
H A D | netns.h | 108 * to '0' which means that it bases this on the number of threads.
|
/linux-4.1.27/tools/usb/ |
H A D | ffs-test.c | 309 } threads[] = { variable in typeref:struct:thread 351 if (t != threads) { cleanup_thread() 618 init_thread(threads); main() 619 ep0_init(threads, legacy_descriptors); main() 621 for (i = 1; i < sizeof threads / sizeof *threads; ++i) main() 622 init_thread(threads + i); main() 624 for (i = 1; i < sizeof threads / sizeof *threads; ++i) main() 625 start_thread(threads + i); main() 627 start_thread_helper(threads); main() 629 for (i = 1; i < sizeof threads / sizeof *threads; ++i) main() 630 join_thread(threads + i); main()
|
/linux-4.1.27/mm/ |
H A D | vmacache.c | 9 * Flush vma caches for threads that share a given mm. 12 * exclusively and other threads accessing the vma cache will 26 * to worry about other threads' seqnum. Current's vmacache_flush_all()
|
H A D | balloon_compaction.c | 148 * compaction threads can race against page migration functions balloon_page_isolate() 160 * Prevent concurrent compaction threads from isolating balloon_page_isolate() 180 * concurrent isolation threads attempting to re-isolate it. balloon_page_putback()
|
/linux-4.1.27/arch/tile/include/asm/ |
H A D | hardirq.h | 18 #include <linux/threads.h>
|
H A D | switch_to.h | 53 * Kernel threads can check to see if they need to migrate their 55 * threads, we defer until they are returning to user-space.
|
H A D | highmem.h | 25 #include <linux/threads.h>
|
H A D | kgdb.h | 40 * Longer buffer is needed to list all threads.
|
H A D | fixmap.h | 24 #include <linux/threads.h>
|
/linux-4.1.27/arch/x86/include/asm/ |
H A D | hardirq.h | 4 #include <linux/threads.h>
|
H A D | pgtable_32.h | 18 #include <linux/threads.h>
|
H A D | kgdb.h | 12 * Longer buffer is needed to list all threads
|
H A D | highmem.h | 24 #include <linux/threads.h>
|
H A D | pgtable_64.h | 15 #include <linux/threads.h>
|
/linux-4.1.27/arch/mn10300/mm/ |
H A D | cache.c | 14 #include <linux/threads.h>
|
H A D | cache-smp.c | 14 #include <linux/threads.h>
|
/linux-4.1.27/arch/parisc/include/asm/ |
H A D | hardirq.h | 11 #include <linux/threads.h>
|
H A D | smp.h | 15 #include <linux/threads.h> /* for NR_CPUS */
|
H A D | tlbflush.h | 55 /* Except for very small threads, flushing the whole TLB is flush_tlb_mm()
|
/linux-4.1.27/arch/powerpc/kernel/ |
H A D | dbell.c | 15 #include <linux/threads.h>
|
H A D | epapr_hcalls.S | 10 #include <linux/threads.h>
|
H A D | idle_power4.S | 10 #include <linux/threads.h>
|
H A D | machine_kexec_64.c | 222 * the device tree and assume primary threads are online and query secondary 223 * threads via RTAS to online them if required. If we don't online primary 224 * threads, they will be stuck. However, we also online secondary threads as we 226 * threads as offline -- and again, these CPUs will be stuck. 228 * So, we online all CPUs that should be running, including secondary threads.
|
H A D | idle_book3e.S | 12 #include <linux/threads.h>
|
H A D | idle_e500.S | 13 #include <linux/threads.h>
|
H A D | idle_power7.S | 10 #include <linux/threads.h> 55 * Used by threads when the lock bit of core_idle_state is set. 193 common_enter: /* common code for all the threads entering sleep or winkle */ 411 * Common to all threads.
|
H A D | paca.c | 141 * On systems with hardware multi-threading, there are two threads allocate_slb_shadows() 177 /* For now -- if we have threads this will be adjusted later */ initialise_paca()
|
/linux-4.1.27/arch/blackfin/include/asm/ |
H A D | smp.h | 12 #include <linux/threads.h>
|
H A D | cplbinit.h | 14 #include <linux/threads.h>
|
H A D | kgdb.h | 16 * Longer buffer is needed to list all threads.
|
/linux-4.1.27/arch/cris/include/asm/ |
H A D | pgalloc.h | 4 #include <linux/threads.h>
|
/linux-4.1.27/kernel/power/ |
H A D | process.c | 157 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator. 159 * On success, returns 0. On failure, -errno and only the kernel threads are 203 /* No other threads should have PF_SUSPEND_TASK set */ for_each_process_thread() 224 pr_info("Restarting kernel threads ... "); thaw_kernel_threads()
|
H A D | swap.c | 428 /* Maximum number of threads for compression/decompression. */ 489 unsigned run_threads; /* nr current threads */ 593 * We'll limit the number of threads for compression to limit memory save_image_lzo() 624 * Start the compression threads. save_image_lzo() 636 "PM: Cannot start compression threads\n"); save_image_lzo() 1084 * We'll limit the number of threads for decompression to limit memory load_image_lzo() 1115 * Start the decompression threads. load_image_lzo() 1127 "PM: Cannot start decompression threads\n"); load_image_lzo()
|
H A D | user.c | 279 * It is necessary to thaw kernel threads here, because snapshot_ioctl() 281 * SNAPSHOT_FREE. In that case, if kernel threads were not snapshot_ioctl()
|
/linux-4.1.27/lib/ |
H A D | smp_processor_id.c | 22 * Kernel threads bound to a single CPU can safely use check_preemption_disabled()
|
/linux-4.1.27/arch/powerpc/platforms/powernv/ |
H A D | subcore.c | 44 * threads is as follows: 86 * unsplit while all other threads NAP. 89 * the hardware that if all threads except 0 are napping, the hardware should 92 * Non-zero threads are sent to a NAP loop, they don't exit the loop until they 95 * Core 0 spins waiting for the hardware to see all the other threads napping 98 * Once thread 0 sees the unsplit, it IPIs the secondary threads to wake them 113 * To begin with secondary threads are sent to an assembly routine. There they 414 * We need all threads in a core to be present to split/unsplit so subcore_init()
|
/linux-4.1.27/tools/perf/scripts/python/ |
H A D | export-to-postgresql.py | 108 do_query(query, 'CREATE TABLE threads (' 205 '(SELECT pid FROM threads WHERE id = thread_id) AS pid,' 206 '(SELECT tid FROM threads WHERE id = thread_id) AS tid,' 299 copy_output_file(thread_file, "threads") 327 do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)') 339 do_query(query, 'ALTER TABLE threads ' 341 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)') 344 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)') 352 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),' 363 'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
|
H A D | sched-migration.py | 29 threads = { 0 : "idle"} variable 32 return "%s:%d" % (threads[pid], pid) 342 threads[prev_pid] = prev_comm 343 threads[next_pid] = next_comm
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/ |
H A D | lustre_net.h | 144 * Constants determine how threads are created for ptlrpc service. 146 * ?_NTHRS_INIT # threads to create for each service partition on 149 * threads for the service while initializing. 150 * ?_NTHRS_BASE # threads should be created at least for each 152 * It's the low-water mark of threads upper-limit 154 * ?_THR_FACTOR # threads can be added on threads upper-limit for 158 * ?_NTHRS_MAX # overall threads can be created for a service, 162 * has ?_NTHRS_BASE threads, which means total threads 176 * partition has 4 cores, then actual number of service threads on each 180 * Total number of threads for the service is: 186 * partition has 8 cores, then actual number of service threads on each 190 * Total number of threads for the service is: 196 * partition has 12 cores, then actual number of service threads on each 200 * Total number of threads for the service is: 204 * as upper limit of threads number for each partition: 213 * to guarantee that each partition has at least MDS_NTHRS_BASE(64) threads 214 * to keep service healthy, so total number of threads will just be 2048. 224 * b) bind service threads on a few partitions, see modparameters of 234 * LDLM threads constants: 236 * Given 8 as factor and 24 as base threads number 239 * On 4-core machine we will have 24 + 8 * 4 = 56 threads. 243 * threads for each partition and total threads number will be 112. 247 * threads for each partition to keep service healthy, so total threads 250 * So with these constants, threads number will be at the similar level 1797 * List of active threads in svc->srv_threads 1895 * notifies wakes one of the service threads to process new incoming request. 1977 /** threads # should be created for each partition on initializing */ 1979 /** limit of threads number for each partition */ 2050 /** # of starting threads */ 2052 /** # of stopping threads, reserved for shrinking threads */ 2054 /** # running threads */ 2056 /** service threads list */ 2062 * threads starting & stopping are also protected by this lock. 2082 * all threads sleep on this. This wait-queue is signalled when new 2470 /* threads increasing factor for each CPU */ 2472 /* service threads # to start on each partition while initializing */ 2475 * low water of threads # upper-limit on each partition while running, 2476 * service availability may be impacted if threads number is lower 2481 /* "soft" limit for total threads number */ 2483 /* user specified threads number, it will be validated due to 2486 /* set NUMA node affinity for service threads */ 2905 /* all ptlrpcd threads are free mode */ 2907 /* all ptlrpcd threads are bound mode */ 2913 * If kernel supports NUMA, pthrpcd threads are binded and
|
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/linux/ |
H A D | linux-prim.c | 51 * waiting threads, which is not always desirable because all threads will 54 * be polluted by different threads.
|
/linux-4.1.27/arch/s390/appldata/ |
H A D | appldata_os.c | 69 u32 nr_running; /* number of runnable threads */ 70 u32 nr_threads; /* number of threads */ 75 u32 nr_iowait; /* number of blocked threads
|
/linux-4.1.27/include/drm/ttm/ |
H A D | ttm_execbuf_util.h | 77 * If the function detects a deadlock due to multiple threads trying to 78 * reserve the same buffers in reverse order, all threads except one will 80 * CPU write reservations to be cleared, and for other threads to
|
/linux-4.1.27/arch/mips/netlogic/common/ |
H A D | reset.S | 80 * in this region. Called from all HW threads. 90 * L1D cache has to be flushed before enabling threads in XLP. 212 * Wake up sibling threads from the initial thread in a core. 215 /* core L1D flush before enable threads */ 220 /* Enable hw threads by writing to MAP_THREADMODE of the core */ 240 * when running 4 threads per core
|
H A D | smpboot.S | 57 /* Called by the boot cpu to wake up its sibling threads */ 59 /* CPU register contents lost when enabling threads, save them first */
|
/linux-4.1.27/arch/um/os-Linux/ |
H A D | util.c | 79 * has no effect within UML's kernel threads. 98 * UML helper threads must not handle SIGWINCH/INT/TERM
|
/linux-4.1.27/arch/metag/kernel/perf/ |
H A D | perf_event.h | 25 * itself; each counter can be assigned to multiple hardware threads at any 28 * threads' events, regardless of the thread selected.
|
/linux-4.1.27/arch/mips/include/asm/ |
H A D | cdmm.h | 37 * Any CPU pinned threads/timers should be disabled. 39 * CPU pinned threads/timers can be restarted.
|
H A D | fixmap.h | 19 #include <linux/threads.h>
|
H A D | smp.h | 17 #include <linux/threads.h>
|
/linux-4.1.27/arch/powerpc/platforms/cell/ |
H A D | smp.c | 63 * At boot time, there is nothing to do for primary threads which were 150 /* Mark threads which are still spinning in hold loops. */ smp_init_cell()
|
/linux-4.1.27/drivers/oprofile/ |
H A D | oprofile_stats.c | 13 #include <linux/threads.h>
|
/linux-4.1.27/arch/x86/kernel/apic/ |
H A D | probe_64.c | 11 #include <linux/threads.h>
|
H A D | x2apic_phys.c | 1 #include <linux/threads.h>
|
H A D | apic_noop.c | 12 #include <linux/threads.h>
|
H A D | bigsmp_32.c | 6 #include <linux/threads.h>
|
/linux-4.1.27/arch/um/include/asm/ |
H A D | fixmap.h | 8 #include <linux/threads.h>
|
/linux-4.1.27/arch/um/kernel/ |
H A D | time.c | 10 #include <linux/threads.h>
|
/linux-4.1.27/arch/metag/include/asm/ |
H A D | tlbflush.h | 35 /* flush TLB entries for all hardware threads */ __flush_tlb()
|
H A D | fixmap.h | 18 #include <linux/threads.h>
|
H A D | pgalloc.h | 4 #include <linux/threads.h>
|
H A D | barrier.h | 61 * external reordering of writes before the fence on other threads with writes
|
H A D | global_lock.h | 85 * This immediately allows other hardware threads to continue executing and one
|
/linux-4.1.27/arch/microblaze/include/asm/ |
H A D | fixmap.h | 25 #include <linux/threads.h>
|
H A D | tlbflush.h | 17 #include <linux/threads.h>
|
/linux-4.1.27/include/linux/sched/ |
H A D | prio.h | 16 * user-space. This allows kernel threads to set their
|
/linux-4.1.27/arch/mn10300/include/asm/ |
H A D | hardirq.h | 15 #include <linux/threads.h>
|
H A D | kgdb.h | 18 * Longer buffer is needed to list all threads
|
H A D | pgalloc.h | 15 #include <linux/threads.h>
|
H A D | smp.h | 25 #include <linux/threads.h>
|
/linux-4.1.27/arch/mn10300/kernel/ |
H A D | mn10300-watchdog-low.S | 19 #include <linux/threads.h>
|
/linux-4.1.27/arch/arm/vfp/ |
H A D | entry.S | 23 @ r10 = this thread's thread_info structure
|
/linux-4.1.27/arch/arm64/include/asm/ |
H A D | hardirq.h | 20 #include <linux/threads.h>
|
H A D | smp.h | 19 #include <linux/threads.h>
|
H A D | cpu_ops.h | 20 #include <linux/threads.h>
|
/linux-4.1.27/arch/c6x/include/asm/ |
H A D | irq.h | 17 #include <linux/threads.h>
|
/linux-4.1.27/arch/arm/mach-shmobile/ |
H A D | headsmp.S | 15 #include <linux/threads.h>
|
/linux-4.1.27/include/linux/sunrpc/ |
H A D | svc.h | 39 * Pool of threads and temporary sockets. Generally there is only 49 unsigned int sp_nrthreads; /* # of threads in pool */ 50 struct list_head sp_all_threads; /* all server threads */ 63 * a list of idle threads waiting for input. 71 unsigned int sv_nrthreads; /* # of server threads */ 74 * on number of threads. */ 96 * adding threads */ 97 svc_thread_fn sv_function; /* main function for threads */ 112 * change the number of threads. Horrible, but there it is. 223 struct list_head rq_all; /* all threads list */
|
/linux-4.1.27/arch/powerpc/mm/ |
H A D | icswx.c | 109 * If this is a threaded process then there might be other threads use_cop() 149 * If this is a threaded process then there might be other threads drop_cop() 235 * the threads (see smp_call_function(sync_cop, mm, 1)), but acop_handle_fault() 237 * of threads. acop_handle_fault() 239 * Given the number of threads on some of these systems, acop_handle_fault()
|
/linux-4.1.27/drivers/crypto/qat/qat_dh895xcc/ |
H A D | adf_hw_arbiter.c | 115 /* Map worker threads to service arbiters */ adf_init_arb() 152 /* Unmap worker threads to service arbiters */ adf_exit_arb()
|
/linux-4.1.27/tools/power/cpupower/utils/helpers/ |
H A D | helpers.h | 106 /* Amount of CPU cores, packages and threads per core in the system */ 109 unsigned int threads; /* per core */ member in struct:cpupower_topology
|
/linux-4.1.27/arch/xtensa/include/asm/ |
H A D | fixmap.h | 18 #include <linux/threads.h>
|
/linux-4.1.27/arch/xtensa/kernel/ |
H A D | process.c | 160 * childregs are not used for the kernel threads. 174 * a2, a3 are unused for userspace threads, 175 * a2 points to thread_fn, a3 holds thread_fn arg for kernel threads. 183 * the two threads (parent and child) will overflow the same frames onto the
|
/linux-4.1.27/arch/sh/mm/ |
H A D | cache-sh3.c | 13 #include <linux/threads.h>
|
/linux-4.1.27/arch/tile/include/uapi/asm/ |
H A D | cachectl.h | 24 * then allow arbitrary other threads in the same address space to see
|
/linux-4.1.27/arch/microblaze/kernel/ |
H A D | prom.c | 21 #include <linux/threads.h>
|
/linux-4.1.27/drivers/usb/usbip/ |
H A D | stub_dev.c | 53 * is used to transfer usbip requests by kernel threads. -1 is a magic number 171 /* 1. stop threads */ stub_shutdown_connection() 184 * tcp_socket is freed after threads are killed so that usbip_xmit does stub_shutdown_connection() 439 * NOTE: rx/tx threads are invoked for each usb_device. stub_disconnect()
|
/linux-4.1.27/arch/score/include/asm/ |
H A D | processor.h | 5 #include <linux/threads.h>
|
/linux-4.1.27/arch/openrisc/include/asm/ |
H A D | pgalloc.h | 23 #include <linux/threads.h>
|
/linux-4.1.27/arch/powerpc/boot/ |
H A D | ps3-head.S | 34 * The PS3 has a single processor with two threads.
|
/linux-4.1.27/arch/m32r/include/asm/ |
H A D | smp.h | 9 #include <linux/threads.h>
|
/linux-4.1.27/kernel/ |
H A D | fork.c | 92 * Minimum number of threads to boot the kernel 97 * Maximum number of threads 105 int nr_threads; /* The idle threads do not count.. */ 272 u64 threads; set_max_threads() local 275 * The number of threads shall be limited such that the thread set_max_threads() 279 threads = MAX_THREADS; set_max_threads() 281 threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE, set_max_threads() 284 if (threads > max_threads_suggested) set_max_threads() 285 threads = max_threads_suggested; set_max_threads() 287 max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); set_max_threads() 1167 * all threads in the group. Holding cred_guard_mutex is not copy_seccomp() 1256 * Thread groups must share signals as well, and detached threads copy_process() 1323 * If multiple threads are within copy_process(), then this check copy_process() 2067 int threads = max_threads; sysctl_max_threads() local 2072 t.data = &threads; sysctl_max_threads() 2080 set_max_threads(threads); sysctl_max_threads()
|
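The sizing rule in set_max_threads() can be restated in userspace terms: let thread structures use at most about one eighth of RAM, then clamp between a floor and a hard cap. The constants below are illustrative stand-ins for the kernel's MIN_THREADS, MAX_THREADS and THREAD_SIZE.

    #include <stdint.h>

    #define MIN_THREADS 20u          /* illustrative floor */
    #define MAX_THREADS 0x3fffffffu  /* illustrative hard cap */

    static uint64_t compute_max_threads(uint64_t totalram_pages,
                                        uint64_t page_size,
                                        uint64_t thread_cost)
    {
            /* mirrors the div64_u64(totalram_pages * PAGE_SIZE,
             * THREAD_SIZE * 8) shape in the snippet */
            uint64_t threads = totalram_pages * page_size / (thread_cost * 8);

            if (threads < MIN_THREADS)
                    threads = MIN_THREADS;
            if (threads > MAX_THREADS)
                    threads = MAX_THREADS;
            return threads;
    }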
H A D | smpboot.c | 63 * idle_threads_init - Initialize idle threads for all cpus 261 /* We need to destroy also the parked threads of offline cpus */ for_each_possible_cpu() 277 * Creates and starts the threads on all online cpus. 306 * Stops all threads on all possible cpus.
|
/linux-4.1.27/arch/metag/tbx/ |
H A D | tbisoft.S | 10 * Support for soft threads and soft context switches 59 * Software synchronous context switch between soft threads, save only the 63 * A1GbP is global to all soft threads so not virtualised
|
/linux-4.1.27/arch/mips/netlogic/xlp/ |
H A D | wakeup.c | 36 #include <linux/threads.h> 193 /* spin until the hw threads sets their ready */ xlp_enable_secondary_cores() 204 * first wakeup core 0 threads xlp_wakeup_secondary_cpus()
|
/linux-4.1.27/arch/powerpc/kvm/ |
H A D | book3s_hv_builtin.c | 207 /* On POWER8 for IPIs to threads in the same core, use msgsnd */ kvmhv_rm_send_ipi() 243 /* Set our bit in the threads-exiting-guest map in the 0xff00 kvmhv_commence_exit() 255 * Trigger the other threads in this vcore to exit the guest. kvmhv_commence_exit()
|
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/ |
H A D | service.c | 216 /* # of started threads */ 218 /* # of stopped threads */ 224 /* total number of threads on this partition */ 226 /* threads table */ 297 * dedicated reply handling threads. 497 * Common code for estimating & validating threads number. ptlrpc_server_nthreads_check() 500 * get the threads number they give it in conf::tc_nthrs_user ptlrpc_server_nthreads_check() 501 * even they did set. It's because we need to validate threads ptlrpc_server_nthreads_check() 503 * threads to keep the service healthy. ptlrpc_server_nthreads_check() 514 * threads, we give a less strict check here, it can ptlrpc_server_nthreads_check() 524 /* don't care about base threads number per partition, ptlrpc_server_nthreads_check() 548 * User wants to increase number of threads with for ptlrpc_server_nthreads_check() 550 * one thread/core because service threads are supposed to ptlrpc_server_nthreads_check() 556 * have too many threads no matter how many cores/HTs ptlrpc_server_nthreads_check() 582 CDEBUG(D_OTHER, "%s: This service may have more threads (%d) than the given soft limit (%d)\n", ptlrpc_server_nthreads_check() 679 * This includes starting serving threads , allocating and posting rqbds and 814 CERROR("Failed to start threads for service %s: %d\n", ptlrpc_register_service() 1625 * already being processed (i.e. those threads can service more high-priority 1626 * requests), or if there are enough idle threads that a later thread can do 2051 * HRT threads and further commit callbacks by checking rs_committed ptlrpc_handle_rs() 2167 * allowed to create more threads 2180 * too many requests and allowed to create more threads 2252 * Main thread body for service threads. 2255 * is woken up and one of the threads will handle it. 2270 /* NB: we will call cfs_cpt_bind() for all threads, because we ptlrpc_main() 2562 CDEBUG(D_INFO, "Stopping threads for service %s\n", ptlrpc_svcpt_stop_threads() 2604 * Stops all threads of a particular service \a svc 2624 /* We require 2 threads min, see note in ptlrpc_server_handle_request */ ptlrpc_start_threads() 2635 /* We have enough threads, don't start more. b=15759 */ ptlrpc_start_threads() 2942 * all unlinked) and no service threads, so I'm the only ptlrpc_service_for_each_part()
|
H A D | ptlrpcd.c | 84 MODULE_PARM_DESC(ptlrpcd_bind_policy, "Ptlrpcd threads binding mode."); 328 * work from our partner threads. */ ptlrpcd_check() 453 * ptlrpcd threads. We also want to reduce the ptlrpcd overhead caused by 455 * CPU core. But binding all ptlrpcd threads maybe cause response delay 467 * compromise: divide the ptlrpcd threads pool into two parts. One part is 469 * core. The other part is for free mode, all the ptlrpcd threads in the 553 * setup partnership only with ptlrpcd threads ptlrpcd_bind()
|
/linux-4.1.27/kernel/sched/ |
H A D | wait.c | 80 * __wake_up - wake up threads blocked on a waitqueue. 82 * @mode: which threads 83 * @nr_exclusive: how many wake-one or wake-many threads to wake up 116 * __wake_up_sync_key - wake up threads blocked on a waitqueue. 118 * @mode: which threads 119 * @nr_exclusive: how many wake-one or wake-many threads to wake up 124 * be migrated to another CPU - ie. the two threads are 'synchronized'
|
H A D | completion.c | 6 * interface also makes it easy to 'complete' multiple waiting threads, 41 * complete_all: - signals all threads waiting on this completion 44 * This will wake up all threads waiting on this particular completion event.
|
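Completions tie the wait.c and completion.c snippets together: one side sleeps on the completion, the other signals it, and complete_all() releases every waiter at once. A kernel-style sketch:

    #include <linux/completion.h>

    static DECLARE_COMPLETION(setup_done);

    static int waiter(void *unused)
    {
            wait_for_completion(&setup_done);   /* sleeps until complete*() */
            return 0;
    }

    static void finisher(void)
    {
            complete_all(&setup_done);  /* wakes all threads waiting above */
    }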
/linux-4.1.27/drivers/md/ |
H A D | dm-bufio.h | 38 * - Each other threads can hold at most one buffer.
|
/linux-4.1.27/drivers/dma/ |
H A D | dmatest.c | 42 "Number of threads to start per channel (default: 1)"); 82 * @threads_per_chan: number of threads to start per channel 165 struct list_head threads; member in struct:dmatest_chan 178 list_for_each_entry(thread, &dtc->threads, node) { is_threaded_test_run() 388 * kthread_stop(). There may be multiple threads running this function 725 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) { dmatest_cleanup_channel() 780 list_add_tail(&thread->node, &dtc->threads); dmatest_add_threads() 802 INIT_LIST_HEAD(&dtc->threads); dmatest_add_channel() 817 pr_info("Started %u threads using %s\n", dmatest_add_channel()
|
/linux-4.1.27/arch/sparc/kernel/ |
H A D | devices.c | 11 #include <linux/threads.h>
|
H A D | windows.c | 108 /* Try to push the windows in a threads window buffer to the
|
/linux-4.1.27/arch/tile/kernel/ |
H A D | proc.c | 17 #include <linux/threads.h>
|
/linux-4.1.27/arch/s390/include/asm/ |
H A D | pgalloc.h | 14 #include <linux/threads.h>
|
/linux-4.1.27/arch/mips/kernel/ |
H A D | process.c | 613 /* Prevent any threads from obtaining live FP context */ mips_set_process_fp_mode() 618 * If there are multiple online CPUs then wait until all threads whose mips_set_process_fp_mode() 646 * There are now no threads of the process with live FP context, so it 665 /* Allow threads to use FP again */
|
/linux-4.1.27/arch/mips/netlogic/xlr/ |
H A D | wakeup.c | 36 #include <linux/threads.h>
|
/linux-4.1.27/net/sunrpc/ |
H A D | svc.c | 8 * Multiple threads pools and NUMAisation 542 printk("svc_destroy: no threads for serv=%p!\n", serv); svc_destroy() 698 * Create or destroy enough new threads to make the number 699 * of threads the given number. If `pool' is non-NULL, applies 700 * only to threads in that pool, otherwise round-robins between 704 * Destroying threads relies on the service threads filling in 730 /* create new threads */ svc_set_num_threads() 759 /* destroy old threads */ svc_set_num_threads()
|
H A D | svc_xprt.c | 565 * on the number of threads 580 "number of threads"); svc_check_conn_limits() 684 /* As there is a shortage of threads and this request svc_get_next_xprt() 990 * We expect svc_close_xprt() to work even when no threads are svc_close_xprt() 992 * any threads), so if the transport isn't busy, we delete svc_close_xprt() 1050 * Server threads may still be running (especially in the case where the 1055 * the close. In the case there are no such other threads, 1056 * threads running, svc_clean_up_xprts() does a simple version of a 1058 * threads, we may need to wait a little while and then check again to 1339 seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n"); svc_pool_stats_show()
|
/linux-4.1.27/samples/kprobes/ |
H A D | kretprobe_example.c | 44 return 1; /* Skip kernel threads */ entry_handler()
|
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/ |
H A D | o2iblnd_modparams.c | 55 /* Number of threads in each scheduler pool which is percpt, 59 MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
|
/linux-4.1.27/arch/um/drivers/ |
H A D | chan_user.c | 195 * These are synchronization calls between various UML threads on the winch_thread() 196 * host - since they are not different kernel threads, we cannot use winch_thread()
|
/linux-4.1.27/include/uapi/linux/android/ |
H A D | binder.h | 247 * threads waiting to service incoming transactions. When a process 328 * of looping threads it has available.
|
/linux-4.1.27/arch/powerpc/platforms/pseries/ |
H A D | smp.c | 89 * At boot time, there is nothing to do for primary threads which were 236 * Mark threads which are still spinning in hold loops smp_init_pseries()
|
/linux-4.1.27/arch/arm64/kernel/ |
H A D | topology.c | 81 pr_err("%s: Core has both threads and CPU\n", parse_core() 255 /* Multiprocessor system : Multi-threads per core */ store_cpu_topology()
|
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/socklnd/ |
H A D | socklnd.h | 76 int ksi_nthreads_max; /* max allowed threads */ 77 int ksi_nthreads; /* number of threads */ 99 /* # scheduler threads in each pool while starting */ 153 int ksnd_nthreads; /* # live threads */ 154 int ksnd_shuttingdown; /* tell threads to exit */
|
/linux-4.1.27/arch/x86/kernel/cpu/ |
H A D | perf_event.h | 120 * Used to coordinate shared registers between HT threads or 125 int refcnt; /* per-core: #HT threads */ 131 INTEL_EXCL_SHARED = 1, /* counter can be used by both threads */ 151 int refcnt; /* per-core: #HT threads */ 400 * a PMU and sometimes between PMU of sibling HT threads.
|
/linux-4.1.27/drivers/acpi/acpica/ |
H A D | dsmethod.c | 736 * 2) There are other threads executing the method, in which case we acpi_ds_terminate_control_method() 773 /* Are there any other threads currently executing this method? */ acpi_ds_terminate_control_method() 777 * Additional threads. Do not release the owner_id in this case, acpi_ds_terminate_control_method() 781 "*** Completed execution of one thread, %u threads remaining\n", acpi_ds_terminate_control_method() 824 /* No more threads, we can free the owner_id */ acpi_ds_terminate_control_method()
|