Searched refs:tk (Results 1 - 34 of 34) sorted by relevance

/linux-4.1.27/arch/x86/kernel/
vsyscall_gtod.c
27 void update_vsyscall(struct timekeeper *tk) update_vsyscall() argument
34 vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode; update_vsyscall()
35 vdata->cycle_last = tk->tkr_mono.cycle_last; update_vsyscall()
36 vdata->mask = tk->tkr_mono.mask; update_vsyscall()
37 vdata->mult = tk->tkr_mono.mult; update_vsyscall()
38 vdata->shift = tk->tkr_mono.shift; update_vsyscall()
40 vdata->wall_time_sec = tk->xtime_sec; update_vsyscall()
41 vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec; update_vsyscall()
43 vdata->monotonic_time_sec = tk->xtime_sec update_vsyscall()
44 + tk->wall_to_monotonic.tv_sec; update_vsyscall()
45 vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec update_vsyscall()
46 + ((u64)tk->wall_to_monotonic.tv_nsec update_vsyscall()
47 << tk->tkr_mono.shift); update_vsyscall()
49 (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { update_vsyscall()
51 ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift; update_vsyscall()
55 vdata->wall_time_coarse_sec = tk->xtime_sec; update_vsyscall()
56 vdata->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >> update_vsyscall()
57 tk->tkr_mono.shift); update_vsyscall()
60 vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec; update_vsyscall()
62 vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec; update_vsyscall()
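
The x86 block above is the kernel half of the vDSO gettimeofday fast path: update_vsyscall() snapshots the timekeeper into a page that user space converts back into a timestamp. As a minimal, hypothetical sketch of how the exported fields combine (the struct and function names below are invented, and the real vDSO additionally takes a sequence counter so it reads the snapshot consistently):

```c
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Invented mirror of the fields update_vsyscall() exports above. */
struct gtod_snapshot {
	uint64_t cycle_last;      /* counter value when the snapshot was taken */
	uint64_t mask;            /* valid bits of the counter */
	uint32_t mult, shift;     /* cycles -> shifted-nanoseconds conversion */
	uint64_t wall_time_sec;   /* tk->xtime_sec */
	uint64_t wall_time_snsec; /* tk->tkr_mono.xtime_nsec, i.e. ns << shift */
};

/* Rebuild CLOCK_REALTIME from a snapshot plus a current counter read. */
static void gtod_read_wall(const struct gtod_snapshot *s, uint64_t cycles_now,
			   uint64_t *sec, uint64_t *nsec)
{
	/* Accumulate in shifted nanoseconds, exactly like xtime_nsec. */
	uint64_t ns = s->wall_time_snsec +
		      ((cycles_now - s->cycle_last) & s->mask) * s->mult;

	ns >>= s->shift;                  /* back to plain nanoseconds */
	*sec  = s->wall_time_sec + ns / NSEC_PER_SEC;
	*nsec = ns % NSEC_PER_SEC;
}
```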
/linux-4.1.27/kernel/trace/
trace_kprobe.c
43 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk) trace_kprobe_is_return() argument
45 return tk->rp.handler != NULL; trace_kprobe_is_return()
48 static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk) trace_kprobe_symbol() argument
50 return tk->symbol ? tk->symbol : "unknown"; trace_kprobe_symbol()
53 static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk) trace_kprobe_offset() argument
55 return tk->rp.kp.offset; trace_kprobe_offset()
58 static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk) trace_kprobe_has_gone() argument
60 return !!(kprobe_gone(&tk->rp.kp)); trace_kprobe_has_gone()
63 static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk, trace_kprobe_within_module() argument
67 const char *name = trace_kprobe_symbol(tk); trace_kprobe_within_module()
71 static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk) trace_kprobe_is_on_module() argument
73 return !!strchr(trace_kprobe_symbol(tk), ':'); trace_kprobe_is_on_module()
76 static int register_kprobe_event(struct trace_kprobe *tk);
77 static int unregister_kprobe_event(struct trace_kprobe *tk);
282 struct trace_kprobe *tk; alloc_trace_kprobe() local
285 tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL); alloc_trace_kprobe()
286 if (!tk) alloc_trace_kprobe()
290 tk->symbol = kstrdup(symbol, GFP_KERNEL); alloc_trace_kprobe()
291 if (!tk->symbol) alloc_trace_kprobe()
293 tk->rp.kp.symbol_name = tk->symbol; alloc_trace_kprobe()
294 tk->rp.kp.offset = offs; alloc_trace_kprobe()
296 tk->rp.kp.addr = addr; alloc_trace_kprobe()
299 tk->rp.handler = kretprobe_dispatcher; alloc_trace_kprobe()
301 tk->rp.kp.pre_handler = kprobe_dispatcher; alloc_trace_kprobe()
308 tk->tp.call.class = &tk->tp.class; alloc_trace_kprobe()
309 tk->tp.call.name = kstrdup(event, GFP_KERNEL); alloc_trace_kprobe()
310 if (!tk->tp.call.name) alloc_trace_kprobe()
318 tk->tp.class.system = kstrdup(group, GFP_KERNEL); alloc_trace_kprobe()
319 if (!tk->tp.class.system) alloc_trace_kprobe()
322 INIT_LIST_HEAD(&tk->list); alloc_trace_kprobe()
323 INIT_LIST_HEAD(&tk->tp.files); alloc_trace_kprobe()
324 return tk; alloc_trace_kprobe()
326 kfree(tk->tp.call.name); alloc_trace_kprobe()
327 kfree(tk->symbol); alloc_trace_kprobe()
328 kfree(tk); alloc_trace_kprobe()
332 static void free_trace_kprobe(struct trace_kprobe *tk) free_trace_kprobe() argument
336 for (i = 0; i < tk->tp.nr_args; i++) free_trace_kprobe()
337 traceprobe_free_probe_arg(&tk->tp.args[i]); free_trace_kprobe()
339 kfree(tk->tp.call.class->system); free_trace_kprobe()
340 kfree(tk->tp.call.name); free_trace_kprobe()
341 kfree(tk->symbol); free_trace_kprobe()
342 kfree(tk); free_trace_kprobe()
348 struct trace_kprobe *tk; find_trace_kprobe() local
350 list_for_each_entry(tk, &probe_list, list) find_trace_kprobe()
351 if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 && find_trace_kprobe()
352 strcmp(tk->tp.call.class->system, group) == 0) find_trace_kprobe()
353 return tk; find_trace_kprobe()
362 enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) enable_trace_kprobe() argument
376 list_add_tail_rcu(&link->list, &tk->tp.files); enable_trace_kprobe()
378 tk->tp.flags |= TP_FLAG_TRACE; enable_trace_kprobe()
380 tk->tp.flags |= TP_FLAG_PROFILE; enable_trace_kprobe()
382 if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) { enable_trace_kprobe()
383 if (trace_kprobe_is_return(tk)) enable_trace_kprobe()
384 ret = enable_kretprobe(&tk->rp); enable_trace_kprobe()
386 ret = enable_kprobe(&tk->rp.kp); enable_trace_kprobe()
397 disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file) disable_trace_kprobe() argument
404 link = find_event_file_link(&tk->tp, file); disable_trace_kprobe()
412 if (!list_empty(&tk->tp.files)) disable_trace_kprobe()
415 tk->tp.flags &= ~TP_FLAG_TRACE; disable_trace_kprobe()
417 tk->tp.flags &= ~TP_FLAG_PROFILE; disable_trace_kprobe()
419 if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) { disable_trace_kprobe()
420 if (trace_kprobe_is_return(tk)) disable_trace_kprobe()
421 disable_kretprobe(&tk->rp); disable_trace_kprobe()
423 disable_kprobe(&tk->rp.kp); disable_trace_kprobe()
444 static int __register_trace_kprobe(struct trace_kprobe *tk) __register_trace_kprobe() argument
448 if (trace_probe_is_registered(&tk->tp)) __register_trace_kprobe()
451 for (i = 0; i < tk->tp.nr_args; i++) __register_trace_kprobe()
452 traceprobe_update_arg(&tk->tp.args[i]); __register_trace_kprobe()
455 if (trace_probe_is_enabled(&tk->tp)) __register_trace_kprobe()
456 tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED; __register_trace_kprobe()
458 tk->rp.kp.flags |= KPROBE_FLAG_DISABLED; __register_trace_kprobe()
460 if (trace_kprobe_is_return(tk)) __register_trace_kprobe()
461 ret = register_kretprobe(&tk->rp); __register_trace_kprobe()
463 ret = register_kprobe(&tk->rp.kp); __register_trace_kprobe()
466 tk->tp.flags |= TP_FLAG_REGISTERED; __register_trace_kprobe()
469 trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret); __register_trace_kprobe()
470 if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { __register_trace_kprobe()
477 tk->rp.kp.addr); __register_trace_kprobe()
486 static void __unregister_trace_kprobe(struct trace_kprobe *tk) __unregister_trace_kprobe() argument
488 if (trace_probe_is_registered(&tk->tp)) { __unregister_trace_kprobe()
489 if (trace_kprobe_is_return(tk)) __unregister_trace_kprobe()
490 unregister_kretprobe(&tk->rp); __unregister_trace_kprobe()
492 unregister_kprobe(&tk->rp.kp); __unregister_trace_kprobe()
493 tk->tp.flags &= ~TP_FLAG_REGISTERED; __unregister_trace_kprobe()
495 if (tk->rp.kp.symbol_name) __unregister_trace_kprobe()
496 tk->rp.kp.addr = NULL; __unregister_trace_kprobe()
501 static int unregister_trace_kprobe(struct trace_kprobe *tk) unregister_trace_kprobe() argument
504 if (trace_probe_is_enabled(&tk->tp)) unregister_trace_kprobe()
508 if (unregister_kprobe_event(tk)) unregister_trace_kprobe()
511 __unregister_trace_kprobe(tk); unregister_trace_kprobe()
512 list_del(&tk->list); unregister_trace_kprobe()
518 static int register_trace_kprobe(struct trace_kprobe *tk) register_trace_kprobe() argument
526 old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call), register_trace_kprobe()
527 tk->tp.call.class->system); register_trace_kprobe()
536 ret = register_kprobe_event(tk); register_trace_kprobe()
543 ret = __register_trace_kprobe(tk); register_trace_kprobe()
545 unregister_kprobe_event(tk); register_trace_kprobe()
547 list_add_tail(&tk->list, &probe_list); register_trace_kprobe()
559 struct trace_kprobe *tk; trace_kprobe_module_callback() local
567 list_for_each_entry(tk, &probe_list, list) { trace_kprobe_module_callback()
568 if (trace_kprobe_within_module(tk, mod)) { trace_kprobe_module_callback()
570 __unregister_trace_kprobe(tk); trace_kprobe_module_callback()
571 ret = __register_trace_kprobe(tk); trace_kprobe_module_callback()
575 ftrace_event_name(&tk->tp.call), trace_kprobe_module_callback()
609 struct trace_kprobe *tk; create_trace_kprobe() local
656 tk = find_trace_kprobe(event, group); create_trace_kprobe()
657 if (!tk) { create_trace_kprobe()
663 ret = unregister_trace_kprobe(tk); create_trace_kprobe()
665 free_trace_kprobe(tk); create_trace_kprobe()
712 tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc, create_trace_kprobe()
714 if (IS_ERR(tk)) { create_trace_kprobe()
716 (int)PTR_ERR(tk)); create_trace_kprobe()
717 return PTR_ERR(tk); create_trace_kprobe()
723 struct probe_arg *parg = &tk->tp.args[i]; create_trace_kprobe()
726 tk->tp.nr_args++; create_trace_kprobe()
754 tk->tp.args, i)) { create_trace_kprobe()
762 ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg, create_trace_kprobe()
771 ret = register_trace_kprobe(tk); create_trace_kprobe()
777 free_trace_kprobe(tk); create_trace_kprobe()
783 struct trace_kprobe *tk; release_all_trace_kprobes() local
788 list_for_each_entry(tk, &probe_list, list) release_all_trace_kprobes()
789 if (trace_probe_is_enabled(&tk->tp)) { release_all_trace_kprobes()
795 tk = list_entry(probe_list.next, struct trace_kprobe, list); release_all_trace_kprobes()
796 ret = unregister_trace_kprobe(tk); release_all_trace_kprobes()
799 free_trace_kprobe(tk); release_all_trace_kprobes()
827 struct trace_kprobe *tk = v; probes_seq_show() local
830 seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p'); probes_seq_show()
831 seq_printf(m, ":%s/%s", tk->tp.call.class->system, probes_seq_show()
832 ftrace_event_name(&tk->tp.call)); probes_seq_show()
834 if (!tk->symbol) probes_seq_show()
835 seq_printf(m, " 0x%p", tk->rp.kp.addr); probes_seq_show()
836 else if (tk->rp.kp.offset) probes_seq_show()
837 seq_printf(m, " %s+%u", trace_kprobe_symbol(tk), probes_seq_show()
838 tk->rp.kp.offset); probes_seq_show()
840 seq_printf(m, " %s", trace_kprobe_symbol(tk)); probes_seq_show()
842 for (i = 0; i < tk->tp.nr_args; i++) probes_seq_show()
843 seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm); probes_seq_show()
888 struct trace_kprobe *tk = v; probes_profile_seq_show() local
891 ftrace_event_name(&tk->tp.call), tk->nhit, probes_profile_seq_show()
892 tk->rp.kp.nmissed); probes_profile_seq_show()
919 __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, __kprobe_trace_func() argument
927 struct ftrace_event_call *call = &tk->tp.call; __kprobe_trace_func()
937 dsize = __get_data_size(&tk->tp, regs); __kprobe_trace_func()
938 size = sizeof(*entry) + tk->tp.size + dsize; __kprobe_trace_func()
947 entry->ip = (unsigned long)tk->rp.kp.addr; __kprobe_trace_func()
948 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); __kprobe_trace_func()
955 kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs) kprobe_trace_func() argument
959 list_for_each_entry_rcu(link, &tk->tp.files, list) kprobe_trace_func()
960 __kprobe_trace_func(tk, regs, link->file); kprobe_trace_func()
966 __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, __kretprobe_trace_func() argument
975 struct ftrace_event_call *call = &tk->tp.call; __kretprobe_trace_func()
985 dsize = __get_data_size(&tk->tp, regs); __kretprobe_trace_func()
986 size = sizeof(*entry) + tk->tp.size + dsize; __kretprobe_trace_func()
995 entry->func = (unsigned long)tk->rp.kp.addr; __kretprobe_trace_func()
997 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); __kretprobe_trace_func()
1004 kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, kretprobe_trace_func() argument
1009 list_for_each_entry_rcu(link, &tk->tp.files, list) kretprobe_trace_func()
1010 __kretprobe_trace_func(tk, ri, regs, link->file); kretprobe_trace_func()
1088 struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data; kprobe_event_define_fields() local
1092 for (i = 0; i < tk->tp.nr_args; i++) { kprobe_event_define_fields()
1093 struct probe_arg *parg = &tk->tp.args[i]; kprobe_event_define_fields()
1111 struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data; kretprobe_event_define_fields() local
1116 for (i = 0; i < tk->tp.nr_args; i++) { kretprobe_event_define_fields()
1117 struct probe_arg *parg = &tk->tp.args[i]; kretprobe_event_define_fields()
1135 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs) kprobe_perf_func() argument
1137 struct ftrace_event_call *call = &tk->tp.call; kprobe_perf_func()
1151 dsize = __get_data_size(&tk->tp, regs); kprobe_perf_func()
1152 __size = sizeof(*entry) + tk->tp.size + dsize; kprobe_perf_func()
1160 entry->ip = (unsigned long)tk->rp.kp.addr; kprobe_perf_func()
1162 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); kprobe_perf_func()
1169 kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, kretprobe_perf_func() argument
1172 struct ftrace_event_call *call = &tk->tp.call; kretprobe_perf_func()
1186 dsize = __get_data_size(&tk->tp, regs); kretprobe_perf_func()
1187 __size = sizeof(*entry) + tk->tp.size + dsize; kretprobe_perf_func()
1195 entry->func = (unsigned long)tk->rp.kp.addr; kretprobe_perf_func()
1197 store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize); kretprobe_perf_func()
1212 struct trace_kprobe *tk = (struct trace_kprobe *)event->data; kprobe_register() local
1217 return enable_trace_kprobe(tk, file); kprobe_register()
1219 return disable_trace_kprobe(tk, file); kprobe_register()
1223 return enable_trace_kprobe(tk, NULL); kprobe_register()
1225 return disable_trace_kprobe(tk, NULL); kprobe_register()
1238 struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp); kprobe_dispatcher() local
1240 tk->nhit++; kprobe_dispatcher()
1242 if (tk->tp.flags & TP_FLAG_TRACE) kprobe_dispatcher()
1243 kprobe_trace_func(tk, regs); kprobe_dispatcher()
1245 if (tk->tp.flags & TP_FLAG_PROFILE) kprobe_dispatcher()
1246 kprobe_perf_func(tk, regs); kprobe_dispatcher()
1255 struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp); kretprobe_dispatcher() local
1257 tk->nhit++; kretprobe_dispatcher()
1259 if (tk->tp.flags & TP_FLAG_TRACE) kretprobe_dispatcher()
1260 kretprobe_trace_func(tk, ri, regs); kretprobe_dispatcher()
1262 if (tk->tp.flags & TP_FLAG_PROFILE) kretprobe_dispatcher()
1263 kretprobe_perf_func(tk, ri, regs); kretprobe_dispatcher()
1277 static int register_kprobe_event(struct trace_kprobe *tk) register_kprobe_event() argument
1279 struct ftrace_event_call *call = &tk->tp.call; register_kprobe_event()
1284 if (trace_kprobe_is_return(tk)) { register_kprobe_event()
1291 if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) register_kprobe_event()
1300 call->data = tk; register_kprobe_event()
1311 static int unregister_kprobe_event(struct trace_kprobe *tk) unregister_kprobe_event() argument
1316 ret = trace_remove_event_call(&tk->tp.call); unregister_kprobe_event()
1318 kfree(tk->tp.call.print_fmt); unregister_kprobe_event()
1368 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr) find_trace_probe_file() argument
1373 if (file->event_call == &tk->tp.call) find_trace_probe_file()
1387 struct trace_kprobe *tk; kprobe_trace_self_tests_init() local
1405 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM); kprobe_trace_self_tests_init()
1406 if (WARN_ON_ONCE(tk == NULL)) { kprobe_trace_self_tests_init()
1410 file = find_trace_probe_file(tk, top_trace_array()); kprobe_trace_self_tests_init()
1415 enable_trace_kprobe(tk, file); kprobe_trace_self_tests_init()
1426 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); kprobe_trace_self_tests_init()
1427 if (WARN_ON_ONCE(tk == NULL)) { kprobe_trace_self_tests_init()
1431 file = find_trace_probe_file(tk, top_trace_array()); kprobe_trace_self_tests_init()
1436 enable_trace_kprobe(tk, file); kprobe_trace_self_tests_init()
1446 tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM); kprobe_trace_self_tests_init()
1447 if (WARN_ON_ONCE(tk == NULL)) { kprobe_trace_self_tests_init()
1451 file = find_trace_probe_file(tk, top_trace_array()); kprobe_trace_self_tests_init()
1456 disable_trace_kprobe(tk, file); kprobe_trace_self_tests_init()
1459 tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM); kprobe_trace_self_tests_init()
1460 if (WARN_ON_ONCE(tk == NULL)) { kprobe_trace_self_tests_init()
1464 file = find_trace_probe_file(tk, top_trace_array()); kprobe_trace_self_tests_init()
1469 disable_trace_kprobe(tk, file); kprobe_trace_self_tests_init()
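
The trace_kprobe.c hits above are the kernel side of the kprobe_events text interface: create_trace_kprobe() parses the command string, register_trace_kprobe() arms the probe, and kprobe_register()/enable_trace_kprobe() toggle it when the event is switched on. A rough user-space usage sketch (the tracefs mount point, the event name "myprobe" and the probed symbol are illustrative assumptions, and the kernel must be built with kprobe event support):

```c
#include <stdio.h>

int main(void)
{
	/* Define the probe; 'p' makes a plain kprobe, 'r' would make a
	 * kretprobe, which is what trace_kprobe_is_return() distinguishes. */
	FILE *f = fopen("/sys/kernel/debug/tracing/kprobe_events", "a");

	if (!f) {
		perror("kprobe_events");
		return 1;
	}
	fprintf(f, "p:myprobe do_sys_open\n");
	fclose(f);

	/* Writing the per-event enable file lands in enable_trace_kprobe()
	 * via kprobe_register(). */
	f = fopen("/sys/kernel/debug/tracing/events/kprobes/myprobe/enable", "w");
	if (!f) {
		perror("enable");
		return 1;
	}
	fprintf(f, "1\n");
	fclose(f);
	return 0;
}
```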
/linux-4.1.27/kernel/time/
timekeeping.c
67 static inline void tk_normalize_xtime(struct timekeeper *tk) tk_normalize_xtime() argument
69 while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) { tk_normalize_xtime()
70 tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift; tk_normalize_xtime()
71 tk->xtime_sec++; tk_normalize_xtime()
75 static inline struct timespec64 tk_xtime(struct timekeeper *tk) tk_xtime() argument
79 ts.tv_sec = tk->xtime_sec; tk_xtime()
80 ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); tk_xtime()
84 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts) tk_set_xtime() argument
86 tk->xtime_sec = ts->tv_sec; tk_set_xtime()
87 tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift; tk_set_xtime()
90 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts) tk_xtime_add() argument
92 tk->xtime_sec += ts->tv_sec; tk_xtime_add()
93 tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift; tk_xtime_add()
94 tk_normalize_xtime(tk); tk_xtime_add()
97 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm) tk_set_wall_to_mono() argument
105 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec, tk_set_wall_to_mono()
106 -tk->wall_to_monotonic.tv_nsec); tk_set_wall_to_mono()
107 WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64); tk_set_wall_to_mono()
108 tk->wall_to_monotonic = wtm; tk_set_wall_to_mono()
110 tk->offs_real = timespec64_to_ktime(tmp); tk_set_wall_to_mono()
111 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0)); tk_set_wall_to_mono()
114 static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) tk_update_sleep_time() argument
116 tk->offs_boot = ktime_add(tk->offs_boot, delta); tk_update_sleep_time()
134 static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset) timekeeping_check_update() argument
137 cycle_t max_cycles = tk->tkr_mono.clock->max_cycles; timekeeping_check_update()
138 const char *name = tk->tkr_mono.clock->name; timekeeping_check_update()
213 static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset) timekeeping_check_update() argument
233 * @tk: The target timekeeper to setup.
241 static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) tk_setup_internals() argument
247 old_clock = tk->tkr_mono.clock; tk_setup_internals()
248 tk->tkr_mono.clock = clock; tk_setup_internals()
249 tk->tkr_mono.read = clock->read; tk_setup_internals()
250 tk->tkr_mono.mask = clock->mask; tk_setup_internals()
251 tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock); tk_setup_internals()
253 tk->tkr_raw.clock = clock; tk_setup_internals()
254 tk->tkr_raw.read = clock->read; tk_setup_internals()
255 tk->tkr_raw.mask = clock->mask; tk_setup_internals()
256 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; tk_setup_internals()
268 tk->cycle_interval = interval; tk_setup_internals()
271 tk->xtime_interval = (u64) interval * clock->mult; tk_setup_internals()
272 tk->xtime_remainder = ntpinterval - tk->xtime_interval; tk_setup_internals()
273 tk->raw_interval = tk_setup_internals()
280 tk->tkr_mono.xtime_nsec >>= -shift_change; tk_setup_internals()
282 tk->tkr_mono.xtime_nsec <<= shift_change; tk_setup_internals()
284 tk->tkr_raw.xtime_nsec = 0; tk_setup_internals()
286 tk->tkr_mono.shift = clock->shift; tk_setup_internals()
287 tk->tkr_raw.shift = clock->shift; tk_setup_internals()
289 tk->ntp_error = 0; tk_setup_internals()
290 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; tk_setup_internals()
291 tk->ntp_tick = ntpinterval << tk->ntp_error_shift; tk_setup_internals()
298 tk->tkr_mono.mult = clock->mult; tk_setup_internals()
299 tk->tkr_raw.mult = clock->mult; tk_setup_internals()
300 tk->ntp_err_mult = 0; tk_setup_internals()
450 * @tk: Timekeeper to snapshot.
453 * suspended, so take a snapshot of the readout base of @tk and use it as the
458 static void halt_fast_timekeeper(struct timekeeper *tk) halt_fast_timekeeper() argument
461 struct tk_read_base *tkr = &tk->tkr_mono; halt_fast_timekeeper()
468 tkr = &tk->tkr_raw; halt_fast_timekeeper()
476 static inline void update_vsyscall(struct timekeeper *tk) update_vsyscall() argument
480 xt = timespec64_to_timespec(tk_xtime(tk)); update_vsyscall()
481 wm = timespec64_to_timespec(tk->wall_to_monotonic); update_vsyscall()
482 update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult, update_vsyscall()
483 tk->tkr_mono.cycle_last); update_vsyscall()
486 static inline void old_vsyscall_fixup(struct timekeeper *tk) old_vsyscall_fixup() argument
500 remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1); old_vsyscall_fixup()
501 tk->tkr_mono.xtime_nsec -= remainder; old_vsyscall_fixup()
502 tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift; old_vsyscall_fixup()
503 tk->ntp_error += remainder << tk->ntp_error_shift; old_vsyscall_fixup()
504 tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift; old_vsyscall_fixup()
507 #define old_vsyscall_fixup(tk)
512 static void update_pvclock_gtod(struct timekeeper *tk, bool was_set) update_pvclock_gtod() argument
514 raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk); update_pvclock_gtod()
522 struct timekeeper *tk = &tk_core.timekeeper; pvclock_gtod_register_notifier() local
528 update_pvclock_gtod(tk, true); pvclock_gtod_register_notifier()
555 static inline void tk_update_ktime_data(struct timekeeper *tk) tk_update_ktime_data() argument
567 seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec); tk_update_ktime_data()
568 nsec = (u32) tk->wall_to_monotonic.tv_nsec; tk_update_ktime_data()
569 tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec); tk_update_ktime_data()
572 tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time); tk_update_ktime_data()
577 * this into account before updating tk->ktime_sec. tk_update_ktime_data()
579 nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); tk_update_ktime_data()
582 tk->ktime_sec = seconds; tk_update_ktime_data()
586 static void timekeeping_update(struct timekeeper *tk, unsigned int action) timekeeping_update() argument
589 tk->ntp_error = 0; timekeeping_update()
593 tk_update_ktime_data(tk); timekeeping_update()
595 update_vsyscall(tk); timekeeping_update()
596 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET); timekeeping_update()
602 update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono); timekeeping_update()
603 update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw); timekeeping_update()
613 static void timekeeping_forward_now(struct timekeeper *tk) timekeeping_forward_now() argument
615 struct clocksource *clock = tk->tkr_mono.clock; timekeeping_forward_now()
619 cycle_now = tk->tkr_mono.read(clock); timekeeping_forward_now()
620 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); timekeeping_forward_now()
621 tk->tkr_mono.cycle_last = cycle_now; timekeeping_forward_now()
622 tk->tkr_raw.cycle_last = cycle_now; timekeeping_forward_now()
624 tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult; timekeeping_forward_now()
627 tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift; timekeeping_forward_now()
629 tk_normalize_xtime(tk); timekeeping_forward_now()
631 nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift); timekeeping_forward_now()
632 timespec64_add_ns(&tk->raw_time, nsec); timekeeping_forward_now()
644 struct timekeeper *tk = &tk_core.timekeeper; __getnstimeofday64() local
651 ts->tv_sec = tk->xtime_sec; __getnstimeofday64()
652 nsecs = timekeeping_get_ns(&tk->tkr_mono); __getnstimeofday64()
683 struct timekeeper *tk = &tk_core.timekeeper; ktime_get() local
692 base = tk->tkr_mono.base; ktime_get()
693 nsecs = timekeeping_get_ns(&tk->tkr_mono); ktime_get()
709 struct timekeeper *tk = &tk_core.timekeeper; ktime_get_with_offset() local
718 base = ktime_add(tk->tkr_mono.base, *offset); ktime_get_with_offset()
719 nsecs = timekeeping_get_ns(&tk->tkr_mono); ktime_get_with_offset()
753 struct timekeeper *tk = &tk_core.timekeeper; ktime_get_raw() local
760 base = tk->tkr_raw.base; ktime_get_raw()
761 nsecs = timekeeping_get_ns(&tk->tkr_raw); ktime_get_raw()
779 struct timekeeper *tk = &tk_core.timekeeper; ktime_get_ts64() local
788 ts->tv_sec = tk->xtime_sec; ktime_get_ts64()
789 nsec = timekeeping_get_ns(&tk->tkr_mono); ktime_get_ts64()
790 tomono = tk->wall_to_monotonic; ktime_get_ts64()
804 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
811 struct timekeeper *tk = &tk_core.timekeeper; ktime_get_seconds() local
814 return tk->ktime_sec; ktime_get_seconds()
824 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
826 * counter to provide "atomic" access to the 64bit tk->xtime_sec
831 struct timekeeper *tk = &tk_core.timekeeper; ktime_get_real_seconds() local
836 return tk->xtime_sec; ktime_get_real_seconds()
840 seconds = tk->xtime_sec; ktime_get_real_seconds()
861 struct timekeeper *tk = &tk_core.timekeeper; getnstime_raw_and_real() local
870 *ts_raw = timespec64_to_timespec(tk->raw_time); getnstime_raw_and_real()
871 ts_real->tv_sec = tk->xtime_sec; getnstime_raw_and_real()
874 nsecs_raw = timekeeping_get_ns(&tk->tkr_raw); getnstime_raw_and_real()
875 nsecs_real = timekeeping_get_ns(&tk->tkr_mono); getnstime_raw_and_real()
910 struct timekeeper *tk = &tk_core.timekeeper; do_settimeofday64() local
920 timekeeping_forward_now(tk); do_settimeofday64()
922 xt = tk_xtime(tk); do_settimeofday64()
926 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta)); do_settimeofday64()
928 tk_set_xtime(tk, ts); do_settimeofday64()
930 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); do_settimeofday64()
950 struct timekeeper *tk = &tk_core.timekeeper; timekeeping_inject_offset() local
963 timekeeping_forward_now(tk); timekeeping_inject_offset()
966 tmp = timespec64_add(tk_xtime(tk), ts64); timekeeping_inject_offset()
972 tk_xtime_add(tk, &ts64); timekeeping_inject_offset()
973 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64)); timekeeping_inject_offset()
976 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); timekeeping_inject_offset()
995 struct timekeeper *tk = &tk_core.timekeeper; timekeeping_get_tai_offset() local
1001 ret = tk->tai_offset; timekeeping_get_tai_offset()
1011 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset) __timekeeping_set_tai_offset() argument
1013 tk->tai_offset = tai_offset; __timekeeping_set_tai_offset()
1014 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0)); __timekeeping_set_tai_offset()
1023 struct timekeeper *tk = &tk_core.timekeeper; timekeeping_set_tai_offset() local
1028 __timekeeping_set_tai_offset(tk, tai_offset); timekeeping_set_tai_offset()
1029 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); timekeeping_set_tai_offset()
1042 struct timekeeper *tk = &tk_core.timekeeper; change_clocksource() local
1051 timekeeping_forward_now(tk); change_clocksource()
1058 old = tk->tkr_mono.clock; change_clocksource()
1059 tk_setup_internals(tk, new); change_clocksource()
1067 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); change_clocksource()
1084 struct timekeeper *tk = &tk_core.timekeeper; timekeeping_notify() local
1086 if (tk->tkr_mono.clock == clock) timekeeping_notify()
1090 return tk->tkr_mono.clock == clock ? 0 : -1; timekeeping_notify()
1101 struct timekeeper *tk = &tk_core.timekeeper; getrawmonotonic64() local
1108 nsecs = timekeeping_get_ns(&tk->tkr_raw); getrawmonotonic64()
1109 ts64 = tk->raw_time; getrawmonotonic64()
1124 struct timekeeper *tk = &tk_core.timekeeper; timekeeping_valid_for_hres() local
1131 ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; timekeeping_valid_for_hres()
1143 struct timekeeper *tk = &tk_core.timekeeper; timekeeping_max_deferment() local
1150 ret = tk->tkr_mono.clock->max_idle_ns; timekeeping_max_deferment()
1214 struct timekeeper *tk = &tk_core.timekeeper; timekeeping_init() local
1243 tk_setup_internals(tk, clock); timekeeping_init()
1245 tk_set_xtime(tk, &now); timekeeping_init()
1246 tk->raw_time.tv_sec = 0; timekeeping_init()
1247 tk->raw_time.tv_nsec = 0; timekeeping_init()
1249 boot = tk_xtime(tk); timekeeping_init()
1252 tk_set_wall_to_mono(tk, tmp); timekeeping_init()
1254 timekeeping_update(tk, TK_MIRROR); timekeeping_init()
1270 static void __timekeeping_inject_sleeptime(struct timekeeper *tk, __timekeeping_inject_sleeptime() argument
1279 tk_xtime_add(tk, delta); __timekeeping_inject_sleeptime()
1280 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta)); __timekeeping_inject_sleeptime()
1281 tk_update_sleep_time(tk, timespec64_to_ktime(*delta)); __timekeeping_inject_sleeptime()
1334 struct timekeeper *tk = &tk_core.timekeeper; timekeeping_inject_sleeptime64() local
1340 timekeeping_forward_now(tk); timekeeping_inject_sleeptime64()
1342 __timekeeping_inject_sleeptime(tk, delta); timekeeping_inject_sleeptime64()
1344 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); timekeeping_inject_sleeptime64()
1359 struct timekeeper *tk = &tk_core.timekeeper; timekeeping_resume() local
1360 struct clocksource *clock = tk->tkr_mono.clock; timekeeping_resume()
1386 cycle_now = tk->tkr_mono.read(clock); timekeeping_resume()
1388 cycle_now > tk->tkr_mono.cycle_last) { timekeeping_resume()
1394 cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, timekeeping_resume()
1395 tk->tkr_mono.mask); timekeeping_resume()
1418 __timekeeping_inject_sleeptime(tk, &ts_delta); timekeeping_resume()
1421 tk->tkr_mono.cycle_last = cycle_now; timekeeping_resume()
1422 tk->tkr_raw.cycle_last = cycle_now; timekeeping_resume()
1424 tk->ntp_error = 0; timekeeping_resume()
1426 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); timekeeping_resume()
1438 struct timekeeper *tk = &tk_core.timekeeper; timekeeping_suspend() local
1455 timekeeping_forward_now(tk); timekeeping_suspend()
1465 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time); timekeeping_suspend()
1480 timekeeping_update(tk, TK_MIRROR); timekeeping_suspend()
1481 halt_fast_timekeeper(tk); timekeeping_suspend()
1508 static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk, timekeeping_apply_adjustment() argument
1513 s64 interval = tk->cycle_interval; timekeeping_apply_adjustment()
1574 if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) { timekeeping_apply_adjustment()
1580 tk->tkr_mono.mult += mult_adj; timekeeping_apply_adjustment()
1581 tk->xtime_interval += interval; timekeeping_apply_adjustment()
1582 tk->tkr_mono.xtime_nsec -= offset; timekeeping_apply_adjustment()
1583 tk->ntp_error -= (interval - offset) << tk->ntp_error_shift; timekeeping_apply_adjustment()
1590 static __always_inline void timekeeping_freqadjust(struct timekeeper *tk, timekeeping_freqadjust() argument
1593 s64 interval = tk->cycle_interval; timekeeping_freqadjust()
1594 s64 xinterval = tk->xtime_interval; timekeeping_freqadjust()
1600 if (tk->ntp_err_mult) timekeeping_freqadjust()
1601 xinterval -= tk->cycle_interval; timekeeping_freqadjust()
1603 tk->ntp_tick = ntp_tick_length(); timekeeping_freqadjust()
1606 tick_error = ntp_tick_length() >> tk->ntp_error_shift; timekeeping_freqadjust()
1607 tick_error -= (xinterval + tk->xtime_remainder); timekeeping_freqadjust()
1622 timekeeping_apply_adjustment(tk, offset, negative, adj); timekeeping_freqadjust()
1629 static void timekeeping_adjust(struct timekeeper *tk, s64 offset) timekeeping_adjust() argument
1632 timekeeping_freqadjust(tk, offset); timekeeping_adjust()
1635 if (!tk->ntp_err_mult && (tk->ntp_error > 0)) { timekeeping_adjust()
1636 tk->ntp_err_mult = 1; timekeeping_adjust()
1637 timekeeping_apply_adjustment(tk, offset, 0, 0); timekeeping_adjust()
1638 } else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) { timekeeping_adjust()
1640 timekeeping_apply_adjustment(tk, offset, 1, 0); timekeeping_adjust()
1641 tk->ntp_err_mult = 0; timekeeping_adjust()
1644 if (unlikely(tk->tkr_mono.clock->maxadj && timekeeping_adjust()
1645 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult) timekeeping_adjust()
1646 > tk->tkr_mono.clock->maxadj))) { timekeeping_adjust()
1649 tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult, timekeeping_adjust()
1650 (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj); timekeeping_adjust()
1667 if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) { timekeeping_adjust()
1668 s64 neg = -(s64)tk->tkr_mono.xtime_nsec; timekeeping_adjust()
1669 tk->tkr_mono.xtime_nsec = 0; timekeeping_adjust()
1670 tk->ntp_error += neg << tk->ntp_error_shift; timekeeping_adjust()
1682 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk) accumulate_nsecs_to_secs() argument
1684 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift; accumulate_nsecs_to_secs()
1687 while (tk->tkr_mono.xtime_nsec >= nsecps) { accumulate_nsecs_to_secs()
1690 tk->tkr_mono.xtime_nsec -= nsecps; accumulate_nsecs_to_secs()
1691 tk->xtime_sec++; accumulate_nsecs_to_secs()
1694 leap = second_overflow(tk->xtime_sec); accumulate_nsecs_to_secs()
1698 tk->xtime_sec += leap; accumulate_nsecs_to_secs()
1702 tk_set_wall_to_mono(tk, accumulate_nsecs_to_secs()
1703 timespec64_sub(tk->wall_to_monotonic, ts)); accumulate_nsecs_to_secs()
1705 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap); accumulate_nsecs_to_secs()
1722 static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, logarithmic_accumulation() argument
1726 cycle_t interval = tk->cycle_interval << shift; logarithmic_accumulation()
1735 tk->tkr_mono.cycle_last += interval; logarithmic_accumulation()
1736 tk->tkr_raw.cycle_last += interval; logarithmic_accumulation()
1738 tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift; logarithmic_accumulation()
1739 *clock_set |= accumulate_nsecs_to_secs(tk); logarithmic_accumulation()
1742 raw_nsecs = (u64)tk->raw_interval << shift; logarithmic_accumulation()
1743 raw_nsecs += tk->raw_time.tv_nsec; logarithmic_accumulation()
1747 tk->raw_time.tv_sec += raw_secs; logarithmic_accumulation()
1749 tk->raw_time.tv_nsec = raw_nsecs; logarithmic_accumulation()
1752 tk->ntp_error += tk->ntp_tick << shift; logarithmic_accumulation()
1753 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) << logarithmic_accumulation()
1754 (tk->ntp_error_shift + shift); logarithmic_accumulation()
1766 struct timekeeper *tk = &shadow_timekeeper; update_wall_time() local
1781 offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock), update_wall_time()
1782 tk->tkr_mono.cycle_last, tk->tkr_mono.mask); update_wall_time()
1800 shift = ilog2(offset) - ilog2(tk->cycle_interval); update_wall_time()
1805 while (offset >= tk->cycle_interval) { update_wall_time()
1806 offset = logarithmic_accumulation(tk, offset, shift, update_wall_time()
1808 if (offset < tk->cycle_interval<<shift) update_wall_time()
1813 timekeeping_adjust(tk, offset); update_wall_time()
1819 old_vsyscall_fixup(tk); update_wall_time()
1825 clock_set |= accumulate_nsecs_to_secs(tk); update_wall_time()
1838 memcpy(real_tk, tk, sizeof(*tk)); update_wall_time()
1861 struct timekeeper *tk = &tk_core.timekeeper; getboottime64() local
1862 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot); getboottime64()
1870 struct timekeeper *tk = &tk_core.timekeeper; get_seconds() local
1872 return tk->xtime_sec; get_seconds()
1878 struct timekeeper *tk = &tk_core.timekeeper; __current_kernel_time() local
1880 return timespec64_to_timespec(tk_xtime(tk)); __current_kernel_time()
1885 struct timekeeper *tk = &tk_core.timekeeper; current_kernel_time() local
1892 now = tk_xtime(tk); current_kernel_time()
1901 struct timekeeper *tk = &tk_core.timekeeper; get_monotonic_coarse64() local
1908 now = tk_xtime(tk); get_monotonic_coarse64()
1909 mono = tk->wall_to_monotonic; get_monotonic_coarse64()
1938 struct timekeeper *tk = &tk_core.timekeeper; ktime_get_update_offsets_tick() local
1946 base = tk->tkr_mono.base; ktime_get_update_offsets_tick()
1947 nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; ktime_get_update_offsets_tick()
1949 *offs_real = tk->offs_real; ktime_get_update_offsets_tick()
1950 *offs_boot = tk->offs_boot; ktime_get_update_offsets_tick()
1951 *offs_tai = tk->offs_tai; ktime_get_update_offsets_tick()
1970 struct timekeeper *tk = &tk_core.timekeeper; ktime_get_update_offsets_now() local
1978 base = tk->tkr_mono.base; ktime_get_update_offsets_now()
1979 nsecs = timekeeping_get_ns(&tk->tkr_mono); ktime_get_update_offsets_now()
1981 *offs_real = tk->offs_real; ktime_get_update_offsets_now()
1982 *offs_boot = tk->offs_boot; ktime_get_update_offsets_now()
1983 *offs_tai = tk->offs_tai; ktime_get_update_offsets_now()
1995 struct timekeeper *tk = &tk_core.timekeeper; do_adjtimex() local
2022 orig_tai = tai = tk->tai_offset; do_adjtimex()
2026 __timekeeping_set_tai_offset(tk, tai); do_adjtimex()
2027 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); do_adjtimex()
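
A recurring detail in the timekeeping.c hits above (tk_normalize_xtime(), tk_set_xtime(), timekeeping_forward_now(), accumulate_nsecs_to_secs()) is that tkr_mono.xtime_nsec holds nanoseconds pre-shifted left by the clocksource shift, so an elapsed-cycle delta can be folded in as delta * mult without dividing on every update. A toy, self-contained sketch of that bookkeeping, with invented example numbers:

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Toy accumulator: xtime_nsec stores nanoseconds << shift. */
struct toy_tk {
	uint64_t xtime_sec;
	uint64_t xtime_nsec;   /* shifted nanoseconds */
	uint32_t mult, shift;
};

static void toy_normalize(struct toy_tk *tk)   /* cf. tk_normalize_xtime() */
{
	uint64_t nsecps = NSEC_PER_SEC << tk->shift;

	while (tk->xtime_nsec >= nsecps) {
		tk->xtime_nsec -= nsecps;
		tk->xtime_sec++;
	}
}

static void toy_forward(struct toy_tk *tk, uint64_t delta_cycles)
{
	tk->xtime_nsec += delta_cycles * tk->mult;  /* stays in shifted ns */
	toy_normalize(tk);
}

int main(void)
{
	/* Assumed numbers: a 1 MHz counter with mult/shift chosen so one
	 * cycle is 1000 ns (1000 << 10 == 1024000). */
	struct toy_tk tk = { 0, 0, 1024000, 10 };

	toy_forward(&tk, 1500000);  /* 1.5 seconds' worth of cycles */
	printf("%llu.%09llu\n",
	       (unsigned long long)tk.xtime_sec,
	       (unsigned long long)(tk.xtime_nsec >> tk.shift));  /* 1.500000000 */
	return 0;
}
```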
/linux-4.1.27/arch/tile/kernel/
time.c
258 void update_vsyscall(struct timekeeper *tk) update_vsyscall() argument
260 if (tk->tkr_mono.clock != &cycle_counter_cs) update_vsyscall()
265 vdso_data->cycle_last = tk->tkr_mono.cycle_last; update_vsyscall()
266 vdso_data->mask = tk->tkr_mono.mask; update_vsyscall()
267 vdso_data->mult = tk->tkr_mono.mult; update_vsyscall()
268 vdso_data->shift = tk->tkr_mono.shift; update_vsyscall()
270 vdso_data->wall_time_sec = tk->xtime_sec; update_vsyscall()
271 vdso_data->wall_time_snsec = tk->tkr_mono.xtime_nsec; update_vsyscall()
273 vdso_data->monotonic_time_sec = tk->xtime_sec update_vsyscall()
274 + tk->wall_to_monotonic.tv_sec; update_vsyscall()
275 vdso_data->monotonic_time_snsec = tk->tkr_mono.xtime_nsec update_vsyscall()
276 + ((u64)tk->wall_to_monotonic.tv_nsec update_vsyscall()
277 << tk->tkr_mono.shift); update_vsyscall()
279 (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { update_vsyscall()
281 ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift; update_vsyscall()
285 vdso_data->wall_time_coarse_sec = tk->xtime_sec; update_vsyscall()
286 vdso_data->wall_time_coarse_nsec = (long)(tk->tkr_mono.xtime_nsec >> update_vsyscall()
287 tk->tkr_mono.shift); update_vsyscall()
290 vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec; update_vsyscall()
292 vdso_data->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec; update_vsyscall()
/linux-4.1.27/net/mac80211/
tkip.c
84 static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx, tkip_mixing_phase1() argument
98 p1k[0] += tkipS(p1k[4] ^ get_unaligned_le16(tk + 0 + j)); tkip_mixing_phase1()
99 p1k[1] += tkipS(p1k[0] ^ get_unaligned_le16(tk + 4 + j)); tkip_mixing_phase1()
100 p1k[2] += tkipS(p1k[1] ^ get_unaligned_le16(tk + 8 + j)); tkip_mixing_phase1()
101 p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j)); tkip_mixing_phase1()
102 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i; tkip_mixing_phase1()
108 static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx, tkip_mixing_phase2() argument
122 ppk[0] += tkipS(ppk[5] ^ get_unaligned_le16(tk + 0)); tkip_mixing_phase2()
123 ppk[1] += tkipS(ppk[0] ^ get_unaligned_le16(tk + 2)); tkip_mixing_phase2()
124 ppk[2] += tkipS(ppk[1] ^ get_unaligned_le16(tk + 4)); tkip_mixing_phase2()
125 ppk[3] += tkipS(ppk[2] ^ get_unaligned_le16(tk + 6)); tkip_mixing_phase2()
126 ppk[4] += tkipS(ppk[3] ^ get_unaligned_le16(tk + 8)); tkip_mixing_phase2()
127 ppk[5] += tkipS(ppk[4] ^ get_unaligned_le16(tk + 10)); tkip_mixing_phase2()
128 ppk[0] += ror16(ppk[5] ^ get_unaligned_le16(tk + 12), 1); tkip_mixing_phase2()
129 ppk[1] += ror16(ppk[0] ^ get_unaligned_le16(tk + 14), 1); tkip_mixing_phase2()
136 *rc4key++ = ((ppk[5] ^ get_unaligned_le16(tk)) >> 1) & 0xFF; tkip_mixing_phase2()
159 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; ieee80211_compute_tkip_p1k() local
171 tkip_mixing_phase1(tk, ctx, sdata->vif.addr, iv32); ieee80211_compute_tkip_p1k()
191 const u8 *tk = &keyconf->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; ieee80211_get_tkip_rx_p1k() local
194 tkip_mixing_phase1(tk, &ctx, ta, iv32); ieee80211_get_tkip_rx_p1k()
204 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; ieee80211_get_tkip_p2k() local
213 tkip_mixing_phase2(tk, ctx, iv16, p2k); ieee80211_get_tkip_p2k()
252 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY]; ieee80211_tkip_decrypt_data() local
283 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); ieee80211_tkip_decrypt_data()
298 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key); ieee80211_tkip_decrypt_data()
/linux-4.1.27/arch/s390/kernel/vdso64/
gettimeofday.S
35 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
36 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
37 lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
41 srlg %r1,%r1,0(%r5) /* >> tk->shift */
clock_gettime.S
44 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
46 srlg %r1,%r1,0(%r2) /* >> tk->shift */
89 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
90 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
91 srlg %r1,%r1,0(%r2) /* >> tk->shift */
92 lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
/linux-4.1.27/arch/arm64/kernel/
vdso.c
200 void update_vsyscall(struct timekeeper *tk) update_vsyscall() argument
203 u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter"); update_vsyscall()
212 vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; update_vsyscall()
213 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; update_vsyscall()
216 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; update_vsyscall()
217 vdso_data->xtime_clock_sec = tk->xtime_sec; update_vsyscall()
218 vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; update_vsyscall()
219 vdso_data->cs_mult = tk->tkr_mono.mult; update_vsyscall()
220 vdso_data->cs_shift = tk->tkr_mono.shift; update_vsyscall()
/linux-4.1.27/arch/s390/kernel/vdso32/
clock_gettime.S
44 2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
57 srdl %r0,0(%r2) /* >> tk->shift */
107 12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
115 al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
120 srdl %r0,0(%r2) /* >> tk->shift */
gettimeofday.S
39 3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
55 srdl %r0,0(%r4) /* >> tk->shift */
/linux-4.1.27/arch/arm/kernel/
vdso.c
268 static bool tk_is_cntvct(const struct timekeeper *tk) tk_is_cntvct() argument
273 if (strcmp(tk->tkr_mono.clock->name, "arch_sys_counter") != 0) tk_is_cntvct()
297 void update_vsyscall(struct timekeeper *tk) update_vsyscall() argument
300 struct timespec64 *wtm = &tk->wall_to_monotonic; update_vsyscall()
312 vdso_data->tk_is_cntvct = tk_is_cntvct(tk); update_vsyscall()
319 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; update_vsyscall()
320 vdso_data->xtime_clock_sec = tk->xtime_sec; update_vsyscall()
321 vdso_data->xtime_clock_snsec = tk->tkr_mono.xtime_nsec; update_vsyscall()
322 vdso_data->cs_mult = tk->tkr_mono.mult; update_vsyscall()
323 vdso_data->cs_shift = tk->tkr_mono.shift; update_vsyscall()
324 vdso_data->cs_mask = tk->tkr_mono.mask; update_vsyscall()
/linux-4.1.27/sound/pci/ca0106/
ca_midi.h
2 * Copyright 10/16/2005 Tilman Kranz <tilde@tk-sls.de>
ca_midi.c
2 * Copyright 10/16/2005 Tilman Kranz <tilde@tk-sls.de>
/linux-4.1.27/mm/
memory-failure.c
294 struct to_kill *tk; add_to_kill() local
297 tk = *tkc; add_to_kill()
300 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); add_to_kill()
301 if (!tk) { add_to_kill()
307 tk->addr = page_address_in_vma(p, vma); add_to_kill()
308 tk->addr_valid = 1; add_to_kill()
316 if (tk->addr == -EFAULT) { add_to_kill()
319 tk->addr_valid = 0; add_to_kill()
322 tk->tsk = tsk; add_to_kill()
323 list_add_tail(&tk->nd, to_kill); add_to_kill()
338 struct to_kill *tk, *next; kill_procs() local
340 list_for_each_entry_safe (tk, next, to_kill, nd) { list_for_each_entry_safe()
347 if (fail || tk->addr_valid == 0) { list_for_each_entry_safe()
350 pfn, tk->tsk->comm, tk->tsk->pid); list_for_each_entry_safe()
351 force_sig(SIGKILL, tk->tsk); list_for_each_entry_safe()
360 else if (kill_proc(tk->tsk, tk->addr, trapno, list_for_each_entry_safe()
364 pfn, tk->tsk->comm, tk->tsk->pid); list_for_each_entry_safe()
366 put_task_struct(tk->tsk); list_for_each_entry_safe()
367 kfree(tk); list_for_each_entry_safe()
491 struct to_kill *tk; collect_procs() local
496 tk = kmalloc(sizeof(struct to_kill), GFP_NOIO); collect_procs()
497 if (!tk) collect_procs()
500 collect_procs_anon(page, tokill, &tk, force_early); collect_procs()
502 collect_procs_file(page, tokill, &tk, force_early); collect_procs()
503 kfree(tk); collect_procs()
/linux-4.1.27/arch/s390/kernel/
time.c
214 void update_vsyscall(struct timekeeper *tk) update_vsyscall() argument
218 if (tk->tkr_mono.clock != &clocksource_tod) update_vsyscall()
224 vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last; update_vsyscall()
225 vdso_data->xtime_clock_sec = tk->xtime_sec; update_vsyscall()
226 vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; update_vsyscall()
228 tk->xtime_sec + tk->wall_to_monotonic.tv_sec; update_vsyscall()
229 vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec + update_vsyscall()
230 + ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift); update_vsyscall()
231 nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift; update_vsyscall()
237 vdso_data->xtime_coarse_sec = tk->xtime_sec; update_vsyscall()
239 (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); update_vsyscall()
241 vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec; update_vsyscall()
243 vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec; update_vsyscall()
249 vdso_data->tk_mult = tk->tkr_mono.mult; update_vsyscall()
250 vdso_data->tk_shift = tk->tkr_mono.shift; update_vsyscall()
/linux-4.1.27/include/linux/
timekeeper_internal.h
111 extern void update_vsyscall(struct timekeeper *tk);
123 static inline void update_vsyscall(struct timekeeper *tk) update_vsyscall() argument
kprobes.h
382 void kprobe_flush_task(struct task_struct *tk);
453 static inline void kprobe_flush_task(struct task_struct *tk) kprobe_flush_task() argument
isdn_ppp.h
166 struct task_struct *tk; member in struct:ippp_struct
/linux-4.1.27/net/bluetooth/
smp.c
98 u8 tk[16]; /* SMP Temporary Key */ member in struct:smp_chan
851 memset(smp->tk, 0, sizeof(smp->tk)); tk_request()
908 memset(smp->tk, 0, sizeof(smp->tk)); tk_request()
911 put_unaligned_le32(passkey, smp->tk); tk_request()
939 ret = smp_c1(smp->tfm_aes, smp->tk, smp->prnd, smp->preq, smp->prsp, smp_confirm()
970 ret = smp_c1(smp->tfm_aes, smp->tk, smp->rrnd, smp->preq, smp->prsp, smp_random()
986 smp_s1(smp->tfm_aes, smp->tk, smp->rrnd, smp->prnd, stk); smp_random()
1005 smp_s1(smp->tfm_aes, smp->tk, smp->prnd, smp->rrnd, stk); smp_random()
1147 memset(smp->tk + smp->enc_key_size, 0, sc_add_ltk()
1151 key_type, auth, smp->tk, smp->enc_key_size, sc_add_ltk()
1167 if (smp_h6(smp->tfm_cmac, smp->tk, tmp1, smp->link_key)) { sc_generate_link_key()
1214 if (smp_h6(smp->tfm_cmac, key->val, tmp2, smp->tk)) sc_generate_ltk()
1217 if (smp_h6(smp->tfm_cmac, smp->tk, brle, smp->tk)) sc_generate_ltk()
1500 if (sc_mackey_and_ltk(smp, smp->mackey, smp->tk)) sc_passkey_round()
1635 memset(smp->tk, 0, sizeof(smp->tk)); smp_user_confirm_reply()
1637 put_unaligned_le32(value, smp->tk); smp_user_confirm_reply()
2152 err = sc_mackey_and_ltk(smp, smp->mackey, smp->tk); smp_cmd_pairing_random()
2381 memcpy(smp->tk, rp->ltk, sizeof(smp->tk)); smp_cmd_encrypt_info()
2413 authenticated, smp->tk, smp->enc_key_size, smp_cmd_master_ident()
2745 hci_le_start_enc(hcon, 0, 0, smp->tk); smp_cmd_dhkey_check()
hci_core.c
2373 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) hci_add_ltk()
2390 memcpy(key->val, tk, sizeof(key->val)); hci_add_ltk()
2371 hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, u8 authenticated, u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) hci_add_ltk() argument
/linux-4.1.27/sound/drivers/
portman2x4.c
23 * Sep 30 2004 Tobias Gehrig <tobias@gehrig.tk>
25 * Sep 03 2004 Tobias Gehrig <tobias@gehrig.tk>
29 * Mar 24 2004 Tobias Gehrig <tobias@gehrig.tk>
31 * Mar 18 2004 Tobias Gehrig <tobias@gehrig.tk>
34 * Mar 17 2004 Tobias Gehrig <tobias@gehrig.tk>
36 * Feb 20 2004 Tobias Gehrig <tobias@gehrig.tk>
/linux-4.1.27/lib/
bch.c
843 struct gf_poly *tk = bch->poly_2t[2]; factor_polynomial() local
852 /* tk = Tr(a^k.X) mod f */ factor_polynomial()
853 compute_trace_bk_mod(bch, k, f, z, tk); factor_polynomial()
855 if (tk->deg > 0) { factor_polynomial()
856 /* compute g = gcd(f, tk) (destructive operation) */ factor_polynomial()
858 gcd = gf_poly_gcd(bch, f2, tk); factor_polynomial()
860 /* compute h=f/gcd(f,tk); this will modify f and q */ factor_polynomial()
/linux-4.1.27/drivers/staging/rtl8712/
rtl871x_security.c
386 #define TK16(N) Mk16(tk[2 * (N) + 1], tk[2 * (N)])
476 * tk[] = temporal key [128 bits]
488 static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32) phase1() argument
515 * tk[] = Temporal key [128 bits]
533 static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16) phase2() argument
/linux-4.1.27/drivers/staging/rtl8723au/core/
rtw_security.c
402 #define TK16(N) Mk16(tk[2 * (N) + 1], tk[2 * (N)])
492 * tk[] = temporal key [128 bits]
504 static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32) phase1() argument
534 * tk[] = Temporal key [128 bits]
552 static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16) phase2() argument
/linux-4.1.27/drivers/staging/rtl8188eu/core/
rtw_security.c
372 #define TK16(N) Mk16(tk[2*(N)+1], tk[2*(N)])
462 * tk[] = temporal key [128 bits]
474 static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32) phase1() argument
501 * tk[] = Temporal key [128 bits]
519 static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16) phase2() argument
/linux-4.1.27/kernel/
kprobes.c
1139 * This function is called from finish_task_switch when task tk becomes dead,
1144 void kprobe_flush_task(struct task_struct *tk) kprobe_flush_task() argument
1156 hash = hash_ptr(tk, KPROBE_HASH_BITS); kprobe_flush_task()
1160 if (ri->task == tk) hlist_for_each_entry_safe()
/linux-4.1.27/arch/x86/kvm/
x86.c
1076 static void update_pvclock_gtod(struct timekeeper *tk) update_pvclock_gtod() argument
1081 boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot)); update_pvclock_gtod()
1086 vdata->clock.vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode; update_pvclock_gtod()
1087 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; update_pvclock_gtod()
1088 vdata->clock.mask = tk->tkr_mono.mask; update_pvclock_gtod()
1089 vdata->clock.mult = tk->tkr_mono.mult; update_pvclock_gtod()
1090 vdata->clock.shift = tk->tkr_mono.shift; update_pvclock_gtod()
1093 vdata->nsec_base = tk->tkr_mono.xtime_nsec; update_pvclock_gtod()
5770 struct timekeeper *tk = priv; pvclock_gtod_notify() local
5772 update_pvclock_gtod(tk); pvclock_gtod_notify()
/linux-4.1.27/drivers/i2c/algos/
i2c-algo-bit.c
663 MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
/linux-4.1.27/drivers/i2c/
i2c-dev.c
665 "Simon G. Vogl <simon@tk.uni-linz.ac.at>");
i2c-core.c
2962 MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
/linux-4.1.27/drivers/mmc/host/
s3cmci.c
4 * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
1889 MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>");
/linux-4.1.27/include/net/bluetooth/
hci_core.h
984 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
/linux-4.1.27/drivers/ata/
pata_bf54x.c
451 /* increase tk until we meed the minimum cycle length */ bfin_set_dmamode()
/linux-4.1.27/drivers/isdn/i4l/
isdn_ppp.c
315 is->tk = current; isdn_ppp_open()
