Lines Matching refs:cpuctx

205 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
310 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
313 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
343 static void perf_ctx_lock(struct perf_cpu_context *cpuctx, in perf_ctx_lock() argument
346 raw_spin_lock(&cpuctx->ctx.lock); in perf_ctx_lock()
351 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, in perf_ctx_unlock() argument
356 raw_spin_unlock(&cpuctx->ctx.lock); in perf_ctx_unlock()
365 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_cgroup_match() local
372 if (!cpuctx->cgrp) in perf_cgroup_match()
381 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, in perf_cgroup_match()
417 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) in update_cgrp_time_from_cpuctx() argument
419 struct perf_cgroup *cgrp_out = cpuctx->cgrp; in update_cgrp_time_from_cpuctx()
474 struct perf_cpu_context *cpuctx; in perf_cgroup_switch() local
492 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_cgroup_switch()
493 if (cpuctx->unique_pmu != pmu) in perf_cgroup_switch()
503 if (cpuctx->ctx.nr_cgroups > 0) { in perf_cgroup_switch()
504 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_cgroup_switch()
505 perf_pmu_disable(cpuctx->ctx.pmu); in perf_cgroup_switch()
508 cpu_ctx_sched_out(cpuctx, EVENT_ALL); in perf_cgroup_switch()
513 cpuctx->cgrp = NULL; in perf_cgroup_switch()
517 WARN_ON_ONCE(cpuctx->cgrp); in perf_cgroup_switch()
523 cpuctx->cgrp = perf_cgroup_from_task(task); in perf_cgroup_switch()
524 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); in perf_cgroup_switch()
526 perf_pmu_enable(cpuctx->ctx.pmu); in perf_cgroup_switch()
527 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_cgroup_switch()
688 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) in update_cgrp_time_from_cpuctx() argument
752 struct perf_cpu_context *cpuctx; in perf_cpu_hrtimer_handler() local
758 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); in perf_cpu_hrtimer_handler()
760 rotations = perf_rotate_context(cpuctx); in perf_cpu_hrtimer_handler()
766 hrtimer_forward_now(hr, cpuctx->hrtimer_interval); in perf_cpu_hrtimer_handler()
776 struct perf_cpu_context *cpuctx; in perf_cpu_hrtimer_cancel() local
788 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_cpu_hrtimer_cancel()
793 hrtimer_cancel(&cpuctx->hrtimer); in perf_cpu_hrtimer_cancel()
801 static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) in __perf_cpu_hrtimer_init() argument
803 struct hrtimer *hr = &cpuctx->hrtimer; in __perf_cpu_hrtimer_init()
804 struct pmu *pmu = cpuctx->ctx.pmu; in __perf_cpu_hrtimer_init()
819 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); in __perf_cpu_hrtimer_init()
825 static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx) in perf_cpu_hrtimer_restart() argument
827 struct hrtimer *hr = &cpuctx->hrtimer; in perf_cpu_hrtimer_restart()
828 struct pmu *pmu = cpuctx->ctx.pmu; in perf_cpu_hrtimer_restart()
838 __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval, in perf_cpu_hrtimer_restart()
1393 struct perf_cpu_context *cpuctx; in list_del_event() local
1408 cpuctx = __get_cpu_context(ctx); in list_del_event()
1415 cpuctx->cgrp = NULL; in list_del_event()
1538 struct perf_cpu_context *cpuctx, in event_sched_out() argument
1575 cpuctx->active_oncpu--; in event_sched_out()
1580 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
1581 cpuctx->exclusive = 0; in event_sched_out()
1591 struct perf_cpu_context *cpuctx, in group_sched_out() argument
1597 event_sched_out(group_event, cpuctx, ctx); in group_sched_out()
1603 event_sched_out(event, cpuctx, ctx); in group_sched_out()
1606 cpuctx->exclusive = 0; in group_sched_out()
1625 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_remove_from_context() local
1628 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
1632 if (!ctx->nr_events && cpuctx->task_ctx == ctx) { in __perf_remove_from_context()
1634 cpuctx->task_ctx = NULL; in __perf_remove_from_context()
1713 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_disable() local
1722 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_disable()
1736 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
1738 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
1858 struct perf_cpu_context *cpuctx, in event_sched_in() argument
1903 cpuctx->active_oncpu++; in event_sched_in()
1910 cpuctx->exclusive = 1; in event_sched_in()
1923 struct perf_cpu_context *cpuctx, in group_sched_in() argument
1936 if (event_sched_in(group_event, cpuctx, ctx)) { in group_sched_in()
1938 perf_cpu_hrtimer_restart(cpuctx); in group_sched_in()
1946 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
1978 event_sched_out(event, cpuctx, ctx); in group_sched_in()
1981 event_sched_out(group_event, cpuctx, ctx); in group_sched_in()
1985 perf_cpu_hrtimer_restart(cpuctx); in group_sched_in()
1994 struct perf_cpu_context *cpuctx, in group_can_go_on() argument
2006 if (cpuctx->exclusive) in group_can_go_on()
2012 if (event->attr.exclusive && cpuctx->active_oncpu) in group_can_go_on()
2036 struct perf_cpu_context *cpuctx,
2040 static void perf_event_sched_in(struct perf_cpu_context *cpuctx, in perf_event_sched_in() argument
2044 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2046 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2047 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
2049 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
2061 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_install_in_context() local
2062 struct perf_event_context *task_ctx = cpuctx->task_ctx; in __perf_install_in_context()
2065 perf_ctx_lock(cpuctx, task_ctx); in __perf_install_in_context()
2066 perf_pmu_disable(cpuctx->ctx.pmu); in __perf_install_in_context()
2086 cpuctx->task_ctx = task_ctx; in __perf_install_in_context()
2090 cpu_ctx_sched_out(cpuctx, EVENT_ALL); in __perf_install_in_context()
2105 perf_event_sched_in(cpuctx, task_ctx, task); in __perf_install_in_context()
2107 perf_pmu_enable(cpuctx->ctx.pmu); in __perf_install_in_context()
2108 perf_ctx_unlock(cpuctx, task_ctx); in __perf_install_in_context()
2201 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_enable() local
2242 if (!group_can_go_on(event, cpuctx, 1)) { in __perf_event_enable()
2246 err = group_sched_in(event, cpuctx, ctx); in __perf_event_enable()
2248 err = event_sched_in(event, cpuctx, ctx); in __perf_event_enable()
2257 group_sched_out(leader, cpuctx, ctx); in __perf_event_enable()
2258 perf_cpu_hrtimer_restart(cpuctx); in __perf_event_enable()
2382 struct perf_cpu_context *cpuctx, in ctx_sched_out() argument
2393 update_cgrp_time_from_cpuctx(cpuctx); in ctx_sched_out()
2400 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2405 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2526 struct perf_cpu_context *cpuctx; in perf_event_context_sched_out() local
2532 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_out()
2533 if (!cpuctx->task_ctx) in perf_event_context_sched_out()
2584 ctx_sched_out(ctx, cpuctx, EVENT_ALL); in perf_event_context_sched_out()
2585 cpuctx->task_ctx = NULL; in perf_event_context_sched_out()
2608 struct perf_cpu_context *cpuctx; in perf_pmu_sched_task() local
2621 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_pmu_sched_task()
2623 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_pmu_sched_task()
2627 pmu->sched_task(cpuctx->task_ctx, sched_in); in perf_pmu_sched_task()
2631 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_pmu_sched_task()
2676 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in task_ctx_sched_out() local
2678 if (!cpuctx->task_ctx) in task_ctx_sched_out()
2681 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) in task_ctx_sched_out()
2684 ctx_sched_out(ctx, cpuctx, EVENT_ALL); in task_ctx_sched_out()
2685 cpuctx->task_ctx = NULL; in task_ctx_sched_out()
2691 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, in cpu_ctx_sched_out() argument
2694 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); in cpu_ctx_sched_out()
2699 struct perf_cpu_context *cpuctx) in ctx_pinned_sched_in() argument
2713 if (group_can_go_on(event, cpuctx, 1)) in ctx_pinned_sched_in()
2714 group_sched_in(event, cpuctx, ctx); in ctx_pinned_sched_in()
2729 struct perf_cpu_context *cpuctx) in ctx_flexible_sched_in() argument
2749 if (group_can_go_on(event, cpuctx, can_add_hw)) { in ctx_flexible_sched_in()
2750 if (group_sched_in(event, cpuctx, ctx)) in ctx_flexible_sched_in()
2758 struct perf_cpu_context *cpuctx, in ctx_sched_in() argument
2777 ctx_pinned_sched_in(ctx, cpuctx); in ctx_sched_in()
2781 ctx_flexible_sched_in(ctx, cpuctx); in ctx_sched_in()
2784 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, in cpu_ctx_sched_in() argument
2788 struct perf_event_context *ctx = &cpuctx->ctx; in cpu_ctx_sched_in()
2790 ctx_sched_in(ctx, cpuctx, event_type, task); in cpu_ctx_sched_in()
2796 struct perf_cpu_context *cpuctx; in perf_event_context_sched_in() local
2798 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_in()
2799 if (cpuctx->task_ctx == ctx) in perf_event_context_sched_in()
2802 perf_ctx_lock(cpuctx, ctx); in perf_event_context_sched_in()
2809 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in perf_event_context_sched_in()
2812 cpuctx->task_ctx = ctx; in perf_event_context_sched_in()
2814 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); in perf_event_context_sched_in()
2817 perf_ctx_unlock(cpuctx, ctx); in perf_event_context_sched_in()
3046 static int perf_rotate_context(struct perf_cpu_context *cpuctx) in perf_rotate_context() argument
3051 if (cpuctx->ctx.nr_events) { in perf_rotate_context()
3052 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) in perf_rotate_context()
3056 ctx = cpuctx->task_ctx; in perf_rotate_context()
3065 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_rotate_context()
3066 perf_pmu_disable(cpuctx->ctx.pmu); in perf_rotate_context()
3068 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in perf_rotate_context()
3070 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); in perf_rotate_context()
3072 rotate_ctx(&cpuctx->ctx); in perf_rotate_context()
3076 perf_event_sched_in(cpuctx, ctx, current); in perf_rotate_context()
3078 perf_pmu_enable(cpuctx->ctx.pmu); in perf_rotate_context()
3079 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_rotate_context()
3202 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_read() local
3211 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_read()
3335 struct perf_cpu_context *cpuctx; in find_get_context() local
3354 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in find_get_context()
3355 ctx = &cpuctx->ctx; in find_get_context()
5504 struct perf_cpu_context *cpuctx; in perf_event_aux() local
5511 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); in perf_event_aux()
5512 if (cpuctx->unique_pmu != pmu) in perf_event_aux()
5514 perf_event_aux_ctx(&cpuctx->ctx, output, data); in perf_event_aux()
7156 struct perf_cpu_context *cpuctx; in update_pmu_context() local
7158 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in update_pmu_context()
7160 if (cpuctx->unique_pmu == old_pmu) in update_pmu_context()
7161 cpuctx->unique_pmu = pmu; in update_pmu_context()
7228 struct perf_cpu_context *cpuctx; in perf_event_mux_interval_ms_store() local
7229 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_mux_interval_ms_store()
7230 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); in perf_event_mux_interval_ms_store()
7232 if (hrtimer_active(&cpuctx->hrtimer)) in perf_event_mux_interval_ms_store()
7233 hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval); in perf_event_mux_interval_ms_store()
7331 struct perf_cpu_context *cpuctx; in perf_pmu_register() local
7333 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_pmu_register()
7334 __perf_event_init_context(&cpuctx->ctx); in perf_pmu_register()
7335 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); in perf_pmu_register()
7336 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); in perf_pmu_register()
7337 cpuctx->ctx.pmu = pmu; in perf_pmu_register()
7339 __perf_cpu_hrtimer_init(cpuctx, cpu); in perf_pmu_register()
7341 cpuctx->unique_pmu = pmu; in perf_pmu_register()
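
The references above repeatedly show the same ordering around context (re)scheduling on a cpuctx: take the context lock (perf_ctx_lock), disable the PMU (perf_pmu_disable), schedule events out (cpu_ctx_sched_out / ctx_sched_out), mutate state (the cgrp pointer, task_ctx, or rotation), schedule events back in (perf_event_sched_in / cpu_ctx_sched_in), then re-enable the PMU and drop the lock; perf_cgroup_switch, __perf_install_in_context and perf_rotate_context all follow it. The standalone C sketch below only models that ordering with hypothetical stub types and helpers (cpu_context, ctx_lock, pmu_disable, sched_out, ...); none of them are the real kernel APIs listed above, whose signatures and locking rules differ.

/*
 * Standalone model (not kernel code) of the lock/disable -> sched_out ->
 * mutate -> sched_in -> enable/unlock ordering visible in the cpuctx
 * references above. All types and helpers here are hypothetical stubs.
 */
#include <pthread.h>
#include <stdio.h>

struct cpu_context {                 /* stand-in for struct perf_cpu_context */
	pthread_mutex_t lock;        /* stand-in for cpuctx->ctx.lock        */
	int pmu_disable_count;       /* stand-in for PMU disable nesting     */
	int nr_active;               /* events currently scheduled in        */
};

static void ctx_lock(struct cpu_context *c)    { pthread_mutex_lock(&c->lock); }
static void ctx_unlock(struct cpu_context *c)  { pthread_mutex_unlock(&c->lock); }
static void pmu_disable(struct cpu_context *c) { c->pmu_disable_count++; }
static void pmu_enable(struct cpu_context *c)  { c->pmu_disable_count--; }

static void sched_out(struct cpu_context *c)
{
	printf("sched out %d events\n", c->nr_active);
	c->nr_active = 0;
}

static void sched_in(struct cpu_context *c, int nr)
{
	c->nr_active = nr;
	printf("sched in %d events\n", c->nr_active);
}

/*
 * Mirrors the ordering in the listing: everything between pmu_disable()
 * and pmu_enable() happens with the PMU quiesced and the lock held.
 */
static void reschedule(struct cpu_context *c, int new_nr)
{
	ctx_lock(c);
	pmu_disable(c);

	sched_out(c);
	/* ... mutate context state here (cgrp, task_ctx, rotation) ... */
	sched_in(c, new_nr);

	pmu_enable(c);
	ctx_unlock(c);
}

int main(void)
{
	struct cpu_context c = { PTHREAD_MUTEX_INITIALIZER, 0, 3 };

	reschedule(&c, 2);
	return 0;
}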