Lines matching refs:policy (references to 'policy' in drivers/cpufreq/cpufreq.c)

74 static int __cpufreq_governor(struct cpufreq_policy *policy,
76 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
115 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) in get_governor_parent_kobj() argument
118 return &policy->kobj; in get_governor_parent_kobj()
166 int cpufreq_generic_init(struct cpufreq_policy *policy, in cpufreq_generic_init() argument
172 ret = cpufreq_table_validate_and_show(policy, table); in cpufreq_generic_init()
178 policy->cpuinfo.transition_latency = transition_latency; in cpufreq_generic_init()
184 cpumask_setall(policy->cpus); in cpufreq_generic_init()
192 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); in cpufreq_generic_get() local
194 if (!policy || IS_ERR(policy->clk)) { in cpufreq_generic_get()
196 __func__, policy ? "clk" : "policy", cpu); in cpufreq_generic_get()
200 return clk_get_rate(policy->clk) / 1000; in cpufreq_generic_get()
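
The two generic helpers above cover the common clock-based driver case: cpufreq_generic_init() validates a frequency table and fills in cpuinfo, while cpufreq_generic_get() reads policy->clk and divides by 1000. A minimal sketch of a driver ->init built on them follows; the table contents and the clock lookup are illustrative assumptions, not code from this file.

        #include <linux/clk.h>
        #include <linux/cpu.h>
        #include <linux/cpufreq.h>
        #include <linux/err.h>

        /* Sketch only: hypothetical frequency table for a clock-based driver. */
        static struct cpufreq_frequency_table example_freq_table[] = {
                { .frequency = 300000 },                /* kHz */
                { .frequency = 600000 },
                { .frequency = CPUFREQ_TABLE_END },
        };

        static int example_cpufreq_init(struct cpufreq_policy *policy)
        {
                /* cpufreq_generic_get() reads policy->clk, so store the
                 * CPU clock handle there before returning */
                policy->clk = clk_get(get_cpu_device(policy->cpu), NULL);
                if (IS_ERR(policy->clk))
                        return PTR_ERR(policy->clk);

                /* validates the table and sets a 300 us (300000 ns) latency */
                return cpufreq_generic_init(policy, example_freq_table, 300 * 1000);
        }

The driver would then point its .get hook at cpufreq_generic_get in its cpufreq_driver descriptor.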
212 struct cpufreq_policy *policy = NULL; in cpufreq_cpu_get() local
226 policy = per_cpu(cpufreq_cpu_data, cpu); in cpufreq_cpu_get()
227 if (policy) in cpufreq_cpu_get()
228 kobject_get(&policy->kobj); in cpufreq_cpu_get()
233 if (!policy) in cpufreq_cpu_get()
236 return policy; in cpufreq_cpu_get()
240 void cpufreq_cpu_put(struct cpufreq_policy *policy) in cpufreq_cpu_put() argument
242 kobject_put(&policy->kobj); in cpufreq_cpu_put()
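
cpufreq_cpu_get() takes a reference on the policy's kobject, so every successful get must be balanced by cpufreq_cpu_put(). A minimal usage sketch, mirroring the pattern cpufreq_quick_get() uses later in this file:

        #include <linux/cpufreq.h>

        /* Sketch only: balanced get/put around a cached-frequency read. */
        static unsigned int example_cached_khz(unsigned int cpu)
        {
                struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
                unsigned int cur = 0;

                if (policy) {
                        cur = policy->cur;              /* last known frequency, kHz */
                        cpufreq_cpu_put(policy);        /* drop the kobject reference */
                }
                return cur;
        }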
283 static void __cpufreq_notify_transition(struct cpufreq_policy *policy, in __cpufreq_notify_transition() argument
303 if ((policy) && (policy->cpu == freqs->cpu) && in __cpufreq_notify_transition()
304 (policy->cur) && (policy->cur != freqs->old)) { in __cpufreq_notify_transition()
306 freqs->old, policy->cur); in __cpufreq_notify_transition()
307 freqs->old = policy->cur; in __cpufreq_notify_transition()
322 if (likely(policy) && likely(policy->cpu == freqs->cpu)) in __cpufreq_notify_transition()
323 policy->cur = freqs->new; in __cpufreq_notify_transition()
336 static void cpufreq_notify_transition(struct cpufreq_policy *policy, in cpufreq_notify_transition() argument
339 for_each_cpu(freqs->cpu, policy->cpus) in cpufreq_notify_transition()
340 __cpufreq_notify_transition(policy, freqs, state); in cpufreq_notify_transition()
344 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy, in cpufreq_notify_post_transition() argument
347 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); in cpufreq_notify_post_transition()
352 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); in cpufreq_notify_post_transition()
353 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE); in cpufreq_notify_post_transition()
356 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, in cpufreq_freq_transition_begin() argument
369 && current == policy->transition_task); in cpufreq_freq_transition_begin()
372 wait_event(policy->transition_wait, !policy->transition_ongoing); in cpufreq_freq_transition_begin()
374 spin_lock(&policy->transition_lock); in cpufreq_freq_transition_begin()
376 if (unlikely(policy->transition_ongoing)) { in cpufreq_freq_transition_begin()
377 spin_unlock(&policy->transition_lock); in cpufreq_freq_transition_begin()
381 policy->transition_ongoing = true; in cpufreq_freq_transition_begin()
382 policy->transition_task = current; in cpufreq_freq_transition_begin()
384 spin_unlock(&policy->transition_lock); in cpufreq_freq_transition_begin()
386 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE); in cpufreq_freq_transition_begin()
390 void cpufreq_freq_transition_end(struct cpufreq_policy *policy, in cpufreq_freq_transition_end() argument
393 if (unlikely(WARN_ON(!policy->transition_ongoing))) in cpufreq_freq_transition_end()
396 cpufreq_notify_post_transition(policy, freqs, transition_failed); in cpufreq_freq_transition_end()
398 policy->transition_ongoing = false; in cpufreq_freq_transition_end()
399 policy->transition_task = NULL; in cpufreq_freq_transition_end()
401 wake_up(&policy->transition_wait); in cpufreq_freq_transition_end()
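
cpufreq_freq_transition_begin()/end() serialize transitions per policy and send the PRE/POSTCHANGE notifications. For ->target_index drivers the core issues them itself (see __target_index() further down); a driver that notifies on its own, e.g. an older ->target style implementation, brackets its hardware write as in this sketch. example_write_rate() is a hypothetical stand-in for real hardware programming.

        #include <linux/cpufreq.h>

        /* hypothetical register write standing in for real hardware programming */
        static int example_write_rate(unsigned int cpu, unsigned int khz)
        {
                return 0;
        }

        static int example_switch(struct cpufreq_policy *policy, unsigned int new_khz)
        {
                struct cpufreq_freqs freqs = {
                        .old = policy->cur,
                        .new = new_khz,
                };
                int ret;

                cpufreq_freq_transition_begin(policy, &freqs);
                ret = example_write_rate(policy->cpu, new_khz);
                /* a non-zero ret marks the transition as failed for notifiers */
                cpufreq_freq_transition_end(policy, &freqs, ret);

                return ret;
        }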
451 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, in cpufreq_parse_governor() argument
461 *policy = CPUFREQ_POLICY_PERFORMANCE; in cpufreq_parse_governor()
465 *policy = CPUFREQ_POLICY_POWERSAVE; in cpufreq_parse_governor()
507 (struct cpufreq_policy *policy, char *buf) \
509 return sprintf(buf, "%u\n", policy->object); \
518 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf) in show_scaling_cur_freq() argument
523 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); in show_scaling_cur_freq()
525 ret = sprintf(buf, "%u\n", policy->cur); in show_scaling_cur_freq()
529 static int cpufreq_set_policy(struct cpufreq_policy *policy,
537 (struct cpufreq_policy *policy, const char *buf, size_t count) \
542 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
551 ret = cpufreq_set_policy(policy, &new_policy); \
553 policy->user_policy.object = temp; \
564 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, in show_cpuinfo_cur_freq() argument
567 unsigned int cur_freq = __cpufreq_get(policy); in show_cpuinfo_cur_freq()
576 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) in show_scaling_governor() argument
578 if (policy->policy == CPUFREQ_POLICY_POWERSAVE) in show_scaling_governor()
580 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) in show_scaling_governor()
582 else if (policy->governor) in show_scaling_governor()
584 policy->governor->name); in show_scaling_governor()
591 static ssize_t store_scaling_governor(struct cpufreq_policy *policy, in store_scaling_governor() argument
598 ret = cpufreq_get_policy(&new_policy, policy->cpu); in store_scaling_governor()
606 if (cpufreq_parse_governor(str_governor, &new_policy.policy, in store_scaling_governor()
610 ret = cpufreq_set_policy(policy, &new_policy); in store_scaling_governor()
612 policy->user_policy.policy = policy->policy; in store_scaling_governor()
613 policy->user_policy.governor = policy->governor; in store_scaling_governor()
624 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) in show_scaling_driver() argument
632 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, in show_scaling_available_governors() argument
675 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) in show_related_cpus() argument
677 return cpufreq_show_cpus(policy->related_cpus, buf); in show_related_cpus()
683 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) in show_affected_cpus() argument
685 return cpufreq_show_cpus(policy->cpus, buf); in show_affected_cpus()
688 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, in store_scaling_setspeed() argument
694 if (!policy->governor || !policy->governor->store_setspeed) in store_scaling_setspeed()
701 policy->governor->store_setspeed(policy, freq); in store_scaling_setspeed()
706 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) in show_scaling_setspeed() argument
708 if (!policy->governor || !policy->governor->show_setspeed) in show_scaling_setspeed()
711 return policy->governor->show_setspeed(policy, buf); in show_scaling_setspeed()
717 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) in show_bios_limit() argument
722 ret = cpufreq_driver->bios_limit(policy->cpu, &limit); in show_bios_limit()
726 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); in show_bios_limit()
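
All of the show_*/store_* handlers above share one signature, and a driver can expose extra attributes on the same policy kobject through cpufreq_driver->attr, which cpufreq_add_dev_interface() walks below. A sketch with a hypothetical read-only attribute, assuming the cpufreq_freq_attr_ro() helper macro from linux/cpufreq.h:

        #include <linux/cpufreq.h>
        #include <linux/kernel.h>

        /* Sketch only: hypothetical driver-private read-only attribute. */
        static ssize_t show_example_limit(struct cpufreq_policy *policy, char *buf)
        {
                return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
        }
        cpufreq_freq_attr_ro(example_limit);

        static struct freq_attr *example_cpufreq_attrs[] = {
                &example_limit,
                &cpufreq_freq_attr_scaling_available_freqs,
                NULL,
        };
        /* wired up via .attr = example_cpufreq_attrs in the cpufreq_driver */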
764 struct cpufreq_policy *policy = to_policy(kobj); in show() local
771 down_read(&policy->rwsem); in show()
774 ret = fattr->show(policy, buf); in show()
778 up_read(&policy->rwsem); in show()
787 struct cpufreq_policy *policy = to_policy(kobj); in store() local
793 if (!cpu_online(policy->cpu)) in store()
799 down_write(&policy->rwsem); in store()
802 ret = fattr->store(policy, buf, count); in store()
806 up_write(&policy->rwsem); in store()
817 struct cpufreq_policy *policy = to_policy(kobj); in cpufreq_sysfs_release() local
819 complete(&policy->kobj_unregister); in cpufreq_sysfs_release()
877 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy) in cpufreq_add_dev_symlink() argument
882 for_each_cpu(j, policy->cpus) { in cpufreq_add_dev_symlink()
885 if (j == policy->cpu) in cpufreq_add_dev_symlink()
890 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, in cpufreq_add_dev_symlink()
898 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, in cpufreq_add_dev_interface() argument
907 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); in cpufreq_add_dev_interface()
913 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); in cpufreq_add_dev_interface()
918 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); in cpufreq_add_dev_interface()
923 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); in cpufreq_add_dev_interface()
928 return cpufreq_add_dev_symlink(policy); in cpufreq_add_dev_interface()
931 static void cpufreq_init_policy(struct cpufreq_policy *policy) in cpufreq_init_policy() argument
937 memcpy(&new_policy, policy, sizeof(*policy)); in cpufreq_init_policy()
940 gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu)); in cpufreq_init_policy()
943 policy->governor->name, policy->cpu); in cpufreq_init_policy()
951 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); in cpufreq_init_policy()
954 ret = cpufreq_set_policy(policy, &new_policy); in cpufreq_init_policy()
958 cpufreq_driver->exit(policy); in cpufreq_init_policy()
962 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, in cpufreq_add_policy_cpu() argument
969 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); in cpufreq_add_policy_cpu()
976 down_write(&policy->rwsem); in cpufreq_add_policy_cpu()
980 cpumask_set_cpu(cpu, policy->cpus); in cpufreq_add_policy_cpu()
981 per_cpu(cpufreq_cpu_data, cpu) = policy; in cpufreq_add_policy_cpu()
984 up_write(&policy->rwsem); in cpufreq_add_policy_cpu()
987 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); in cpufreq_add_policy_cpu()
989 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); in cpufreq_add_policy_cpu()
997 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); in cpufreq_add_policy_cpu()
1002 struct cpufreq_policy *policy; in cpufreq_policy_restore() local
1007 policy = per_cpu(cpufreq_cpu_data_fallback, cpu); in cpufreq_policy_restore()
1011 if (policy) in cpufreq_policy_restore()
1012 policy->governor = NULL; in cpufreq_policy_restore()
1014 return policy; in cpufreq_policy_restore()
1019 struct cpufreq_policy *policy; in cpufreq_policy_alloc() local
1021 policy = kzalloc(sizeof(*policy), GFP_KERNEL); in cpufreq_policy_alloc()
1022 if (!policy) in cpufreq_policy_alloc()
1025 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) in cpufreq_policy_alloc()
1028 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) in cpufreq_policy_alloc()
1031 INIT_LIST_HEAD(&policy->policy_list); in cpufreq_policy_alloc()
1032 init_rwsem(&policy->rwsem); in cpufreq_policy_alloc()
1033 spin_lock_init(&policy->transition_lock); in cpufreq_policy_alloc()
1034 init_waitqueue_head(&policy->transition_wait); in cpufreq_policy_alloc()
1035 init_completion(&policy->kobj_unregister); in cpufreq_policy_alloc()
1036 INIT_WORK(&policy->update, handle_update); in cpufreq_policy_alloc()
1038 return policy; in cpufreq_policy_alloc()
1041 free_cpumask_var(policy->cpus); in cpufreq_policy_alloc()
1043 kfree(policy); in cpufreq_policy_alloc()
1048 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy) in cpufreq_policy_put_kobj() argument
1054 CPUFREQ_REMOVE_POLICY, policy); in cpufreq_policy_put_kobj()
1056 down_read(&policy->rwsem); in cpufreq_policy_put_kobj()
1057 kobj = &policy->kobj; in cpufreq_policy_put_kobj()
1058 cmp = &policy->kobj_unregister; in cpufreq_policy_put_kobj()
1059 up_read(&policy->rwsem); in cpufreq_policy_put_kobj()
1072 static void cpufreq_policy_free(struct cpufreq_policy *policy) in cpufreq_policy_free() argument
1074 free_cpumask_var(policy->related_cpus); in cpufreq_policy_free()
1075 free_cpumask_var(policy->cpus); in cpufreq_policy_free()
1076 kfree(policy); in cpufreq_policy_free()
1079 static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu, in update_policy_cpu() argument
1084 if (WARN_ON(cpu == policy->cpu)) in update_policy_cpu()
1088 ret = kobject_move(&policy->kobj, &cpu_dev->kobj); in update_policy_cpu()
1094 down_write(&policy->rwsem); in update_policy_cpu()
1095 policy->cpu = cpu; in update_policy_cpu()
1096 up_write(&policy->rwsem); in update_policy_cpu()
1105 struct cpufreq_policy *policy; in __cpufreq_add_dev() local
1116 policy = cpufreq_cpu_get_raw(cpu); in __cpufreq_add_dev()
1117 if (unlikely(policy)) in __cpufreq_add_dev()
1125 for_each_policy(policy) { in __cpufreq_add_dev()
1126 if (cpumask_test_cpu(cpu, policy->related_cpus)) { in __cpufreq_add_dev()
1128 ret = cpufreq_add_policy_cpu(policy, cpu, dev); in __cpufreq_add_dev()
1139 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL; in __cpufreq_add_dev()
1140 if (!policy) { in __cpufreq_add_dev()
1142 policy = cpufreq_policy_alloc(); in __cpufreq_add_dev()
1143 if (!policy) in __cpufreq_add_dev()
1153 if (recover_policy && cpu != policy->cpu) in __cpufreq_add_dev()
1154 WARN_ON(update_policy_cpu(policy, cpu, dev)); in __cpufreq_add_dev()
1156 policy->cpu = cpu; in __cpufreq_add_dev()
1158 cpumask_copy(policy->cpus, cpumask_of(cpu)); in __cpufreq_add_dev()
1163 ret = cpufreq_driver->init(policy); in __cpufreq_add_dev()
1169 down_write(&policy->rwsem); in __cpufreq_add_dev()
1172 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); in __cpufreq_add_dev()
1178 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); in __cpufreq_add_dev()
1181 policy->user_policy.min = policy->min; in __cpufreq_add_dev()
1182 policy->user_policy.max = policy->max; in __cpufreq_add_dev()
1185 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, in __cpufreq_add_dev()
1195 for_each_cpu(j, policy->cpus) in __cpufreq_add_dev()
1196 per_cpu(cpufreq_cpu_data, j) = policy; in __cpufreq_add_dev()
1200 policy->cur = cpufreq_driver->get(policy->cpu); in __cpufreq_add_dev()
1201 if (!policy->cur) { in __cpufreq_add_dev()
1228 ret = cpufreq_frequency_table_get_index(policy, policy->cur); in __cpufreq_add_dev()
1232 __func__, policy->cpu, policy->cur); in __cpufreq_add_dev()
1233 ret = __cpufreq_driver_target(policy, policy->cur - 1, in __cpufreq_add_dev()
1243 __func__, policy->cpu, policy->cur); in __cpufreq_add_dev()
1248 CPUFREQ_START, policy); in __cpufreq_add_dev()
1251 ret = cpufreq_add_dev_interface(policy, dev); in __cpufreq_add_dev()
1255 CPUFREQ_CREATE_POLICY, policy); in __cpufreq_add_dev()
1259 list_add(&policy->policy_list, &cpufreq_policy_list); in __cpufreq_add_dev()
1262 cpufreq_init_policy(policy); in __cpufreq_add_dev()
1265 policy->user_policy.policy = policy->policy; in __cpufreq_add_dev()
1266 policy->user_policy.governor = policy->governor; in __cpufreq_add_dev()
1268 up_write(&policy->rwsem); in __cpufreq_add_dev()
1270 kobject_uevent(&policy->kobj, KOBJ_ADD); in __cpufreq_add_dev()
1276 cpufreq_driver->ready(policy); in __cpufreq_add_dev()
1285 for_each_cpu(j, policy->cpus) in __cpufreq_add_dev()
1290 kobject_put(&policy->kobj); in __cpufreq_add_dev()
1291 wait_for_completion(&policy->kobj_unregister); in __cpufreq_add_dev()
1294 up_write(&policy->rwsem); in __cpufreq_add_dev()
1297 cpufreq_driver->exit(policy); in __cpufreq_add_dev()
1302 cpufreq_policy_put_kobj(policy); in __cpufreq_add_dev()
1304 cpufreq_policy_free(policy); in __cpufreq_add_dev()
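
__cpufreq_add_dev() above is driven entirely by the hooks the driver registers; a minimal descriptor wiring the callbacks it invokes (->init, ->get, ->verify, ->target_index) might look like the sketch below. Names are hypothetical, and example_cpufreq_init() is the init sketch shown earlier.

        #include <linux/cpufreq.h>
        #include <linux/module.h>

        static int example_cpufreq_init(struct cpufreq_policy *policy);  /* earlier sketch */

        static int example_target_index(struct cpufreq_policy *policy,
                                        unsigned int index)
        {
                /* program the hardware for the index'th table entry; the core
                 * wraps this call in transition notifications via __target_index() */
                return 0;
        }

        static struct cpufreq_driver example_cpufreq_driver = {
                .name           = "example",
                .flags          = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
                .init           = example_cpufreq_init,
                .get            = cpufreq_generic_get,
                .verify         = cpufreq_generic_frequency_table_verify,
                .target_index   = example_target_index,
                .attr           = cpufreq_generic_attr,
        };

        static int __init example_cpufreq_register(void)
        {
                return cpufreq_register_driver(&example_cpufreq_driver);
        }
        module_init(example_cpufreq_register);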
1332 struct cpufreq_policy *policy; in __cpufreq_remove_dev_prepare() local
1338 policy = per_cpu(cpufreq_cpu_data, cpu); in __cpufreq_remove_dev_prepare()
1342 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy; in __cpufreq_remove_dev_prepare()
1346 if (!policy) { in __cpufreq_remove_dev_prepare()
1352 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); in __cpufreq_remove_dev_prepare()
1359 policy->governor->name, CPUFREQ_NAME_LEN); in __cpufreq_remove_dev_prepare()
1362 down_read(&policy->rwsem); in __cpufreq_remove_dev_prepare()
1363 cpus = cpumask_weight(policy->cpus); in __cpufreq_remove_dev_prepare()
1364 up_read(&policy->rwsem); in __cpufreq_remove_dev_prepare()
1366 if (cpu != policy->cpu) { in __cpufreq_remove_dev_prepare()
1370 int new_cpu = cpumask_any_but(policy->cpus, cpu); in __cpufreq_remove_dev_prepare()
1374 ret = update_policy_cpu(policy, new_cpu, cpu_dev); in __cpufreq_remove_dev_prepare()
1376 if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj, in __cpufreq_remove_dev_prepare()
1387 cpufreq_driver->stop_cpu(policy); in __cpufreq_remove_dev_prepare()
1399 struct cpufreq_policy *policy; in __cpufreq_remove_dev_finish() local
1402 policy = per_cpu(cpufreq_cpu_data, cpu); in __cpufreq_remove_dev_finish()
1406 if (!policy) { in __cpufreq_remove_dev_finish()
1411 down_write(&policy->rwsem); in __cpufreq_remove_dev_finish()
1412 cpus = cpumask_weight(policy->cpus); in __cpufreq_remove_dev_finish()
1415 cpumask_clear_cpu(cpu, policy->cpus); in __cpufreq_remove_dev_finish()
1416 up_write(&policy->rwsem); in __cpufreq_remove_dev_finish()
1421 ret = __cpufreq_governor(policy, in __cpufreq_remove_dev_finish()
1431 cpufreq_policy_put_kobj(policy); in __cpufreq_remove_dev_finish()
1439 cpufreq_driver->exit(policy); in __cpufreq_remove_dev_finish()
1443 list_del(&policy->policy_list); in __cpufreq_remove_dev_finish()
1447 cpufreq_policy_free(policy); in __cpufreq_remove_dev_finish()
1449 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START); in __cpufreq_remove_dev_finish()
1451 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); in __cpufreq_remove_dev_finish()
1485 struct cpufreq_policy *policy = in handle_update() local
1487 unsigned int cpu = policy->cpu; in handle_update()
1501 static void cpufreq_out_of_sync(struct cpufreq_policy *policy, in cpufreq_out_of_sync() argument
1507 policy->cur, new_freq); in cpufreq_out_of_sync()
1509 freqs.old = policy->cur; in cpufreq_out_of_sync()
1512 cpufreq_freq_transition_begin(policy, &freqs); in cpufreq_out_of_sync()
1513 cpufreq_freq_transition_end(policy, &freqs, 0); in cpufreq_out_of_sync()
1525 struct cpufreq_policy *policy; in cpufreq_quick_get() local
1531 policy = cpufreq_cpu_get(cpu); in cpufreq_quick_get()
1532 if (policy) { in cpufreq_quick_get()
1533 ret_freq = policy->cur; in cpufreq_quick_get()
1534 cpufreq_cpu_put(policy); in cpufreq_quick_get()
1549 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); in cpufreq_quick_get_max() local
1552 if (policy) { in cpufreq_quick_get_max()
1553 ret_freq = policy->max; in cpufreq_quick_get_max()
1554 cpufreq_cpu_put(policy); in cpufreq_quick_get_max()
1561 static unsigned int __cpufreq_get(struct cpufreq_policy *policy) in __cpufreq_get() argument
1568 ret_freq = cpufreq_driver->get(policy->cpu); in __cpufreq_get()
1570 if (ret_freq && policy->cur && in __cpufreq_get()
1574 if (unlikely(ret_freq != policy->cur)) { in __cpufreq_get()
1575 cpufreq_out_of_sync(policy, ret_freq); in __cpufreq_get()
1576 schedule_work(&policy->update); in __cpufreq_get()
1591 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); in cpufreq_get() local
1594 if (policy) { in cpufreq_get()
1595 down_read(&policy->rwsem); in cpufreq_get()
1596 ret_freq = __cpufreq_get(policy); in cpufreq_get()
1597 up_read(&policy->rwsem); in cpufreq_get()
1599 cpufreq_cpu_put(policy); in cpufreq_get()
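
Both exported getters above return a frequency in kHz (or 0 when no policy is available), but they differ in cost: cpufreq_quick_get() only reads the cached policy->cur, while cpufreq_get() takes the policy rwsem and queries the driver's ->get() hook. A consumer-side sketch:

        #include <linux/cpufreq.h>
        #include <linux/kernel.h>

        /* Sketch only: compare the cached value with a fresh hardware read. */
        static void example_log_freq(unsigned int cpu)
        {
                pr_info("cpu%u: cached %u kHz, hardware %u kHz\n",
                        cpu, cpufreq_quick_get(cpu), cpufreq_get(cpu));
        }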
1617 int cpufreq_generic_suspend(struct cpufreq_policy *policy) in cpufreq_generic_suspend() argument
1621 if (!policy->suspend_freq) { in cpufreq_generic_suspend()
1627 policy->suspend_freq); in cpufreq_generic_suspend()
1629 ret = __cpufreq_driver_target(policy, policy->suspend_freq, in cpufreq_generic_suspend()
1633 __func__, policy->suspend_freq, ret); in cpufreq_generic_suspend()
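
cpufreq_generic_suspend() simply forces policy->suspend_freq through __cpufreq_driver_target(), so a driver opting in only needs to choose that frequency at init time and set .suspend = cpufreq_generic_suspend. A sketch; picking policy->max is an illustrative choice, not a rule:

        #include <linux/cpufreq.h>

        /* Sketch only: select the frequency to hold across suspend at init time. */
        static void example_setup_suspend(struct cpufreq_policy *policy)
        {
                policy->suspend_freq = policy->max;     /* kHz, illustrative choice */
        }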
1649 struct cpufreq_policy *policy; in cpufreq_suspend() local
1659 for_each_policy(policy) { in cpufreq_suspend()
1660 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP)) in cpufreq_suspend()
1662 __func__, policy); in cpufreq_suspend()
1664 && cpufreq_driver->suspend(policy)) in cpufreq_suspend()
1666 policy); in cpufreq_suspend()
1681 struct cpufreq_policy *policy; in cpufreq_resume() local
1693 for_each_policy(policy) { in cpufreq_resume()
1694 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) in cpufreq_resume()
1696 policy); in cpufreq_resume()
1697 else if (__cpufreq_governor(policy, CPUFREQ_GOV_START) in cpufreq_resume()
1698 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS)) in cpufreq_resume()
1700 __func__, policy); in cpufreq_resume()
1708 policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask)); in cpufreq_resume()
1709 if (WARN_ON(!policy)) in cpufreq_resume()
1712 schedule_work(&policy->update); in cpufreq_resume()
1828 static int __target_intermediate(struct cpufreq_policy *policy, in __target_intermediate() argument
1833 freqs->new = cpufreq_driver->get_intermediate(policy, index); in __target_intermediate()
1840 __func__, policy->cpu, freqs->old, freqs->new); in __target_intermediate()
1842 cpufreq_freq_transition_begin(policy, freqs); in __target_intermediate()
1843 ret = cpufreq_driver->target_intermediate(policy, index); in __target_intermediate()
1844 cpufreq_freq_transition_end(policy, freqs, ret); in __target_intermediate()
1853 static int __target_index(struct cpufreq_policy *policy, in __target_index() argument
1856 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0}; in __target_index()
1865 retval = __target_intermediate(policy, &freqs, index); in __target_index()
1877 __func__, policy->cpu, freqs.old, freqs.new); in __target_index()
1879 cpufreq_freq_transition_begin(policy, &freqs); in __target_index()
1882 retval = cpufreq_driver->target_index(policy, index); in __target_index()
1888 cpufreq_freq_transition_end(policy, &freqs, retval); in __target_index()
1898 freqs.new = policy->restore_freq; in __target_index()
1899 cpufreq_freq_transition_begin(policy, &freqs); in __target_index()
1900 cpufreq_freq_transition_end(policy, &freqs, 0); in __target_index()
1907 int __cpufreq_driver_target(struct cpufreq_policy *policy, in __cpufreq_driver_target() argument
1918 if (target_freq > policy->max) in __cpufreq_driver_target()
1919 target_freq = policy->max; in __cpufreq_driver_target()
1920 if (target_freq < policy->min) in __cpufreq_driver_target()
1921 target_freq = policy->min; in __cpufreq_driver_target()
1924 policy->cpu, target_freq, relation, old_target_freq); in __cpufreq_driver_target()
1932 if (target_freq == policy->cur) in __cpufreq_driver_target()
1936 policy->restore_freq = policy->cur; in __cpufreq_driver_target()
1939 retval = cpufreq_driver->target(policy, target_freq, relation); in __cpufreq_driver_target()
1944 freq_table = cpufreq_frequency_get_table(policy->cpu); in __cpufreq_driver_target()
1950 retval = cpufreq_frequency_table_target(policy, freq_table, in __cpufreq_driver_target()
1957 if (freq_table[index].frequency == policy->cur) { in __cpufreq_driver_target()
1962 retval = __target_index(policy, freq_table, index); in __cpufreq_driver_target()
1970 int cpufreq_driver_target(struct cpufreq_policy *policy, in cpufreq_driver_target() argument
1976 down_write(&policy->rwsem); in cpufreq_driver_target()
1978 ret = __cpufreq_driver_target(policy, target_freq, relation); in cpufreq_driver_target()
1980 up_write(&policy->rwsem); in cpufreq_driver_target()
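
cpufreq_driver_target() is the locked wrapper: it takes policy->rwsem around __cpufreq_driver_target(), which clamps the request to policy->min/max and resolves it against the frequency table. A sketch of an external caller:

        #include <linux/cpufreq.h>

        /* Sketch only: request the lowest available frequency at or above
         * policy->min; the core clamps and resolves the table index. */
        static int example_drop_to_min(struct cpufreq_policy *policy)
        {
                return cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
        }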
1986 static int __cpufreq_governor(struct cpufreq_policy *policy, in __cpufreq_governor() argument
2008 if (!policy->governor) in __cpufreq_governor()
2011 if (policy->governor->max_transition_latency && in __cpufreq_governor()
2012 policy->cpuinfo.transition_latency > in __cpufreq_governor()
2013 policy->governor->max_transition_latency) { in __cpufreq_governor()
2018 policy->governor->name, gov->name); in __cpufreq_governor()
2019 policy->governor = gov; in __cpufreq_governor()
2024 if (!try_module_get(policy->governor->owner)) in __cpufreq_governor()
2028 policy->cpu, event); in __cpufreq_governor()
2031 if ((policy->governor_enabled && event == CPUFREQ_GOV_START) in __cpufreq_governor()
2032 || (!policy->governor_enabled in __cpufreq_governor()
2039 policy->governor_enabled = false; in __cpufreq_governor()
2041 policy->governor_enabled = true; in __cpufreq_governor()
2045 ret = policy->governor->governor(policy, event); in __cpufreq_governor()
2049 policy->governor->initialized++; in __cpufreq_governor()
2051 policy->governor->initialized--; in __cpufreq_governor()
2056 policy->governor_enabled = true; in __cpufreq_governor()
2058 policy->governor_enabled = false; in __cpufreq_governor()
2064 module_put(policy->governor->owner); in __cpufreq_governor()
2129 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) in cpufreq_get_policy() argument
2132 if (!policy) in cpufreq_get_policy()
2139 memcpy(policy, cpu_policy, sizeof(*policy)); in cpufreq_get_policy()
2150 static int cpufreq_set_policy(struct cpufreq_policy *policy, in cpufreq_set_policy() argument
2159 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo)); in cpufreq_set_policy()
2161 if (new_policy->min > policy->max || new_policy->max < policy->min) in cpufreq_set_policy()
2189 policy->min = new_policy->min; in cpufreq_set_policy()
2190 policy->max = new_policy->max; in cpufreq_set_policy()
2193 policy->min, policy->max); in cpufreq_set_policy()
2196 policy->policy = new_policy->policy; in cpufreq_set_policy()
2201 if (new_policy->governor == policy->governor) in cpufreq_set_policy()
2207 old_gov = policy->governor; in cpufreq_set_policy()
2210 __cpufreq_governor(policy, CPUFREQ_GOV_STOP); in cpufreq_set_policy()
2211 up_write(&policy->rwsem); in cpufreq_set_policy()
2212 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); in cpufreq_set_policy()
2213 down_write(&policy->rwsem); in cpufreq_set_policy()
2217 policy->governor = new_policy->governor; in cpufreq_set_policy()
2218 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) { in cpufreq_set_policy()
2219 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) in cpufreq_set_policy()
2222 up_write(&policy->rwsem); in cpufreq_set_policy()
2223 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); in cpufreq_set_policy()
2224 down_write(&policy->rwsem); in cpufreq_set_policy()
2228 pr_debug("starting governor %s failed\n", policy->governor->name); in cpufreq_set_policy()
2230 policy->governor = old_gov; in cpufreq_set_policy()
2231 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); in cpufreq_set_policy()
2232 __cpufreq_governor(policy, CPUFREQ_GOV_START); in cpufreq_set_policy()
2239 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); in cpufreq_set_policy()
2251 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); in cpufreq_update_policy() local
2255 if (!policy) in cpufreq_update_policy()
2258 down_write(&policy->rwsem); in cpufreq_update_policy()
2261 memcpy(&new_policy, policy, sizeof(*policy)); in cpufreq_update_policy()
2262 new_policy.min = policy->user_policy.min; in cpufreq_update_policy()
2263 new_policy.max = policy->user_policy.max; in cpufreq_update_policy()
2264 new_policy.policy = policy->user_policy.policy; in cpufreq_update_policy()
2265 new_policy.governor = policy->user_policy.governor; in cpufreq_update_policy()
2278 if (!policy->cur) { in cpufreq_update_policy()
2280 policy->cur = new_policy.cur; in cpufreq_update_policy()
2282 if (policy->cur != new_policy.cur && has_target()) in cpufreq_update_policy()
2283 cpufreq_out_of_sync(policy, new_policy.cur); in cpufreq_update_policy()
2287 ret = cpufreq_set_policy(policy, &new_policy); in cpufreq_update_policy()
2290 up_write(&policy->rwsem); in cpufreq_update_policy()
2292 cpufreq_cpu_put(policy); in cpufreq_update_policy()
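
cpufreq_update_policy() rebuilds a new_policy from the stored user_policy limits and re-runs cpufreq_set_policy(), which makes it the entry point for platform code that learns of an externally imposed limit change (ACPI _PPC style notifications are a typical caller). A sketch:

        #include <linux/cpufreq.h>

        /* Sketch only: re-evaluate the policy after firmware changes its limits. */
        static void example_limits_changed(unsigned int cpu)
        {
                cpufreq_update_policy(cpu);
        }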
2336 struct cpufreq_policy *policy; in cpufreq_boost_set_sw() local
2339 for_each_policy(policy) { in cpufreq_boost_set_sw()
2340 freq_table = cpufreq_frequency_get_table(policy->cpu); in cpufreq_boost_set_sw()
2342 ret = cpufreq_frequency_table_cpuinfo(policy, in cpufreq_boost_set_sw()
2349 policy->user_policy.max = policy->max; in cpufreq_boost_set_sw()
2350 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); in cpufreq_boost_set_sw()