Lines Matching 0x600000000

346 static u32 max_highest_perf = 0, min_highest_perf = U32_MAX; in intel_pstate_set_itmt_prio()
438 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { in intel_pstate_init_acpi_perf_limits()
439 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", in intel_pstate_init_acpi_perf_limits()
458 cpu->acpi_perf_data.states[0].core_frequency = in intel_pstate_init_acpi_perf_limits()
507 cpu = all_cpu_data[0]; in update_turbo_state()
516 struct cpudata *cpu = all_cpu_data[0]; in min_perf_pct_min()
520 (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0; in min_perf_pct_min()
535 return (s16)(epb & 0x0f); in intel_pstate_get_epb()
544 * When hwp_req_data is 0, means that caller didn't read in intel_pstate_get_epp()
553 epp = (hwp_req_data >> 24) & 0xff; in intel_pstate_get_epp()
574 epb = (epb & ~0x0f) | pref; in intel_pstate_set_epb()
577 return 0; in intel_pstate_set_epb()
585 * 0 default
611 *raw_epp = 0; in intel_pstate_get_energy_pref_index()
612 epp = intel_pstate_get_epp(cpu_data, 0); in intel_pstate_get_energy_pref_index()
613 if (epp < 0) in intel_pstate_get_energy_pref_index()
626 return 0; in intel_pstate_get_energy_pref_index()
630 * 0x00-0x03 : Performance in intel_pstate_get_energy_pref_index()
631 * 0x04-0x07 : Balance performance in intel_pstate_get_energy_pref_index()
632 * 0x08-0x0B : Balance power in intel_pstate_get_energy_pref_index()
633 * 0x0C-0x0F : Power in intel_pstate_get_energy_pref_index()
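
The four ranges above are quoted from the EPB handling in intel_pstate_get_energy_pref_index(). As a standalone sketch (not the driver's code), picking the bucket only needs the top two bits of the 4-bit EPB value:

/*
 * Minimal illustration of the bucket mapping implied by the ranges above:
 * (epb & 0x0f) >> 2 yields 0..3, indexing performance, balance_performance,
 * balance_power and power in that order.
 */
#include <stdio.h>

static const char * const epb_bucket[] = {
	"performance",		/* 0x00-0x03 */
	"balance_performance",	/* 0x04-0x07 */
	"balance_power",	/* 0x08-0x0B */
	"power",		/* 0x0C-0x0F */
};

static const char *epb_to_pref(unsigned int epb)
{
	return epb_bucket[(epb & 0x0f) >> 2];
}

int main(void)
{
	unsigned int epb;

	for (epb = 0; epb <= 0x0f; epb++)
		printf("EPB 0x%02x -> %s\n", epb, epb_to_pref(epb));
	return 0;
}
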
688 * from 0 (performance) if the current policy is "performance", in intel_pstate_set_energy_pref_index()
691 if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) in intel_pstate_set_energy_pref_index()
707 int i = 0; in show_energy_performance_available_preferences()
708 int ret = 0; in show_energy_performance_available_preferences()
729 u32 epp = 0; in store_energy_performance_preference()
736 if (ret < 0) { in store_energy_performance_preference()
795 if (preference < 0) in show_energy_performance_preference()
813 if (ratio <= 0) { in show_base_frequency()
862 value &= ~HWP_MIN_PERF(~0L); in intel_pstate_hwp_set()
865 value &= ~HWP_MAX_PERF(~0L); in intel_pstate_hwp_set()
877 if (epp < 0) in intel_pstate_hwp_set()
880 epp = 0; in intel_pstate_hwp_set()
883 if (cpu_data->epp_powersave < 0) in intel_pstate_hwp_set()
926 value &= ~GENMASK_ULL(31, 0); in intel_pstate_hwp_offline()
978 return 0; in intel_pstate_suspend()
1004 return 0; in intel_pstate_resume()
1086 return ret < 0 ? ret : count; in store_status()
1103 cpu = all_cpu_data[0]; in show_turbo_pct()
1128 cpu = all_cpu_data[0]; in show_num_pstates()
1186 global.no_turbo = clamp_t(int, input, 0, 1); in store_no_turbo()
1189 struct cpudata *cpu = all_cpu_data[0]; in store_no_turbo()
1240 if (freq_qos_update_request(req, freq) < 0) in update_qos_request()
1466 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); in intel_pstate_hwp_enable()
1468 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); in intel_pstate_hwp_enable()
1470 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); in intel_pstate_hwp_enable()
1478 return (value >> 8) & 0x7F; in atom_get_min_pstate()
1486 return (value >> 16) & 0x7F; in atom_get_max_pstate()
1494 return value & 0x7F; in atom_get_turbo_pstate()
1529 i = value & 0x7; in silvermont_get_scaling()
1545 i = value & 0xF; in airmont_get_scaling()
1556 cpudata->vid.min = int_tofp((value >> 8) & 0x7f); in atom_get_vid()
1557 cpudata->vid.max = int_tofp((value >> 16) & 0x7f); in atom_get_vid()
1564 cpudata->vid.turbo = value & 0x7f; in atom_get_vid()
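
The atom_get_*_pstate() and atom_get_vid() matches above all pull a 7-bit field from bit offset 0, 8 or 16 of an MSR image. A minimal sketch of just that extraction (which MSR each field is read from in the driver is omitted here, and the example value is made up):

#include <stdint.h>
#include <stdio.h>

/* Extract a 7-bit field starting at bit "shift" of an MSR image. */
static unsigned int msr_field7(uint64_t value, unsigned int shift)
{
	return (value >> shift) & 0x7f;
}

int main(void)
{
	uint64_t value = 0x00170d06;	/* example register image only */

	printf("bits  6:0  -> %u\n", msr_field7(value, 0));
	printf("bits 14:8  -> %u\n", msr_field7(value, 8));
	printf("bits 22:16 -> %u\n", msr_field7(value, 16));
	return 0;
}
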
1572 return (value >> 40) & 0xFF; in core_get_min_pstate()
1580 return (value >> 8) & 0xFF; in core_get_max_pstate_physical()
1586 if (plat_info & 0x600000000) { in core_get_tdp_ratio()
1592 /* Get the TDP level (0, 1, 2) to get ratios */ in core_get_tdp_ratio()
1597 /* TDP MSR are continuous starting at 0x648 */ in core_get_tdp_ratio()
1598 tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); in core_get_tdp_ratio()
1604 if (tdp_ctrl & 0x03) in core_get_tdp_ratio()
1607 tdp_ratio &= 0xff; /* ratios are only 8 bits long */ in core_get_tdp_ratio()
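
The core_get_tdp_ratio() matches above outline the config-TDP lookup: the 0x600000000 mask tests MSR_PLATFORM_INFO for configurable TDP levels, MSR_CONFIG_TDP_CONTROL selects the active level (0, 1 or 2), and the per-level ratio MSRs sit contiguously from 0x648. Below is a hedged user-space rendition of the same walk, assuming the msr driver is loaded and root access; the raw register addresses (0xCE, 0x648, 0x64B) and the bits-23:16 placement of the non-nominal ratios are assumptions taken from the SDM/driver rather than from the listing itself:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_PLATFORM_INFO	0xce
#define MSR_CONFIG_TDP_NOMINAL	0x648
#define MSR_CONFIG_TDP_CONTROL	0x64b

static int rdmsr(int fd, uint32_t reg, uint64_t *val)
{
	return pread(fd, val, sizeof(*val), reg) == sizeof(*val) ? 0 : -1;
}

int main(void)
{
	uint64_t plat_info, tdp_ctrl, tdp_ratio;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || rdmsr(fd, MSR_PLATFORM_INFO, &plat_info))
		return 1;

	if (!(plat_info & 0x600000000ULL)) {
		printf("no configurable TDP levels\n");
		return 0;
	}

	if (rdmsr(fd, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl))
		return 1;

	/* The per-level ratio MSRs are contiguous starting at 0x648. */
	if (rdmsr(fd, MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03), &tdp_ratio))
		return 1;

	/* For the non-nominal levels the ratio sits in bits 23:16. */
	if (tdp_ctrl & 0x03)
		tdp_ratio >>= 16;

	/* Ratios are only 8 bits long. */
	printf("config TDP ratio: %llu\n",
	       (unsigned long long)(tdp_ratio & 0xff));
	close(fd);
	return 0;
}
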
1625 max_pstate = (plat_info >> 8) & 0xFF; in core_get_max_pstate()
1628 if (tdp_ratio <= 0) in core_get_max_pstate()
1641 tar_levels = tar & 0xff; in core_get_max_pstate()
1692 ret = (((value) >> 8) & 0xFF); in knl_get_turbo_pstate()
1764 u32 max_limit = (hwp_req & 0xff00) >> 8; in intel_pstate_hwp_boost_up()
1765 u32 min_limit = (hwp_req & 0xff); in intel_pstate_hwp_boost_up()
1802 hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min; in intel_pstate_hwp_boost_up()
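
The intel_pstate_hwp_boost_up() matches above rely on the HWP_REQUEST layout: minimum performance in bits 7:0 and maximum in bits 15:8. A small standalone sketch of that read-modify-write follows; GENMASK_ULL is redefined only so the snippet builds outside the kernel, and the register image in main() is made up:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Clear bits 7:0 of a HWP_REQUEST image and merge in a new minimum ratio. */
static uint64_t hwp_req_set_min(uint64_t hwp_req, unsigned int min_ratio)
{
	return (hwp_req & ~GENMASK_ULL(7, 0)) | (min_ratio & 0xff);
}

int main(void)
{
	uint64_t hwp_req = 0x80002a04;	/* example register image only */
	unsigned int max = (hwp_req & 0xff00) >> 8;
	unsigned int min = hwp_req & 0xff;

	printf("before: min 0x%02x, max 0x%02x\n", min, max);
	hwp_req = hwp_req_set_min(hwp_req, 0x18);
	printf("after:  min 0x%02llx\n", (unsigned long long)(hwp_req & 0xff));
	return 0;
}
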
1817 cpu->hwp_boost_min = 0; in intel_pstate_hwp_boost_down()
1831 cpu->sched_flags = 0; in intel_pstate_update_util_hwp_local()
2025 cpu->iowait_boost = 0; in intel_pstate_update_util()
2161 cpu->epp_policy = 0; in intel_pstate_init_cpu()
2167 return 0; in intel_pstate_init_cpu()
2181 cpu->sample.time = 0; in intel_pstate_set_update_util_hook()
2235 0, max_policy_perf); in intel_pstate_update_perf_limits()
2251 global_min = clamp_t(int32_t, global_min, 0, global_max); in intel_pstate_update_perf_limits()
2312 return 0; in intel_pstate_set_policy()
2350 return 0; in intel_pstate_verify_policy()
2360 return 0; in intel_pstate_cpu_offline()
2375 return 0; in intel_pstate_cpu_offline()
2395 return 0; in intel_pstate_cpu_online()
2411 return 0; in intel_pstate_cpu_exit()
2425 cpu->max_perf_ratio = 0xFF; in __intel_pstate_cpu_init()
2426 cpu->min_perf_ratio = 0; in __intel_pstate_cpu_init()
2452 return 0; in __intel_pstate_cpu_init()
2471 cpu->epp_cached = intel_pstate_get_epp(cpu, 0); in intel_pstate_cpu_init()
2474 return 0; in intel_pstate_cpu_init()
2499 return 0; in intel_cpufreq_verify_policy()
2509 * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
2512 * The scaled_busy field is not used, and is set to 0.
2530 0, in intel_cpufreq_trace()
2545 value &= ~HWP_MIN_PERF(~0L); in intel_cpufreq_adjust_hwp()
2552 value &= ~HWP_MAX_PERF(~0L); in intel_cpufreq_adjust_hwp()
2631 return 0; in intel_cpufreq_target()
2696 if (ret < 0) { in intel_cpufreq_cpu_init()
2703 if (ret < 0) { in intel_cpufreq_cpu_init()
2710 return 0; in intel_cpufreq_cpu_init()
2778 memset(&global, 0, sizeof(global)); in intel_pstate_register_driver()
2790 return 0; in intel_pstate_register_driver()
2813 return 0; in intel_pstate_update_status()
2819 return 0; in intel_pstate_update_status()
2830 return 0; in intel_pstate_update_status()
2854 return 0; in intel_pstate_msrs_not_valid()
2941 {"HP ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
2942 {"ORACLE", "X4-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2943 {"ORACLE", "X4-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2944 {"ORACLE", "X4-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2945 {"ORACLE", "X3-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2946 {"ORACLE", "X3-2L ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2947 {"ORACLE", "X3-2B ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2948 {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2949 {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2950 {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2951 {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2952 {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2953 {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2954 {"ORACLE", "X6-2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2955 {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2978 if (idx < 0) in intel_pstate_platform_pwr_mgmt_exists()
3009 #define INTEL_PSTATE_HWP_BROADWELL 0x01
3018 X86_MATCH_HWP(ANY, 0),
3027 return !!(value & 0x1); in intel_pstate_hwp_is_enabled()
3132 return 0; in intel_pstate_init()
3163 return 0; in intel_pstate_setup()