Diffstat (limited to 'include')
-rw-r--r--  include/linux/cpufreq.h    |  14
-rw-r--r--  include/linux/intel_rapl.h | 155
-rw-r--r--  include/linux/pm_qos.h     |  48
3 files changed, 199 insertions(+), 18 deletions(-)
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 46b167fba155..536a049d7ecc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -47,11 +47,6 @@ struct cpufreq_cpuinfo {
unsigned int transition_latency;
};
-struct cpufreq_user_policy {
- unsigned int min; /* in kHz */
- unsigned int max; /* in kHz */
-};
-
struct cpufreq_policy {
/* CPUs sharing clock, require sw coordination */
cpumask_var_t cpus; /* Online CPUs only */
@@ -81,7 +76,8 @@ struct cpufreq_policy {
struct work_struct update; /* if update_policy() needs to be
* called, but you're in IRQ context */
- struct cpufreq_user_policy user_policy;
+ struct dev_pm_qos_request *min_freq_req;
+ struct dev_pm_qos_request *max_freq_req;
struct cpufreq_frequency_table *freq_table;
enum cpufreq_table_sorting freq_table_sorted;
@@ -144,6 +140,9 @@ struct cpufreq_policy {
/* Pointer to the cooling device if used for thermal mitigation */
struct thermal_cooling_device *cdev;
+
+ struct notifier_block nb_min;
+ struct notifier_block nb_max;
};
struct cpufreq_freqs {
@@ -201,6 +200,7 @@ void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy);
+void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
@@ -992,7 +992,7 @@ extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
unsigned int cpufreq_generic_get(unsigned int cpu);
-int cpufreq_generic_init(struct cpufreq_policy *policy,
+void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
#endif /* _LINUX_CPUFREQ_H */
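
[Editor's note, illustrative only and not part of the patch: with cpufreq_generic_init() now returning void, a driver's ->init() callback no longer needs to forward its return value. A minimal sketch, assuming a hypothetical "foo" driver and its own frequency table:]

static struct cpufreq_frequency_table foo_freq_table[] = {
	{ .frequency = 500000 },		/* 500 MHz (values in kHz) */
	{ .frequency = 1000000 },		/* 1 GHz */
	{ .frequency = CPUFREQ_TABLE_END },
};

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* Register the table; the third argument is transition latency in ns. */
	cpufreq_generic_init(policy, foo_freq_table, 100000);
	return 0;
}
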
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
new file mode 100644
index 000000000000..efb3ce892c20
--- /dev/null
+++ b/include/linux/intel_rapl.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data types and headers for RAPL support
+ *
+ * Copyright (C) 2019 Intel Corporation.
+ *
+ * Author: Zhang Rui <rui.zhang@intel.com>
+ */
+
+#ifndef __INTEL_RAPL_H__
+#define __INTEL_RAPL_H__
+
+#include <linux/types.h>
+#include <linux/powercap.h>
+#include <linux/cpuhotplug.h>
+
+enum rapl_domain_type {
+ RAPL_DOMAIN_PACKAGE, /* entire package/socket */
+ RAPL_DOMAIN_PP0, /* core power plane */
+ RAPL_DOMAIN_PP1, /* graphics uncore */
+ RAPL_DOMAIN_DRAM, /* DRAM control_type */
+ RAPL_DOMAIN_PLATFORM, /* PSys control_type */
+ RAPL_DOMAIN_MAX,
+};
+
+enum rapl_domain_reg_id {
+ RAPL_DOMAIN_REG_LIMIT,
+ RAPL_DOMAIN_REG_STATUS,
+ RAPL_DOMAIN_REG_PERF,
+ RAPL_DOMAIN_REG_POLICY,
+ RAPL_DOMAIN_REG_INFO,
+ RAPL_DOMAIN_REG_MAX,
+};
+
+struct rapl_package;
+
+enum rapl_primitives {
+ ENERGY_COUNTER,
+ POWER_LIMIT1,
+ POWER_LIMIT2,
+ FW_LOCK,
+
+ PL1_ENABLE, /* power limit 1, aka long term */
+ PL1_CLAMP, /* allow frequency to go below OS request */
+ PL2_ENABLE, /* power limit 2, aka short term, instantaneous */
+ PL2_CLAMP,
+
+ TIME_WINDOW1, /* long term */
+ TIME_WINDOW2, /* short term */
+ THERMAL_SPEC_POWER,
+ MAX_POWER,
+
+ MIN_POWER,
+ MAX_TIME_WINDOW,
+ THROTTLED_TIME,
+ PRIORITY_LEVEL,
+
+ /* below are not raw primitive data */
+ AVERAGE_POWER,
+ NR_RAPL_PRIMITIVES,
+};
+
+struct rapl_domain_data {
+ u64 primitives[NR_RAPL_PRIMITIVES];
+ unsigned long timestamp;
+};
+
+#define NR_POWER_LIMITS (2)
+struct rapl_power_limit {
+ struct powercap_zone_constraint *constraint;
+ int prim_id; /* primitive ID used to enable */
+ struct rapl_domain *domain;
+ const char *name;
+ u64 last_power_limit;
+};
+
+struct rapl_package;
+
+struct rapl_domain {
+ const char *name;
+ enum rapl_domain_type id;
+ u64 regs[RAPL_DOMAIN_REG_MAX];
+ struct powercap_zone power_zone;
+ struct rapl_domain_data rdd;
+ struct rapl_power_limit rpl[NR_POWER_LIMITS];
+ u64 attr_map; /* track capabilities */
+ unsigned int state;
+ unsigned int domain_energy_unit;
+ struct rapl_package *rp;
+};
+
+struct reg_action {
+ u64 reg;
+ u64 mask;
+ u64 value;
+ int err;
+};
+
+/**
+ * struct rapl_if_priv: private data for different RAPL interfaces
+ * @control_type: Each RAPL interface must have its own powercap
+ * control type.
+ * @platform_rapl_domain: Optional. Some RAPL interface may have platform
+ * level RAPL control.
+ * @pcap_rapl_online: CPU hotplug state for each RAPL interface.
+ * @reg_unit: Register for getting energy/power/time unit.
+ * @regs: Register sets for different RAPL Domains.
+ * @limits: Number of power limits supported by each domain.
+ * @read_raw: Callback for reading RAPL interface specific
+ * registers.
+ * @write_raw: Callback for writing RAPL interface specific
+ * registers.
+ */
+struct rapl_if_priv {
+ struct powercap_control_type *control_type;
+ struct rapl_domain *platform_rapl_domain;
+ enum cpuhp_state pcap_rapl_online;
+ u64 reg_unit;
+ u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
+ int limits[RAPL_DOMAIN_MAX];
+ int (*read_raw)(int cpu, struct reg_action *ra);
+ int (*write_raw)(int cpu, struct reg_action *ra);
+};
+
+/* maximum rapl package domain name: package-%d-die-%d */
+#define PACKAGE_DOMAIN_NAME_LENGTH 30
+
+struct rapl_package {
+ unsigned int id; /* logical die id, equals physical 1-die systems */
+ unsigned int nr_domains;
+ unsigned long domain_map; /* bit map of active domains */
+ unsigned int power_unit;
+ unsigned int energy_unit;
+ unsigned int time_unit;
+ struct rapl_domain *domains; /* array of domains, sized at runtime */
+ struct powercap_zone *power_zone; /* keep track of parent zone */
+ unsigned long power_limit_irq; /* keep track of package power limit
+ * notify interrupt enable status.
+ */
+ struct list_head plist;
+ int lead_cpu; /* one active cpu per package for access */
+ /* Track active cpus */
+ struct cpumask cpumask;
+ char name[PACKAGE_DOMAIN_NAME_LENGTH];
+ struct rapl_if_priv *priv;
+};
+
+struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv);
+struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv);
+void rapl_remove_package(struct rapl_package *rp);
+
+int rapl_add_platform_domain(struct rapl_if_priv *priv);
+void rapl_remove_platform_domain(struct rapl_if_priv *priv);
+
+#endif /* __INTEL_RAPL_H__ */
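
[Editor's note, illustrative only and not part of the patch: an interface driver would fill in struct rapl_if_priv with its own register-access callbacks and then register packages through rapl_add_package(). A rough sketch, assuming hypothetical foo_read_raw()/foo_write_raw() helpers and an assumed unit-register address:]

static int foo_read_raw(int cpu, struct reg_action *ra)
{
	/* Interface-specific read of the register named by ra->reg on "cpu". */
	ra->value = 0;			/* placeholder; real access goes here */
	return 0;
}

static int foo_write_raw(int cpu, struct reg_action *ra)
{
	/* Read-modify-write of the bits selected by ra->mask. */
	return 0;
}

static struct rapl_if_priv foo_rapl_priv = {
	.reg_unit  = 0x606,		/* assumed unit-register address */
	.read_raw  = foo_read_raw,
	.write_raw = foo_write_raw,
	/* .regs[][] and .limits[] would describe each domain's registers. */
};

[A package would then be looked up with rapl_find_package_domain(cpu, &foo_rapl_priv) and, if not present yet, created with rapl_add_package(cpu, &foo_rapl_priv).]
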
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 6ea1ae373d77..2aebbc5b9950 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -40,6 +40,8 @@ enum pm_qos_flags_status {
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
+#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE 0
+#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE S32_MAX
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
@@ -58,6 +60,8 @@ struct pm_qos_flags_request {
enum dev_pm_qos_req_type {
DEV_PM_QOS_RESUME_LATENCY = 1,
DEV_PM_QOS_LATENCY_TOLERANCE,
+ DEV_PM_QOS_MIN_FREQUENCY,
+ DEV_PM_QOS_MAX_FREQUENCY,
DEV_PM_QOS_FLAGS,
};
@@ -99,10 +103,14 @@ struct pm_qos_flags {
struct dev_pm_qos {
struct pm_qos_constraints resume_latency;
struct pm_qos_constraints latency_tolerance;
+ struct pm_qos_constraints min_frequency;
+ struct pm_qos_constraints max_frequency;
struct pm_qos_flags flags;
struct dev_pm_qos_request *resume_latency_req;
struct dev_pm_qos_request *latency_tolerance_req;
struct dev_pm_qos_request *flags_req;
+ struct dev_pm_qos_request *min_frequency_req;
+ struct dev_pm_qos_request *max_frequency_req;
};
/* Action requested to pm_qos_update_target */
@@ -139,16 +147,18 @@ s32 pm_qos_read_value(struct pm_qos_constraints *c);
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
-s32 __dev_pm_qos_read_value(struct device *dev);
-s32 dev_pm_qos_read_value(struct device *dev);
+s32 __dev_pm_qos_resume_latency(struct device *dev);
+s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
- struct notifier_block *notifier);
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type);
int dev_pm_qos_remove_notifier(struct device *dev,
- struct notifier_block *notifier);
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
@@ -174,7 +184,7 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev)
return dev->power.qos->flags_req->data.flr.flags;
}
-static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
+static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
{
return IS_ERR_OR_NULL(dev->power.qos) ?
PM_QOS_RESUME_LATENCY_NO_CONSTRAINT :
@@ -187,10 +197,24 @@ static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
s32 mask)
{ return PM_QOS_FLAGS_UNDEFINED; }
-static inline s32 __dev_pm_qos_read_value(struct device *dev)
- { return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
-static inline s32 dev_pm_qos_read_value(struct device *dev)
+static inline s32 __dev_pm_qos_resume_latency(struct device *dev)
{ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
+static inline s32 dev_pm_qos_read_value(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
static inline int dev_pm_qos_add_request(struct device *dev,
struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type,
@@ -202,10 +226,12 @@ static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
- struct notifier_block *notifier)
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
- struct notifier_block *notifier)
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
{ return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
@@ -241,7 +267,7 @@ static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
}
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
-static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
+static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
{
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
}
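
[Editor's note, illustrative only and not part of the patch: a caller would use the new frequency constraint types and the per-type notifier registration like this. The device pointer, the request storage and foo_min_freq_notifier() are assumed caller-provided:]

static int foo_min_freq_notifier(struct notifier_block *nb,
				 unsigned long freq, void *data)
{
	/* React to the new effective minimum frequency (in kHz). */
	return NOTIFY_OK;
}

static struct notifier_block foo_nb = {
	.notifier_call = foo_min_freq_notifier,
};

static int foo_constrain(struct device *dev, struct dev_pm_qos_request *min_req)
{
	int ret;

	/* Notifiers are now registered per constraint type. */
	ret = dev_pm_qos_add_notifier(dev, &foo_nb, DEV_PM_QOS_MIN_FREQUENCY);
	if (ret)
		return ret;

	/* Request a minimum frequency of 800000 kHz. */
	return dev_pm_qos_add_request(dev, min_req,
				      DEV_PM_QOS_MIN_FREQUENCY, 800000);
}
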