/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ENERGY_MODEL_H
#define _LINUX_ENERGY_MODEL_H
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/topology.h>
#include <linux/types.h>

/**
 * struct em_perf_state - Performance state of a performance domain
 * @performance: CPU performance (capacity) at a given frequency
 * @frequency: The frequency in kHz, for consistency with CPUFreq
 * @power: The power consumed at this level (by 1 CPU or by a registered
 *	device). It can be a total power: static and dynamic.
 * @cost: The cost coefficient associated with this level, used during
 *	energy calculation. Equal to: power * max_frequency / frequency
 * @flags: see "em_perf_state flags" description below.
 */
struct em_perf_state {
	unsigned long performance;
	unsigned long frequency;
	unsigned long power;
	unsigned long cost;
	unsigned long flags;
};
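
/*
 * Illustrative example of the @cost formula (made-up numbers, not from any
 * real platform): with a maximum frequency of 2000000 kHz, a state running
 * at 1000000 kHz and consuming 200000 uW gets
 * cost = 200000 * 2000000 / 1000000 = 400000.
 */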

/*
 * em_perf_state flags:
 *
 * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient. There is
 * another performance state in this em_perf_domain with a higher frequency
 * but a lower or equal power cost. Such inefficient states are ignored when
 * using the em_pd_get_efficient_*() functions.
 */
#define EM_PERF_STATE_INEFFICIENT BIT(0)
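
/*
 * For instance (made-up values): if the state at 1000000 kHz ends up with a
 * cost greater than or equal to the cost of the state at 1200000 kHz in the
 * same domain, the 1000000 kHz state is flagged as inefficient, since the
 * faster state is at least as efficient per unit of work.
 */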

/**
 * struct em_perf_table - Performance states table
 * @rcu: RCU used for safe access and destruction
 * @kref: Reference counter to track the users
 * @state: List of performance states, in ascending order
 */
struct em_perf_table {
	struct rcu_head rcu;
	struct kref kref;
	struct em_perf_state state[];
};

/**
 * struct em_perf_domain - Performance domain
 * @em_table: Pointer to the runtime modifiable em_perf_table
 * @nr_perf_states: Number of performance states
 * @min_perf_state: Minimum allowed Performance State index
 * @max_perf_state: Maximum allowed Performance State index
 * @flags: See "em_perf_domain flags"
 * @cpus: Cpumask covering the CPUs of the domain. It's here
 *	for performance reasons to avoid potential cache
 *	misses during energy calculations in the scheduler
 *	and simplifies allocating/freeing that memory region.
 *
 * In case of a CPU device, a "performance domain" represents a group of CPUs
 * whose performance is scaled together. All CPUs of a performance domain
 * must have the same micro-architecture. Performance domains often have
 * a 1-to-1 mapping with CPUFreq policies. In case of other devices, the @cpus
 * field is unused.
 */
struct em_perf_domain {
	struct em_perf_table __rcu *em_table;
	int nr_perf_states;
	int min_perf_state;
	int max_perf_state;
	unsigned long flags;
	unsigned long cpus[];
};

/*
 * em_perf_domain flags:
 *
 * EM_PERF_DOMAIN_MICROWATTS: When set, the power values are in micro-Watts;
 * otherwise they are in an abstract scale.
 *
 * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
 * energy consumption.
 *
 * EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might be
 * created by a platform that lacks real power information.
 */
#define EM_PERF_DOMAIN_MICROWATTS BIT(0)
#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
#define EM_PERF_DOMAIN_ARTIFICIAL BIT(2)

#define em_span_cpus(em) (to_cpumask((em)->cpus))
#define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL)
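
/*
 * Illustrative usage sketch (hypothetical caller code): these helpers can be
 * applied to a domain looked up via em_cpu_get(), e.g.:
 *
 *	struct em_perf_domain *pd = em_cpu_get(cpu);
 *
 *	if (pd && !em_is_artificial(pd))
 *		pr_debug("EM spans CPUs %*pbl\n",
 *			 cpumask_pr_args(em_span_cpus(pd)));
 */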

#ifdef CONFIG_ENERGY_MODEL
/*
 * The max power value in micro-Watts. The limit of 64 Watts is set as
 * a safety net to not overflow multiplications on 32bit platforms. The
 * 32bit limit for the total Perf Domain power implies a limit of at most
 * 64 CPUs in such a domain.
 */
#define EM_MAX_POWER (64000000) /* 64 Watts */

/*
 * To avoid possible energy estimation overflow on 32bit machines, limit
 * the number of CPUs in a Perf. Domain. 64bit machines are safe, hence
 * the large value.
 */
#ifdef CONFIG_64BIT
#define EM_MAX_NUM_CPUS 4096
#else
#define EM_MAX_NUM_CPUS 16
#endif
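
/*
 * A rough sketch of the arithmetic behind the 64-CPU statement above (an
 * illustration, not a formal bound): with at most EM_MAX_POWER = 64000000 uW
 * per CPU, 64 CPUs sum to at most 4096000000 uW, which still fits in an
 * unsigned 32bit value (max 4294967295).
 */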

struct em_data_callback {
	/**
	 * active_power() - Provide power at the next performance state of
	 *	a device
	 * @dev : Device for which we do this operation (can be a CPU)
	 * @power : Active power at the performance state
	 *	(modified)
	 * @freq : Frequency at the performance state in kHz
	 *	(modified)
	 *
	 * active_power() must find the lowest performance state of 'dev' above
	 * 'freq' and update 'power' and 'freq' to the matching active power
	 * and frequency.
	 *
	 * In case of CPUs, the power is the one of a single CPU in the domain,
	 * expressed in micro-Watts or an abstract scale. It is expected to
	 * fit in the [0, EM_MAX_POWER] range.
	 *
	 * Return 0 on success.
	 */
	int (*active_power)(struct device *dev, unsigned long *power,
			    unsigned long *freq);

	/**
	 * get_cost() - Provide the cost at the given performance state of
	 *	a device
	 * @dev : Device for which we do this operation (can be a CPU)
	 * @freq : Frequency at the performance state in kHz
	 * @cost : The cost value for the performance state
	 *	(modified)
	 *
	 * In case of CPUs, the cost is the one of a single CPU in the domain.
	 * It is expected to fit in the [0, EM_MAX_POWER] range due to internal
	 * usage in EAS calculation.
	 *
	 * Return 0 on success, or appropriate error value in case of failure.
	 */
	int (*get_cost)(struct device *dev, unsigned long freq,
			unsigned long *cost);
};
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb)	\
	{ .active_power = _active_power_cb,		\
	  .get_cost = _cost_cb }
#define EM_DATA_CB(_active_power_cb) \
		EM_ADV_DATA_CB(_active_power_cb, NULL)
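
/*
 * Illustrative registration sketch (hypothetical driver code: the callback
 * name, the state count of 4 and the 'cpus'/'ret' variables are made up).
 * The callback fills *power (in uW when 'microwatts' is true) and *freq
 * (in kHz) with the values of the next performance state:
 *
 *	static int bar_active_power(struct device *dev, unsigned long *power,
 *				    unsigned long *freq)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	static const struct em_data_callback em_cb = EM_DATA_CB(bar_active_power);
 *
 *	ret = em_dev_register_perf_domain(dev, 4, &em_cb, cpus, true);
 */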

struct em_perf_domain *em_cpu_get(int cpu);
struct em_perf_domain *em_pd_get(struct device *dev);
int em_dev_update_perf_domain(struct device *dev,
			      struct em_perf_table *new_table);
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
				const struct em_data_callback *cb,
				const cpumask_t *cpus, bool microwatts);
void em_dev_unregister_perf_domain(struct device *dev);
struct em_perf_table *em_table_alloc(struct em_perf_domain *pd);
void em_table_free(struct em_perf_table *table);
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
			 int nr_states);
int em_dev_update_chip_binning(struct device *dev);
int em_update_performance_limits(struct em_perf_domain *pd,
				 unsigned long freq_min_khz, unsigned long freq_max_khz);
void em_adjust_cpu_capacity(unsigned int cpu);
void em_rebuild_sched_domains(void);

/**
 * em_pd_get_efficient_state() - Get an efficient performance state from the EM
 * @table: List of performance states, in ascending order
 * @pd: performance domain for which this must be done
 * @max_util: Max utilization to map with the EM
 *
 * It is called from the scheduler code quite frequently and as a consequence
 * doesn't implement any check.
 *
 * Return: An efficient performance state id, high enough to meet @max_util
 * requirement.
 */
static inline int
em_pd_get_efficient_state(struct em_perf_state *table,
			  struct em_perf_domain *pd, unsigned long max_util)
{
	unsigned long pd_flags = pd->flags;
	int min_ps = pd->min_perf_state;
	int max_ps = pd->max_perf_state;
	struct em_perf_state *ps;
	int i;

	for (i = min_ps; i <= max_ps; i++) {
		ps = &table[i];
		if (ps->performance >= max_util) {
			if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
			    ps->flags & EM_PERF_STATE_INEFFICIENT)
				continue;
			return i;
		}
	}

	return max_ps;
}

/**
 * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
 *	performance domain
 * @pd : performance domain for which energy has to be estimated
 * @max_util : highest utilization among CPUs of the domain
 * @sum_util : sum of the utilization of all CPUs in the domain
 * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
 *	might reflect reduced frequency (due to thermal)
 *
 * This function must be used only for CPU devices. There is no validation
 * that the EM is of CPU type or that it has a cpumask allocated. It is
 * called from the scheduler code quite frequently, which is why it performs
 * no checks.
 *
 * Return: the sum of the energy consumed by the CPUs of the domain assuming
 * a capacity state satisfying the max utilization of the domain.
 */
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
				unsigned long max_util, unsigned long sum_util,
				unsigned long allowed_cpu_cap)
{
	struct em_perf_table *em_table;
	struct em_perf_state *ps;
	int i;

	WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");

	if (!sum_util)
		return 0;

	/*
	 * In order to predict the performance state, map the utilization of
	 * the most utilized CPU of the performance domain to a requested
	 * performance, like schedutil. Take also into account that the real
	 * performance might be set lower (due to thermal capping). Thus, clamp
	 * max utilization to the allowed CPU capacity before calculating
	 * effective performance.
	 */
	max_util = min(max_util, allowed_cpu_cap);

	/*
	 * Find the lowest performance state of the Energy Model above the
	 * requested performance.
	 */
	em_table = rcu_dereference(pd->em_table);
	i = em_pd_get_efficient_state(em_table->state, pd, max_util);
	ps = &em_table->state[i];

	/*
	 * The performance (capacity) of a CPU in the domain at the performance
	 * state (ps) can be computed as:
	 *
	 *                     ps->freq * scale_cpu
	 *   ps->performance = --------------------                  (1)
	 *                         cpu_max_freq
	 *
	 * So, ignoring the costs of idle states (which are not available in
	 * the EM), the energy consumed by this CPU at that performance state
	 * is estimated as:
	 *
	 *             ps->power * cpu_util
	 *   cpu_nrg = --------------------                          (2)
	 *               ps->performance
	 *
	 * since 'cpu_util / ps->performance' represents its percentage of busy
	 * time.
	 *
	 *   NOTE: Although the result of this computation actually is in
	 *         units of power, it can be manipulated as an energy value
	 *         over a scheduling period, since it is assumed to be
	 *         constant during that interval.
	 *
	 * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
	 * of two terms:
	 *
	 *             ps->power * cpu_max_freq
	 *   cpu_nrg = ------------------------ * cpu_util           (3)
	 *               ps->freq * scale_cpu
	 *
	 * The first term is static, and is stored in the em_perf_state struct
	 * as 'ps->cost'.
	 *
	 * Since all CPUs of the domain have the same micro-architecture, they
	 * share the same 'ps->cost', and the same CPU capacity. Hence, the
	 * total energy of the domain (which is the simple sum of the energy of
	 * all of its CPUs) can be factorized as:
	 *
	 *   pd_nrg = ps->cost * \Sum cpu_util                       (4)
	 */
	return ps->cost * sum_util;
}
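
/*
 * Illustrative call-site sketch (simplified, not the scheduler's actual
 * code): the caller is expected to hold the RCU read lock, as checked by
 * the WARN_ONCE() above:
 *
 *	rcu_read_lock();
 *	energy = em_cpu_energy(pd, max_util, sum_util, allowed_cpu_cap);
 *	rcu_read_unlock();
 */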

/**
 * em_pd_nr_perf_states() - Get the number of performance states of a perf.
 *	domain
 * @pd : performance domain for which this must be done
 *
 * Return: the number of performance states in the performance domain table
 */
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
	return pd->nr_perf_states;
}

/**
 * em_perf_state_from_pd() - Get the performance states table of perf.
 *	domain
 * @pd : performance domain for which this must be done
 *
 * To use this function, rcu_read_lock() must be held. After the usage
 * of the performance states table is finished, rcu_read_unlock() should
 * be called.
 *
 * Return: the pointer to performance states table of the performance domain
 */
static inline
struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
{
	return rcu_dereference(pd->em_table)->state;
}
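
/*
 * Illustrative usage sketch (hypothetical caller code, not part of this API):
 *
 *	struct em_perf_state *table;
 *	int i;
 *
 *	rcu_read_lock();
 *	table = em_perf_state_from_pd(pd);
 *	for (i = 0; i < pd->nr_perf_states; i++)
 *		pr_debug("freq=%lu power=%lu\n", table[i].frequency,
 *			 table[i].power);
 *	rcu_read_unlock();
 */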

#else
struct em_data_callback {};
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) { }
#define EM_DATA_CB(_active_power_cb) { }
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)

static inline
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
				const struct em_data_callback *cb,
				const cpumask_t *cpus, bool microwatts)
{
	return -EINVAL;
}
static inline void em_dev_unregister_perf_domain(struct device *dev)
{
}
static inline struct em_perf_domain *em_cpu_get(int cpu)
{
	return NULL;
}
static inline struct em_perf_domain *em_pd_get(struct device *dev)
{
	return NULL;
}
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
			unsigned long max_util, unsigned long sum_util,
			unsigned long allowed_cpu_cap)
{
	return 0;
}
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
	return 0;
}
static inline
struct em_perf_table *em_table_alloc(struct em_perf_domain *pd)
{
	return NULL;
}
static inline void em_table_free(struct em_perf_table *table) {}
static inline
int em_dev_update_perf_domain(struct device *dev,
			      struct em_perf_table *new_table)
{
	return -EINVAL;
}
static inline
struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
{
	return NULL;
}
static inline
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
			 int nr_states)
{
	return -EINVAL;
}
static inline int em_dev_update_chip_binning(struct device *dev)
{
	return -EINVAL;
}
static inline
int em_update_performance_limits(struct em_perf_domain *pd,
				 unsigned long freq_min_khz, unsigned long freq_max_khz)
{
	return -EINVAL;
}
static inline void em_adjust_cpu_capacity(unsigned int cpu) {}
static inline void em_rebuild_sched_domains(void) {}
#endif

#endif