// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora ltd. */

#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/nvmem-consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"

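/*
 * Fold the time elapsed since the last update into either busy_time or
 * idle_time, depending on whether any work is currently queued on the GPU.
 * Callers must hold pfdevfreq->lock.
 */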
static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfreq)
{
	ktime_t now, last;

	now = ktime_get();
	last = pfdevfreq->time_last_update;

	if (pfdevfreq->busy_count > 0)
		pfdevfreq->busy_time += ktime_sub(now, last);
	else
		pfdevfreq->idle_time += ktime_sub(now, last);

	pfdevfreq->time_last_update = now;
}

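/*
 * devfreq .target callback: clamp the requested frequency to a valid OPP
 * and program the new clock rate (and supply voltage, if a regulator is
 * attached) through the OPP core.
 */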
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
				   u32 flags)
{
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	dev_pm_opp_put(opp);

	return dev_pm_opp_set_rate(dev, *freq);
}

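/*
 * Clear the accumulated busy/idle statistics and restart the measurement
 * window from the current time.
 */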
static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
{
	pfdevfreq->busy_time = 0;
	pfdevfreq->idle_time = 0;
	pfdevfreq->time_last_update = ktime_get();
}

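/*
 * devfreq .get_dev_status callback: report the busy and total time
 * accumulated since the previous poll, along with the current clock rate,
 * then reset the counters for the next polling interval. The
 * simple_ondemand governor uses the busy/total ratio to pick the next OPP.
 */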
static int panfrost_devfreq_get_dev_status(struct device *dev,
					   struct devfreq_dev_status *status)
{
	struct panfrost_device *pfdev = dev_get_drvdata(dev);
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
	unsigned long irqflags;

	status->current_frequency = clk_get_rate(pfdev->clock);

	spin_lock_irqsave(&pfdevfreq->lock, irqflags);

	panfrost_devfreq_update_utilization(pfdevfreq);
	pfdevfreq->current_frequency = status->current_frequency;

	status->total_time = ktime_to_ns(ktime_add(pfdevfreq->busy_time,
						   pfdevfreq->idle_time));

	status->busy_time = ktime_to_ns(pfdevfreq->busy_time);

	panfrost_devfreq_reset(pfdevfreq);

	spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);

	dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
		status->busy_time, status->total_time,
		status->busy_time / (status->total_time / 100),
		status->current_frequency / 1000 / 1000);

	return 0;
}

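/*
 * Poll GPU utilization every 50 ms (roughly three frames at 60 fps) with a
 * delayed timer so samples keep arriving at a steady rate.
 */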
static struct devfreq_dev_profile panfrost_devfreq_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.polling_ms = 50, /* ~3 frames */
	.target = panfrost_devfreq_target,
	.get_dev_status = panfrost_devfreq_get_dev_status,
};

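/*
 * Read the optional "speed-bin" nvmem cell and, when present, restrict the
 * OPP table to the entries matching that bin via the opp-supported-hw
 * mechanism. Platforms without a speed bin simply use the full table.
 */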
static int panfrost_read_speedbin(struct device *dev)
{
	u32 val;
	int ret;

	ret = nvmem_cell_read_variable_le_u32(dev, "speed-bin", &val);
	if (ret) {
		/*
		 * -ENOENT means that this platform doesn't support speedbins
		 * as it didn't declare any speed-bin nvmem, and -EOPNOTSUPP
		 * that nvmem support is not available: in both cases we keep
		 * going without it; any other error means that we are
		 * supposed to read the bin value, but we failed doing so.
		 */
		if (ret != -ENOENT && ret != -EOPNOTSUPP) {
			DRM_DEV_ERROR(dev, "Cannot read speed-bin (%d).", ret);
			return ret;
		}

		return 0;
	}
	DRM_DEV_DEBUG(dev, "Using speed-bin = 0x%x\n", val);

	return devm_pm_opp_set_supported_hw(dev, &val, 1);
}

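/*
 * Set up devfreq for the GPU: build the OPP table from the device tree
 * (with optional regulator and speed-bin constraints), register the
 * simple_ondemand governor with the thresholds below, and hook the result
 * up to a devfreq cooling device. Returns 0 and leaves devfreq disabled on
 * platforms that don't describe an OPP table.
 */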
int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
	int ret;
	struct dev_pm_opp *opp;
	unsigned long cur_freq;
	struct device *dev = &pfdev->pdev->dev;
	struct devfreq *devfreq;
	struct thermal_cooling_device *cooling;
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
	unsigned long freq = ULONG_MAX;

	if (pfdev->comp->num_supplies > 1) {
		/*
		 * GPUs with more than 1 supply require platform-specific handling:
		 * continue without devfreq
		 */
		DRM_DEV_INFO(dev, "More than 1 supply is not supported yet\n");
		return 0;
	}

	ret = panfrost_read_speedbin(dev);
	if (ret)
		return ret;

	ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names);
	if (ret) {
		/* Continue if the optional regulator is missing */
		if (ret != -ENODEV) {
			if (ret != -EPROBE_DEFER)
				DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
			return ret;
		}
	}

	ret = devm_pm_opp_of_add_table(dev);
	if (ret) {
		/* Optional, continue without devfreq */
		if (ret == -ENODEV)
			ret = 0;
		return ret;
	}
	pfdevfreq->opp_of_table_added = true;

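	/*
	 * Initialize the utilization counters and pick a valid OPP for the
	 * current clock rate so devfreq starts from a consistent state.
	 */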
	spin_lock_init(&pfdevfreq->lock);

	panfrost_devfreq_reset(pfdevfreq);

	cur_freq = clk_get_rate(pfdev->clock);

	opp = devfreq_recommended_opp(dev, &cur_freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	panfrost_devfreq_profile.initial_freq = cur_freq;

	/*
	 * Set the recommended OPP: this will enable and configure the
	 * regulator, if any, and avoids a switch-off by
	 * regulator_late_cleanup().
	 */
	ret = dev_pm_opp_set_opp(dev, opp);
	if (ret) {
		DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
		return ret;
	}

	/* Find the fastest defined rate */
	opp = dev_pm_opp_find_freq_floor(dev, &freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	pfdevfreq->fast_rate = freq;

	dev_pm_opp_put(opp);

	/*
	 * Setup default thresholds for the simple_ondemand governor.
	 * The values are chosen based on experiments.
	 */
	pfdevfreq->gov_data.upthreshold = 45;
	pfdevfreq->gov_data.downdifferential = 5;

	devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
					  DEVFREQ_GOV_SIMPLE_ONDEMAND,
					  &pfdevfreq->gov_data);
	if (IS_ERR(devfreq)) {
		DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
		return PTR_ERR(devfreq);
	}
	pfdevfreq->devfreq = devfreq;

	cooling = devfreq_cooling_em_register(devfreq, NULL);
	if (IS_ERR(cooling))
		DRM_DEV_INFO(dev, "Failed to register cooling device\n");
	else
		pfdevfreq->cooling = cooling;

	return 0;
}

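/*
 * Tear down the cooling device; the devfreq instance and OPP table are
 * devres-managed and released automatically with the device.
 */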
void panfrost_devfreq_fini(struct panfrost_device *pfdev)
{
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

	if (pfdevfreq->cooling) {
		devfreq_cooling_unregister(pfdevfreq->cooling);
		pfdevfreq->cooling = NULL;
	}
}

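/*
 * Re-arm devfreq when the GPU comes back from suspend, starting from fresh
 * utilization counters so the time spent suspended is not counted as idle.
 */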
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

	if (!pfdevfreq->devfreq)
		return;

	panfrost_devfreq_reset(pfdevfreq);

	devfreq_resume_device(pfdevfreq->devfreq);
}

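/* Pause devfreq monitoring while the GPU is suspended. */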
void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
{
	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

	if (!pfdevfreq->devfreq)
		return;

	devfreq_suspend_device(pfdevfreq->devfreq);
}

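/*
 * Account the time elapsed so far to the current state, then mark the GPU
 * busy; busy_count tracks the number of pieces of work in flight.
 */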
void panfrost_devfreq_record_busy(struct panfrost_devfreq *pfdevfreq)
{
	unsigned long irqflags;

	if (!pfdevfreq->devfreq)
		return;

	spin_lock_irqsave(&pfdevfreq->lock, irqflags);

	panfrost_devfreq_update_utilization(pfdevfreq);

	pfdevfreq->busy_count++;

	spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
}

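/*
 * Counterpart of panfrost_devfreq_record_busy(): account the elapsed time,
 * then drop the busy counter once the corresponding work finishes. The
 * WARN_ON catches unbalanced busy/idle accounting.
 */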
void panfrost_devfreq_record_idle(struct panfrost_devfreq *pfdevfreq)
{
	unsigned long irqflags;

	if (!pfdevfreq->devfreq)
		return;

	spin_lock_irqsave(&pfdevfreq->lock, irqflags);

	panfrost_devfreq_update_utilization(pfdevfreq);

	WARN_ON(--pfdevfreq->busy_count < 0);

	spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
}

Source: linux/drivers/gpu/drm/panfrost/panfrost_devfreq.c