// SPDX-License-Identifier: GPL-2.0
/*
 * Qualcomm ICE (Inline Crypto Engine) support.
 *
 * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
 * Copyright (c) 2019, Google LLC
 * Copyright (c) 2023, Linaro Limited
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#include <linux/firmware/qcom/qcom_scm.h>

#include <soc/qcom/ice.h>
21
22#define AES_256_XTS_KEY_SIZE 64
23
24/* QCOM ICE registers */
25#define QCOM_ICE_REG_VERSION 0x0008
26#define QCOM_ICE_REG_FUSE_SETTING 0x0010
27#define QCOM_ICE_REG_BIST_STATUS 0x0070
28#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000
29
30/* BIST ("built-in self-test") status flags */
31#define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28)
32
33#define QCOM_ICE_FUSE_SETTING_MASK 0x1
34#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
35#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
36
37#define qcom_ice_writel(engine, val, reg) \
38 writel((val), (engine)->base + (reg))
39
40#define qcom_ice_readl(engine, reg) \
41 readl((engine)->base + (reg))
42
43struct qcom_ice {
44 struct device *dev;
45 void __iomem *base;
46 struct device_link *link;
47
48 struct clk *core_clk;
49};
50
51static bool qcom_ice_check_supported(struct qcom_ice *ice)
52{
53 u32 regval = qcom_ice_readl(ice, QCOM_ICE_REG_VERSION);
54 struct device *dev = ice->dev;
55 int major = FIELD_GET(GENMASK(31, 24), regval);
56 int minor = FIELD_GET(GENMASK(23, 16), regval);
57 int step = FIELD_GET(GENMASK(15, 0), regval);
58
59 /* For now this driver only supports ICE version 3 and 4. */
60 if (major != 3 && major != 4) {
61 dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n",
62 major, minor, step);
63 return false;
64 }
65
66 dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
67 major, minor, step);
68
69 /* If fuses are blown, ICE might not work in the standard way. */
70 regval = qcom_ice_readl(ice, QCOM_ICE_REG_FUSE_SETTING);
71 if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
72 QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
73 QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
74 dev_warn(dev, "Fuses are blown; ICE is unusable!\n");
75 return false;
76 }
77
78 return true;
79}
80
81static void qcom_ice_low_power_mode_enable(struct qcom_ice *ice)
82{
83 u32 regval;
84
85 regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL);
86
87 /* Enable low power mode sequence */
88 regval |= 0x7000;
89 qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
90}
91
92static void qcom_ice_optimization_enable(struct qcom_ice *ice)
93{
94 u32 regval;
95
96 /* ICE Optimizations Enable Sequence */
97 regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL);
98 regval |= 0xd807100;
99 /* ICE HPG requires delay before writing */
100 udelay(5);
101 qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
102 udelay(5);
103}
104
105/*
106 * Wait until the ICE BIST (built-in self-test) has completed.
107 *
108 * This may be necessary before ICE can be used.
109 * Note that we don't really care whether the BIST passed or failed;
110 * we really just want to make sure that it isn't still running. This is
111 * because (a) the BIST is a FIPS compliance thing that never fails in
112 * practice, (b) ICE is documented to reject crypto requests if the BIST
113 * fails, so we needn't do it in software too, and (c) properly testing
114 * storage encryption requires testing the full storage stack anyway,
115 * and not relying on hardware-level self-tests.
116 */
117static int qcom_ice_wait_bist_status(struct qcom_ice *ice)
118{
119 u32 regval;
120 int err;
121
122 err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS,
123 regval, !(regval & QCOM_ICE_BIST_STATUS_MASK),
124 50, 5000);
125 if (err)
126 dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n");
127
128 return err;
129}
130
/* Bring the engine up: HW init sequences, then wait for the self-test. */
int qcom_ice_enable(struct qcom_ice *ice)
{
	qcom_ice_low_power_mode_enable(ice);
	qcom_ice_optimization_enable(ice);

	return qcom_ice_wait_bist_status(ice);
}
EXPORT_SYMBOL_GPL(qcom_ice_enable);
139
140int qcom_ice_resume(struct qcom_ice *ice)
141{
142 struct device *dev = ice->dev;
143 int err;
144
145 err = clk_prepare_enable(clk: ice->core_clk);
146 if (err) {
147 dev_err(dev, "failed to enable core clock (%d)\n",
148 err);
149 return err;
150 }
151
152 return qcom_ice_wait_bist_status(ice);
153}
154EXPORT_SYMBOL_GPL(qcom_ice_resume);
155
156int qcom_ice_suspend(struct qcom_ice *ice)
157{
158 clk_disable_unprepare(clk: ice->core_clk);
159
160 return 0;
161}
162EXPORT_SYMBOL_GPL(qcom_ice_suspend);
163
164int qcom_ice_program_key(struct qcom_ice *ice,
165 u8 algorithm_id, u8 key_size,
166 const u8 crypto_key[], u8 data_unit_size,
167 int slot)
168{
169 struct device *dev = ice->dev;
170 union {
171 u8 bytes[AES_256_XTS_KEY_SIZE];
172 u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
173 } key;
174 int i;
175 int err;
176
177 /* Only AES-256-XTS has been tested so far. */
178 if (algorithm_id != QCOM_ICE_CRYPTO_ALG_AES_XTS ||
179 key_size != QCOM_ICE_CRYPTO_KEY_SIZE_256) {
180 dev_err_ratelimited(dev,
181 "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
182 algorithm_id, key_size);
183 return -EINVAL;
184 }
185
186 memcpy(key.bytes, crypto_key, AES_256_XTS_KEY_SIZE);
187
188 /* The SCM call requires that the key words are encoded in big endian */
189 for (i = 0; i < ARRAY_SIZE(key.words); i++)
190 __cpu_to_be32s(&key.words[i]);
191
192 err = qcom_scm_ice_set_key(index: slot, key: key.bytes, AES_256_XTS_KEY_SIZE,
193 cipher: QCOM_SCM_ICE_CIPHER_AES_256_XTS,
194 data_unit_size);
195
196 memzero_explicit(s: &key, count: sizeof(key));
197
198 return err;
199}
200EXPORT_SYMBOL_GPL(qcom_ice_program_key);
201
/* Invalidate (evict) the key in @slot via the SCM interface. */
int qcom_ice_evict_key(struct qcom_ice *ice, int slot)
{
	return qcom_scm_ice_invalidate_key(slot);
}
EXPORT_SYMBOL_GPL(qcom_ice_evict_key);
207
208static struct qcom_ice *qcom_ice_create(struct device *dev,
209 void __iomem *base)
210{
211 struct qcom_ice *engine;
212
213 if (!qcom_scm_is_available())
214 return ERR_PTR(error: -EPROBE_DEFER);
215
216 if (!qcom_scm_ice_available()) {
217 dev_warn(dev, "ICE SCM interface not found\n");
218 return NULL;
219 }
220
221 engine = devm_kzalloc(dev, size: sizeof(*engine), GFP_KERNEL);
222 if (!engine)
223 return ERR_PTR(error: -ENOMEM);
224
225 engine->dev = dev;
226 engine->base = base;
227
228 /*
229 * Legacy DT binding uses different clk names for each consumer,
230 * so lets try those first. If none of those are a match, it means
231 * the we only have one clock and it is part of the dedicated DT node.
232 * Also, enable the clock before we check what HW version the driver
233 * supports.
234 */
235 engine->core_clk = devm_clk_get_optional_enabled(dev, id: "ice_core_clk");
236 if (!engine->core_clk)
237 engine->core_clk = devm_clk_get_optional_enabled(dev, id: "ice");
238 if (!engine->core_clk)
239 engine->core_clk = devm_clk_get_enabled(dev, NULL);
240 if (IS_ERR(ptr: engine->core_clk))
241 return ERR_CAST(ptr: engine->core_clk);
242
243 if (!qcom_ice_check_supported(ice: engine))
244 return ERR_PTR(error: -EOPNOTSUPP);
245
246 dev_dbg(dev, "Registered Qualcomm Inline Crypto Engine\n");
247
248 return engine;
249}
250
251/**
252 * of_qcom_ice_get() - get an ICE instance from a DT node
253 * @dev: device pointer for the consumer device
254 *
255 * This function will provide an ICE instance either by creating one for the
256 * consumer device if its DT node provides the 'ice' reg range and the 'ice'
257 * clock (for legacy DT style). On the other hand, if consumer provides a
258 * phandle via 'qcom,ice' property to an ICE DT, the ICE instance will already
259 * be created and so this function will return that instead.
260 *
261 * Return: ICE pointer on success, NULL if there is no ICE data provided by the
262 * consumer or ERR_PTR() on error.
263 */
264struct qcom_ice *of_qcom_ice_get(struct device *dev)
265{
266 struct platform_device *pdev = to_platform_device(dev);
267 struct qcom_ice *ice;
268 struct device_node *node;
269 struct resource *res;
270 void __iomem *base;
271
272 if (!dev || !dev->of_node)
273 return ERR_PTR(error: -ENODEV);
274
275 /*
276 * In order to support legacy style devicetree bindings, we need
277 * to create the ICE instance using the consumer device and the reg
278 * range called 'ice' it provides.
279 */
280 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice");
281 if (res) {
282 base = devm_ioremap_resource(dev: &pdev->dev, res);
283 if (IS_ERR(ptr: base))
284 return ERR_CAST(ptr: base);
285
286 /* create ICE instance using consumer dev */
287 return qcom_ice_create(dev: &pdev->dev, base);
288 }
289
290 /*
291 * If the consumer node does not provider an 'ice' reg range
292 * (legacy DT binding), then it must at least provide a phandle
293 * to the ICE devicetree node, otherwise ICE is not supported.
294 */
295 node = of_parse_phandle(np: dev->of_node, phandle_name: "qcom,ice", index: 0);
296 if (!node)
297 return NULL;
298
299 pdev = of_find_device_by_node(np: node);
300 if (!pdev) {
301 dev_err(dev, "Cannot find device node %s\n", node->name);
302 ice = ERR_PTR(error: -EPROBE_DEFER);
303 goto out;
304 }
305
306 ice = platform_get_drvdata(pdev);
307 if (!ice) {
308 dev_err(dev, "Cannot get ice instance from %s\n",
309 dev_name(&pdev->dev));
310 platform_device_put(pdev);
311 ice = ERR_PTR(error: -EPROBE_DEFER);
312 goto out;
313 }
314
315 ice->link = device_link_add(consumer: dev, supplier: &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER);
316 if (!ice->link) {
317 dev_err(&pdev->dev,
318 "Failed to create device link to consumer %s\n",
319 dev_name(dev));
320 platform_device_put(pdev);
321 ice = ERR_PTR(error: -EINVAL);
322 }
323
324out:
325 of_node_put(node);
326
327 return ice;
328}
329EXPORT_SYMBOL_GPL(of_qcom_ice_get);
330
331static int qcom_ice_probe(struct platform_device *pdev)
332{
333 struct qcom_ice *engine;
334 void __iomem *base;
335
336 base = devm_platform_ioremap_resource(pdev, index: 0);
337 if (IS_ERR(ptr: base)) {
338 dev_warn(&pdev->dev, "ICE registers not found\n");
339 return PTR_ERR(ptr: base);
340 }
341
342 engine = qcom_ice_create(dev: &pdev->dev, base);
343 if (IS_ERR(ptr: engine))
344 return PTR_ERR(ptr: engine);
345
346 platform_set_drvdata(pdev, data: engine);
347
348 return 0;
349}
350
351static const struct of_device_id qcom_ice_of_match_table[] = {
352 { .compatible = "qcom,inline-crypto-engine" },
353 { },
354};
355MODULE_DEVICE_TABLE(of, qcom_ice_of_match_table);
356
357static struct platform_driver qcom_ice_driver = {
358 .probe = qcom_ice_probe,
359 .driver = {
360 .name = "qcom-ice",
361 .of_match_table = qcom_ice_of_match_table,
362 },
363};
364
365module_platform_driver(qcom_ice_driver);
366
367MODULE_DESCRIPTION("Qualcomm Inline Crypto Engine driver");
368MODULE_LICENSE("GPL");
369

source code of linux/drivers/soc/qcom/ice.c