1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * System Control and Management Interface (SCMI) Message SMC/HVC |
4 | * Transport driver |
5 | * |
6 | * Copyright 2020 NXP |
7 | */ |
8 | |
9 | #include <linux/arm-smccc.h> |
10 | #include <linux/atomic.h> |
11 | #include <linux/device.h> |
12 | #include <linux/err.h> |
13 | #include <linux/interrupt.h> |
14 | #include <linux/mutex.h> |
15 | #include <linux/of.h> |
16 | #include <linux/of_address.h> |
17 | #include <linux/of_irq.h> |
18 | #include <linux/limits.h> |
19 | #include <linux/processor.h> |
20 | #include <linux/slab.h> |
21 | |
22 | #include "common.h" |
23 | |
24 | /* |
25 | * The shmem address is split into 4K page and offset. |
26 | * This is to make sure the parameters fit in 32bit arguments of the |
27 | * smc/hvc call to keep it uniform across smc32/smc64 conventions. |
28 | * This however limits the shmem address to 44 bit. |
29 | * |
30 | * These optional parameters can be used to distinguish among multiple |
31 | * scmi instances that are using the same smc-id. |
32 | * The page parameter is passed in r1/x1/w1 register and the offset parameter |
33 | * is passed in r2/x2/w2 register. |
34 | */ |
35 | |
/* Size of one shmem channel page; offsets are computed modulo this */
#define SHMEM_SIZE (SZ_4K)
/* log2(SHMEM_SIZE): shift used to derive the page number from an address */
#define SHMEM_SHIFT 12
/* 4K-page number of address x, truncated to unsigned long (max 44-bit addr) */
#define SHMEM_PAGE(x) (_UL((x) >> SHMEM_SHIFT))
/* Byte offset of address x within its 4K page */
#define SHMEM_OFFSET(x) ((x) & (SHMEM_SIZE - 1))
40 | |
/**
 * struct scmi_smc - Structure representing a SCMI smc transport
 *
 * @irq: An optional IRQ for completion
 * @cinfo: SCMI channel info
 * @shmem: Transmit/Receive shared memory area
 * @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
 *		Used when NOT operating in atomic mode.
 * @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
 *	      Used when operating in atomic mode.
 * @func_id: smc/hvc call function id
 * @param_page: 4K page number of the shmem channel
 * @param_offset: Offset within the 4K page of the shmem channel
 * @cap_id: smc/hvc doorbell's capability id to be used on Qualcomm virtual
 *	    platforms
 */

struct scmi_smc {
	int irq;
	struct scmi_chan_info *cinfo;
	struct scmi_shared_mem __iomem *shmem;
	/* Protect access to shmem area */
	struct mutex shmem_lock;
/* Sentinel stored in @inflight when no transfer owns the channel */
#define INFLIGHT_NONE MSG_TOKEN_MAX
	atomic_t inflight;
	unsigned long func_id;
	unsigned long param_page;
	unsigned long param_offset;
	unsigned long cap_id;
};
71 | |
72 | static irqreturn_t smc_msg_done_isr(int irq, void *data) |
73 | { |
74 | struct scmi_smc *scmi_info = data; |
75 | |
76 | scmi_rx_callback(cinfo: scmi_info->cinfo, |
77 | msg_hdr: shmem_read_header(shmem: scmi_info->shmem), NULL); |
78 | |
79 | return IRQ_HANDLED; |
80 | } |
81 | |
82 | static bool smc_chan_available(struct device_node *of_node, int idx) |
83 | { |
84 | struct device_node *np = of_parse_phandle(np: of_node, phandle_name: "shmem" , index: 0); |
85 | if (!np) |
86 | return false; |
87 | |
88 | of_node_put(node: np); |
89 | return true; |
90 | } |
91 | |
92 | static inline void smc_channel_lock_init(struct scmi_smc *scmi_info) |
93 | { |
94 | if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) |
95 | atomic_set(v: &scmi_info->inflight, INFLIGHT_NONE); |
96 | else |
97 | mutex_init(&scmi_info->shmem_lock); |
98 | } |
99 | |
100 | static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight) |
101 | { |
102 | int ret; |
103 | |
104 | ret = atomic_cmpxchg(v: inflight, INFLIGHT_NONE, new: xfer->hdr.seq); |
105 | |
106 | return ret == INFLIGHT_NONE; |
107 | } |
108 | |
109 | static inline void |
110 | smc_channel_lock_acquire(struct scmi_smc *scmi_info, |
111 | struct scmi_xfer *xfer __maybe_unused) |
112 | { |
113 | if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) |
114 | spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight)); |
115 | else |
116 | mutex_lock(&scmi_info->shmem_lock); |
117 | } |
118 | |
119 | static inline void smc_channel_lock_release(struct scmi_smc *scmi_info) |
120 | { |
121 | if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE)) |
122 | atomic_set(v: &scmi_info->inflight, INFLIGHT_NONE); |
123 | else |
124 | mutex_unlock(lock: &scmi_info->shmem_lock); |
125 | } |
126 | |
127 | static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, |
128 | bool tx) |
129 | { |
130 | struct device *cdev = cinfo->dev; |
131 | unsigned long cap_id = ULONG_MAX; |
132 | struct scmi_smc *scmi_info; |
133 | resource_size_t size; |
134 | struct resource res; |
135 | struct device_node *np; |
136 | u32 func_id; |
137 | int ret; |
138 | |
139 | if (!tx) |
140 | return -ENODEV; |
141 | |
142 | scmi_info = devm_kzalloc(dev, size: sizeof(*scmi_info), GFP_KERNEL); |
143 | if (!scmi_info) |
144 | return -ENOMEM; |
145 | |
146 | np = of_parse_phandle(np: cdev->of_node, phandle_name: "shmem" , index: 0); |
147 | if (!of_device_is_compatible(device: np, "arm,scmi-shmem" )) { |
148 | of_node_put(node: np); |
149 | return -ENXIO; |
150 | } |
151 | |
152 | ret = of_address_to_resource(dev: np, index: 0, r: &res); |
153 | of_node_put(node: np); |
154 | if (ret) { |
155 | dev_err(cdev, "failed to get SCMI Tx shared memory\n" ); |
156 | return ret; |
157 | } |
158 | |
159 | size = resource_size(res: &res); |
160 | scmi_info->shmem = devm_ioremap(dev, offset: res.start, size); |
161 | if (!scmi_info->shmem) { |
162 | dev_err(dev, "failed to ioremap SCMI Tx shared memory\n" ); |
163 | return -EADDRNOTAVAIL; |
164 | } |
165 | |
166 | ret = of_property_read_u32(np: dev->of_node, propname: "arm,smc-id" , out_value: &func_id); |
167 | if (ret < 0) |
168 | return ret; |
169 | |
170 | if (of_device_is_compatible(device: dev->of_node, "qcom,scmi-smc" )) { |
171 | void __iomem *ptr = (void __iomem *)scmi_info->shmem + size - 8; |
172 | /* The capability-id is kept in last 8 bytes of shmem. |
173 | * +-------+ <-- 0 |
174 | * | shmem | |
175 | * +-------+ <-- size - 8 |
176 | * | capId | |
177 | * +-------+ <-- size |
178 | */ |
179 | memcpy_fromio(&cap_id, ptr, sizeof(cap_id)); |
180 | } |
181 | |
182 | if (of_device_is_compatible(device: dev->of_node, "arm,scmi-smc-param" )) { |
183 | scmi_info->param_page = SHMEM_PAGE(res.start); |
184 | scmi_info->param_offset = SHMEM_OFFSET(res.start); |
185 | } |
186 | /* |
187 | * If there is an interrupt named "a2p", then the service and |
188 | * completion of a message is signaled by an interrupt rather than by |
189 | * the return of the SMC call. |
190 | */ |
191 | scmi_info->irq = of_irq_get_byname(dev: cdev->of_node, name: "a2p" ); |
192 | if (scmi_info->irq > 0) { |
193 | ret = request_irq(irq: scmi_info->irq, handler: smc_msg_done_isr, |
194 | IRQF_NO_SUSPEND, name: dev_name(dev), dev: scmi_info); |
195 | if (ret) { |
196 | dev_err(dev, "failed to setup SCMI smc irq\n" ); |
197 | return ret; |
198 | } |
199 | } else { |
200 | cinfo->no_completion_irq = true; |
201 | } |
202 | |
203 | scmi_info->func_id = func_id; |
204 | scmi_info->cap_id = cap_id; |
205 | scmi_info->cinfo = cinfo; |
206 | smc_channel_lock_init(scmi_info); |
207 | cinfo->transport_info = scmi_info; |
208 | |
209 | return 0; |
210 | } |
211 | |
212 | static int smc_chan_free(int id, void *p, void *data) |
213 | { |
214 | struct scmi_chan_info *cinfo = p; |
215 | struct scmi_smc *scmi_info = cinfo->transport_info; |
216 | |
217 | /* |
218 | * Different protocols might share the same chan info, so a previous |
219 | * smc_chan_free call might have already freed the structure. |
220 | */ |
221 | if (!scmi_info) |
222 | return 0; |
223 | |
224 | /* Ignore any possible further reception on the IRQ path */ |
225 | if (scmi_info->irq > 0) |
226 | free_irq(scmi_info->irq, scmi_info); |
227 | |
228 | cinfo->transport_info = NULL; |
229 | scmi_info->cinfo = NULL; |
230 | |
231 | return 0; |
232 | } |
233 | |
234 | static int smc_send_message(struct scmi_chan_info *cinfo, |
235 | struct scmi_xfer *xfer) |
236 | { |
237 | struct scmi_smc *scmi_info = cinfo->transport_info; |
238 | struct arm_smccc_res res; |
239 | |
240 | /* |
241 | * Channel will be released only once response has been |
242 | * surely fully retrieved, so after .mark_txdone() |
243 | */ |
244 | smc_channel_lock_acquire(scmi_info, xfer); |
245 | |
246 | shmem_tx_prepare(shmem: scmi_info->shmem, xfer, cinfo); |
247 | |
248 | if (scmi_info->cap_id != ULONG_MAX) |
249 | arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->cap_id, 0, |
250 | 0, 0, 0, 0, 0, &res); |
251 | else |
252 | arm_smccc_1_1_invoke(scmi_info->func_id, scmi_info->param_page, |
253 | scmi_info->param_offset, 0, 0, 0, 0, 0, |
254 | &res); |
255 | |
256 | /* Only SMCCC_RET_NOT_SUPPORTED is valid error code */ |
257 | if (res.a0) { |
258 | smc_channel_lock_release(scmi_info); |
259 | return -EOPNOTSUPP; |
260 | } |
261 | |
262 | return 0; |
263 | } |
264 | |
265 | static void smc_fetch_response(struct scmi_chan_info *cinfo, |
266 | struct scmi_xfer *xfer) |
267 | { |
268 | struct scmi_smc *scmi_info = cinfo->transport_info; |
269 | |
270 | shmem_fetch_response(shmem: scmi_info->shmem, xfer); |
271 | } |
272 | |
273 | static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret, |
274 | struct scmi_xfer *__unused) |
275 | { |
276 | struct scmi_smc *scmi_info = cinfo->transport_info; |
277 | |
278 | smc_channel_lock_release(scmi_info); |
279 | } |
280 | |
/* Transport operations exported to the SCMI core for the smc/hvc channel */
static const struct scmi_transport_ops scmi_smc_ops = {
	.chan_available = smc_chan_available,
	.chan_setup = smc_chan_setup,
	.chan_free = smc_chan_free,
	.send_message = smc_send_message,
	.mark_txdone = smc_mark_txdone,
	.fetch_response = smc_fetch_response,
};
289 | |
/* Transport descriptor registered with the SCMI core */
const struct scmi_desc scmi_smc_desc = {
	.ops = &scmi_smc_ops,
	.max_rx_timeout_ms = 30,
	.max_msg = 20,
	.max_msg_size = 128,
	/*
	 * Setting .sync_cmds_completed_on_ret to true for SMC assumes that,
	 * once the SMC instruction has completed successfully, the issued
	 * SCMI command would have been already fully processed by the SCMI
	 * platform firmware and so any possible response value expected
	 * for the issued command will be immediately ready to be fetched
	 * from the shared memory area.
	 */
	.sync_cmds_completed_on_ret = true,
	.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),
};
306 | |