// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/time64.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#include "qaic.h"
#include "qaic_timesync.h"

#define QTIMER_REG_OFFSET 0xa28
#define QAIC_TIMESYNC_SIGNATURE 0x55aa
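/*
 * Convert QTimer ticks to microseconds. The 10/192 factor implies a
 * 19.2 MHz tick rate (ticks * 10 / 192 == ticks / 19.2), computed with
 * mul_u64_u32_div() to avoid losing precision in the intermediate product.
 */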
#define QAIC_CONV_QTIMER_TO_US(qtimer) (mul_u64_u32_div(qtimer, 10, 192))

static unsigned int timesync_delay_ms = 1000; /* 1 sec default */
module_param(timesync_delay_ms, uint, 0600);
MODULE_PARM_DESC(timesync_delay_ms, "Delay in ms between two consecutive timesync operations");

enum qts_msg_type {
	QAIC_TS_CMD_TO_HOST,
	QAIC_TS_SYNC_REQ,
	QAIC_TS_ACK_TO_HOST,
	QAIC_TS_MSG_TYPE_MAX
};

/**
 * struct qts_hdr - Timesync message header structure.
 * @signature: Unique signature to identify the timesync message.
 * @reserved_1: Reserved for future use.
 * @reserved_2: Reserved for future use.
 * @msg_type: Sub-type of the timesync message.
 * @reserved_3: Reserved for future use.
 */
struct qts_hdr {
	__le16 signature;
	__le16 reserved_1;
	u8 reserved_2;
	u8 msg_type;
	__le16 reserved_3;
} __packed;

/**
 * struct qts_timeval - Structure to carry time information.
 * @tv_sec: Seconds part of the time.
 * @tv_usec: Microseconds part of the time.
 */
struct qts_timeval {
	__le64 tv_sec;
	__le64 tv_usec;
} __packed;

/**
 * struct qts_host_time_sync_msg_data - Timesync message sent from host to device.
 * @header: Header of the timesync message.
 * @data: Time information.
 */
struct qts_host_time_sync_msg_data {
	struct qts_hdr header;
	struct qts_timeval data;
} __packed;

/**
 * struct mqts_dev - MHI QAIC Timesync Control device.
 * @qdev: Pointer to the root device struct driven by the QAIC driver.
 * @mhi_dev: Pointer to the associated MHI device.
 * @timer: Timer handle used for timesync.
 * @qtimer_addr: Device QTimer register pointer.
 * @buff_in_use: Atomic flag tracking whether the sync_msg buffer is in use.
 * @dev: Device pointer to qdev->pdev->dev, stored for easy access.
 * @sync_msg: Buffer used to send the timesync message over MHI.
 */
struct mqts_dev {
	struct qaic_device *qdev;
	struct mhi_device *mhi_dev;
	struct timer_list timer;
	void __iomem *qtimer_addr;
	atomic_t buff_in_use;
	struct device *dev;
	struct qts_host_time_sync_msg_data *sync_msg;
};

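/**
 * struct qts_resp_msg - Timesync response message received from the device.
 * @hdr: Header of the timesync message.
 */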
struct qts_resp_msg {
	struct qts_hdr hdr;
} __packed;

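/**
 * struct qts_resp - Response buffer wrapped with deferred-work bookkeeping.
 * @data: Response message buffer queued on the MHI DL channel.
 * @work: Work item that processes the response outside of IRQ context.
 * @qdev: Pointer to the root device struct driven by the QAIC driver.
 */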
struct qts_resp {
	struct qts_resp_msg data;
	struct work_struct work;
	struct qaic_device *qdev;
};

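/*
 * readq() is only defined on platforms that can perform a 64-bit MMIO
 * access in one go; use it when available, otherwise fall back to a pair
 * of 32-bit reads.
 */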
#ifdef readq
static u64 read_qtimer(const volatile void __iomem *addr)
{
	return readq(addr);
}
#else
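/*
 * Fallback for platforms without readq: read the low and high halves
 * separately. The two reads are not atomic, so a carry between the halves
 * can be missed if the counter rolls over between them.
 */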
static u64 read_qtimer(const volatile void __iomem *addr)
{
	u64 low, high;

	low = readl(addr);
	high = readl(addr + sizeof(u32));
	return low | (high << 32);
}
#endif

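/*
 * UL (host-to-device) transfer completion: the device has consumed the
 * single sync_msg buffer, so mark it free for the next timer cycle.
 */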
static void qaic_timesync_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);

	dev_dbg(mqtsdev->dev, "%s status: %d xfer_len: %zu\n", __func__,
		mhi_result->transaction_status, mhi_result->bytes_xferd);

	atomic_set(&mqtsdev->buff_in_use, 0);
}

static void qaic_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);

	dev_err(mqtsdev->dev, "%s no data expected on dl channel\n", __func__);
}

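/*
 * Periodic timesync: snapshot host UTC (in microseconds) and the device
 * QTimer as close together as possible, then send the host-minus-device
 * offset so the device can relate QTimer values to host time.
 */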
static void qaic_timesync_timer(struct timer_list *t)
{
	struct mqts_dev *mqtsdev = from_timer(mqtsdev, t, timer);
	struct qts_host_time_sync_msg_data *sync_msg;
	u64 device_qtimer_us;
	u64 device_qtimer;
	u64 host_time_us;
	u64 offset_us;
	u64 host_sec;
	int ret;

	if (atomic_read(&mqtsdev->buff_in_use)) {
		dev_dbg(mqtsdev->dev, "%s buffer not free, schedule next cycle\n", __func__);
		goto mod_timer;
	}
	atomic_set(&mqtsdev->buff_in_use, 1);

	sync_msg = mqtsdev->sync_msg;
	sync_msg->header.signature = cpu_to_le16(QAIC_TIMESYNC_SIGNATURE);
	sync_msg->header.msg_type = QAIC_TS_SYNC_REQ;
	/* Read host UTC time and convert to uS */
	host_time_us = div_u64(ktime_get_real_ns(), NSEC_PER_USEC);
	device_qtimer = read_qtimer(mqtsdev->qtimer_addr);
	device_qtimer_us = QAIC_CONV_QTIMER_TO_US(device_qtimer);
	/* Offset between host UTC and device time */
	offset_us = host_time_us - device_qtimer_us;

	host_sec = div_u64(offset_us, USEC_PER_SEC);
	sync_msg->data.tv_usec = cpu_to_le64(offset_us - host_sec * USEC_PER_SEC);
	sync_msg->data.tv_sec = cpu_to_le64(host_sec);
	ret = mhi_queue_buf(mqtsdev->mhi_dev, DMA_TO_DEVICE, sync_msg, sizeof(*sync_msg), MHI_EOT);
	if (ret && (ret != -EAGAIN)) {
		dev_err(mqtsdev->dev, "%s unable to queue to mhi:%d\n", __func__, ret);
		return;
	} else if (ret == -EAGAIN) {
		atomic_set(&mqtsdev->buff_in_use, 0);
	}

mod_timer:
	ret = mod_timer(t, jiffies + msecs_to_jiffies(timesync_delay_ms));
	if (ret)
		dev_err(mqtsdev->dev, "%s mod_timer error:%d\n", __func__, ret);
}

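/* Allocate per-channel state, prepare the MHI channel, and arm the periodic timer. */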
static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
	struct mqts_dev *mqtsdev;
	struct timer_list *timer;
	int ret;

	mqtsdev = kzalloc(sizeof(*mqtsdev), GFP_KERNEL);
	if (!mqtsdev) {
		ret = -ENOMEM;
		goto out;
	}

	timer = &mqtsdev->timer;
	mqtsdev->mhi_dev = mhi_dev;
	mqtsdev->qdev = qdev;
	mqtsdev->dev = &qdev->pdev->dev;

	mqtsdev->sync_msg = kzalloc(sizeof(*mqtsdev->sync_msg), GFP_KERNEL);
	if (!mqtsdev->sync_msg) {
		ret = -ENOMEM;
		goto free_mqts_dev;
	}
	atomic_set(&mqtsdev->buff_in_use, 0);

	ret = mhi_prepare_for_transfer(mhi_dev);
	if (ret)
		goto free_sync_msg;

	/* QTimer register pointer, memory-mapped in BAR 0 */
	mqtsdev->qtimer_addr = qdev->bar_0 + QTIMER_REG_OFFSET;
	timer_setup(timer, qaic_timesync_timer, 0);
	timer->expires = jiffies + msecs_to_jiffies(timesync_delay_ms);
	add_timer(timer);
	dev_set_drvdata(&mhi_dev->dev, mqtsdev);

	return 0;

free_sync_msg:
	kfree(mqtsdev->sync_msg);
free_mqts_dev:
	kfree(mqtsdev);
out:
	return ret;
}

static void qaic_timesync_remove(struct mhi_device *mhi_dev)
{
	struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);

	del_timer_sync(&mqtsdev->timer);
	mhi_unprepare_from_transfer(mqtsdev->mhi_dev);
	kfree(mqtsdev->sync_msg);
	kfree(mqtsdev);
}

static const struct mhi_device_id qaic_timesync_match_table[] = {
	{ .chan = "QAIC_TIMESYNC_PERIODIC"},
	{},
};

MODULE_DEVICE_TABLE(mhi, qaic_timesync_match_table);

static struct mhi_driver qaic_timesync_driver = {
	.id_table = qaic_timesync_match_table,
	.remove = qaic_timesync_remove,
	.probe = qaic_timesync_probe,
	.ul_xfer_cb = qaic_timesync_ul_xfer_cb,
	.dl_xfer_cb = qaic_timesync_dl_xfer_cb,
	.driver = {
		.name = "qaic_timesync_periodic",
	},
};

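/*
 * Boot-time timesync: the device initiates a sync by sending
 * QAIC_TS_CMD_TO_HOST on the QAIC_TIMESYNC channel; the host replies with
 * a QAIC_TS_SYNC_REQ carrying its current UTC time, and the device
 * acknowledges with QAIC_TS_ACK_TO_HOST.
 */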
static void qaic_boot_timesync_worker(struct work_struct *work)
{
	struct qts_resp *resp = container_of(work, struct qts_resp, work);
	struct qts_host_time_sync_msg_data *req;
	struct qts_resp_msg data = resp->data;
	struct qaic_device *qdev = resp->qdev;
	struct mhi_device *mhi_dev;
	struct timespec64 ts;
	int ret;

	mhi_dev = qdev->qts_ch;
	/* Queue the response message beforehand to avoid race conditions */
	ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, &resp->data, sizeof(resp->data), MHI_EOT);
	if (ret) {
		kfree(resp);
		dev_warn(&mhi_dev->dev, "Failed to re-queue response buffer %d\n", ret);
		return;
	}

	switch (data.hdr.msg_type) {
	case QAIC_TS_CMD_TO_HOST:
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			break;

		req->header = data.hdr;
		req->header.msg_type = QAIC_TS_SYNC_REQ;
		ktime_get_real_ts64(&ts);
		req->data.tv_sec = cpu_to_le64(ts.tv_sec);
		req->data.tv_usec = cpu_to_le64(div_u64(ts.tv_nsec, NSEC_PER_USEC));

		ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, req, sizeof(*req), MHI_EOT);
		if (ret) {
			kfree(req);
			dev_dbg(&mhi_dev->dev, "Failed to send request message. Error %d\n", ret);
		}
		break;
	case QAIC_TS_ACK_TO_HOST:
		dev_dbg(&mhi_dev->dev, "ACK received from device\n");
		break;
	default:
		dev_err(&mhi_dev->dev, "Invalid message type %u.\n", data.hdr.msg_type);
	}
}

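/*
 * Queue a response buffer on the DL channel. Ownership of @resp passes to
 * the MHI stack until qaic_boot_timesync_dl_xfer_cb() fires.
 */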
static int qaic_boot_timesync_queue_resp(struct mhi_device *mhi_dev, struct qaic_device *qdev)
{
	struct qts_resp *resp;
	int ret;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->qdev = qdev;
	INIT_WORK(&resp->work, qaic_boot_timesync_worker);

	ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, &resp->data, sizeof(resp->data), MHI_EOT);
	if (ret) {
		kfree(resp);
		dev_warn(&mhi_dev->dev, "Failed to queue response buffer %d\n", ret);
		return ret;
	}

	return 0;
}

static void qaic_boot_timesync_remove(struct mhi_device *mhi_dev)
{
	struct qaic_device *qdev;

	qdev = dev_get_drvdata(&mhi_dev->dev);
	mhi_unprepare_from_transfer(qdev->qts_ch);
	qdev->qts_ch = NULL;
}

static int qaic_boot_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
	int ret;

	ret = mhi_prepare_for_transfer(mhi_dev);
	if (ret)
		return ret;

	qdev->qts_ch = mhi_dev;
	dev_set_drvdata(&mhi_dev->dev, qdev);

	ret = qaic_boot_timesync_queue_resp(mhi_dev, qdev);
	if (ret) {
		dev_set_drvdata(&mhi_dev->dev, NULL);
		qdev->qts_ch = NULL;
		mhi_unprepare_from_transfer(mhi_dev);
	}

	return ret;
}

static void qaic_boot_timesync_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	kfree(mhi_result->buf_addr);
}

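/*
 * DL completion: recover the wrapping struct qts_resp from the queued
 * buffer (&resp->data) and hand it to the workqueue; on error, free it.
 */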
static void qaic_boot_timesync_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct qts_resp *resp = container_of(mhi_result->buf_addr, struct qts_resp, data);

	if (mhi_result->transaction_status || mhi_result->bytes_xferd != sizeof(resp->data)) {
		kfree(resp);
		return;
	}

	queue_work(resp->qdev->qts_wq, &resp->work);
}

static const struct mhi_device_id qaic_boot_timesync_match_table[] = {
	{ .chan = "QAIC_TIMESYNC"},
	{},
};

static struct mhi_driver qaic_boot_timesync_driver = {
	.id_table = qaic_boot_timesync_match_table,
	.remove = qaic_boot_timesync_remove,
	.probe = qaic_boot_timesync_probe,
	.ul_xfer_cb = qaic_boot_timesync_ul_xfer_cb,
	.dl_xfer_cb = qaic_boot_timesync_dl_xfer_cb,
	.driver = {
		.name = "qaic_timesync",
	},
};

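/*
 * Register both MHI drivers; if the second registration fails, unregister
 * the first so a failed init leaves nothing behind.
 */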
int qaic_timesync_init(void)
{
	int ret;

	ret = mhi_driver_register(&qaic_timesync_driver);
	if (ret)
		return ret;

	ret = mhi_driver_register(&qaic_boot_timesync_driver);
	if (ret)
		mhi_driver_unregister(&qaic_timesync_driver);

	return ret;
}

void qaic_timesync_deinit(void)
{
	mhi_driver_unregister(&qaic_boot_timesync_driver);
	mhi_driver_unregister(&qaic_timesync_driver);
}