// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023, Intel Corporation.
 * Intel Visual Sensing Controller Transport Layer Linux driver
 */

#include <linux/acpi.h>
#include <linux/cleanup.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "vsc-tp.h"

#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS	20
#define VSC_TP_ROM_BOOTUP_DELAY_MS		10
#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US		(500 * USEC_PER_MSEC)
#define VSC_TP_ROM_XFER_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_WAIT_FW_POLL_TIMEOUT		(2 * HZ)
#define VSC_TP_WAIT_FW_POLL_DELAY_US		(20 * USEC_PER_MSEC)
#define VSC_TP_MAX_XFER_COUNT			5

#define VSC_TP_PACKET_SYNC			0x31
#define VSC_TP_CRC_SIZE				sizeof(u32)
#define VSC_TP_MAX_MSG_SIZE			2048
/* SPI xfer timeout size */
#define VSC_TP_XFER_TIMEOUT_BYTES		700
#define VSC_TP_PACKET_PADDING_SIZE		1
#define VSC_TP_PACKET_SIZE(pkt) \
	(sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_PACKET_SIZE \
	(sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE)
#define VSC_TP_MAX_XFER_SIZE \
	(VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES)
#define VSC_TP_NEXT_XFER_LEN(len, offset) \
	(len + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE)

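/*
 * On-wire packet layout: sync byte, command byte, little-endian payload
 * length, sequence number, payload, then a CRC32 of the header and payload
 * appended after the payload.
 */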
struct vsc_tp_packet {
	__u8 sync;
	__u8 cmd;
	__le16 len;
	__le32 seq;
	__u8 buf[] __counted_by(len);
};

struct vsc_tp {
	/* do the actual data transfer */
	struct spi_device *spi;

	/* bind with mei framework */
	struct platform_device *pdev;

	struct gpio_desc *wakeuphost;
	struct gpio_desc *resetfw;
	struct gpio_desc *wakeupfw;

	/* command sequence number */
	u32 seq;

	/* command buffer */
	void *tx_buf;
	void *rx_buf;

	atomic_t assert_cnt;
	wait_queue_head_t xfer_wait;

	vsc_tp_event_cb_t event_notify;
	void *event_notify_context;

	/* used to protect command download */
	struct mutex mutex;
};

/* GPIO resources */
static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
static const struct acpi_gpio_params wakeupfw = { 3, 0, false };

static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = {
	{ "wakeuphost-gpios", &wakeuphost_gpio, 1 },
	{ "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
	{ "resetfw-gpios", &resetfw_gpio, 1 },
	{ "wakeupfw-gpios", &wakeupfw, 1 },
	{}
};

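/*
 * Hard IRQ handler for the firmware wakeup/event interrupt: count the
 * assertion, wake anyone blocked in vsc_tp_wakeup_request() and defer
 * event delivery to the threaded handler below.
 */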
static irqreturn_t vsc_tp_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	atomic_inc(&tp->assert_cnt);

	wake_up(&tp->xfer_wait);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
	struct vsc_tp *tp = data;

	if (tp->event_notify)
		tp->event_notify(tp->event_notify_context);

	return IRQ_HANDLED;
}

/*
 * Wake the firmware before an SPI transfer: drive wakeupfw low as the
 * request, wait for the IRQ handler to record the firmware's assertion,
 * then poll wakeuphost until it reads active.
 */
static int vsc_tp_wakeup_request(struct vsc_tp *tp)
{
	int ret;

	gpiod_set_value_cansleep(tp->wakeupfw, 0);

	ret = wait_event_timeout(tp->xfer_wait,
				 atomic_read(&tp->assert_cnt),
				 VSC_TP_WAIT_FW_POLL_TIMEOUT);
	if (!ret)
		return -ETIMEDOUT;

	return read_poll_timeout(gpiod_get_value_cansleep, ret, ret,
				 VSC_TP_WAIT_FW_POLL_DELAY_US,
				 VSC_TP_WAIT_FW_POLL_TIMEOUT, false,
				 tp->wakeuphost);
}

static void vsc_tp_wakeup_release(struct vsc_tp *tp)
{
	atomic_dec_if_positive(&tp->assert_cnt);

	gpiod_set_value_cansleep(tp->wakeupfw, 1);
}

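/*
 * One full-duplex SPI transfer: clock @len bytes out of @obuf while
 * capturing @len received bytes into @ibuf.
 */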
static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len)
{
	struct spi_message msg = { 0 };
	struct spi_transfer xfer = {
		.tx_buf = obuf,
		.rx_buf = ibuf,
		.len = len,
	};

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync_locked(tp->spi, &msg);
}

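/*
 * Clock the command packet out and parse the firmware's reply from the
 * full-duplex receive data. The reply is consumed in three stages: the
 * ack header, the payload (copied into @ibuf) and the trailing CRC.
 * Additional transfers are issued until the whole reply has been seen or
 * VSC_TP_MAX_XFER_COUNT attempts are exhausted.
 */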
static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt,
			      void *ibuf, u16 ilen)
{
	int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet);
	int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES;
	u8 *src, *crc_src, *rx_buf = tp->rx_buf;
	int count_down = VSC_TP_MAX_XFER_COUNT;
	u32 recv_crc = 0, crc = ~0;
	struct vsc_tp_packet ack;
	u8 *dst = (u8 *)&ack;
	bool synced = false;

	do {
		ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len);
		if (ret)
			return ret;
		memset(pkt, 0, VSC_TP_MAX_XFER_SIZE);

		if (synced) {
			src = rx_buf;
			src_len = next_xfer_len;
		} else {
			src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len);
			if (!src)
				continue;
			synced = true;
			src_len = next_xfer_len - (src - rx_buf);
		}

		/* traverse received data */
		while (src_len > 0) {
			cpy_len = min(src_len, dst_len);
			memcpy(dst, src, cpy_len);
			crc_src = src;
			src += cpy_len;
			src_len -= cpy_len;
			dst += cpy_len;
			dst_len -= cpy_len;

			if (offset < sizeof(ack)) {
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (!src_len)
					continue;

				if (le16_to_cpu(ack.len)) {
					dst = ibuf;
					dst_len = min(ilen, le16_to_cpu(ack.len));
				} else {
					dst = (u8 *)&recv_crc;
					dst_len = sizeof(recv_crc);
				}
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) {
				offset += cpy_len;
				crc = crc32(crc, crc_src, cpy_len);

				if (src_len) {
					int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset;

					cpy_len = min(src_len, remain);
					offset += cpy_len;
					crc = crc32(crc, src, cpy_len);
					src += cpy_len;
					src_len -= cpy_len;
					if (src_len) {
						dst = (u8 *)&recv_crc;
						dst_len = sizeof(recv_crc);
						continue;
					}
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			} else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) {
				offset += cpy_len;

				if (src_len) {
					/* terminate the traverse */
					next_xfer_len = 0;
					break;
				}
				next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset);
			}
		}
	} while (next_xfer_len > 0 && --count_down);

	if (next_xfer_len > 0)
		return -EAGAIN;

	if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) {
		dev_err(&tp->spi->dev, "recv crc or seq error\n");
		return -EINVAL;
	}

	if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK ||
	    ack.cmd == VSC_TP_CMD_BUSY) {
		dev_err(&tp->spi->dev, "recv cmd ack error\n");
		return -EAGAIN;
	}

	return min(le16_to_cpu(ack.len), ilen);
}

/**
 * vsc_tp_xfer - transfer data to firmware
 * @tp: vsc_tp device handle
 * @cmd: the command to be sent to the device
 * @obuf: the tx buffer to be sent to the device
 * @olen: the length of tx buffer
 * @ibuf: the rx buffer to receive from the device
 * @ilen: the length of rx buffer
 * Return: the length of received data in case of success,
 *	otherwise negative value
 */
int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen,
		void *ibuf, size_t ilen)
{
	struct vsc_tp_packet *pkt = tp->tx_buf;
	u32 crc;
	int ret;

	if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	pkt->sync = VSC_TP_PACKET_SYNC;
	pkt->cmd = cmd;
	pkt->len = cpu_to_le16(olen);
	pkt->seq = cpu_to_le32(++tp->seq);
	memcpy(pkt->buf, obuf, olen);

	crc = ~crc32(~0, (u8 *)pkt, sizeof(pkt) + olen);
	memcpy(pkt->buf + olen, &crc, sizeof(crc));

	ret = vsc_tp_wakeup_request(tp);
	if (unlikely(ret))
		dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret);
	else
		ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen);

	vsc_tp_wakeup_release(tp);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, VSC_TP);
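
/*
 * Illustrative caller sketch, not taken from an in-tree user; the command
 * value, request layout and reply size below are hypothetical:
 *
 *	u8 req[8] = { };
 *	u8 reply[64];
 *	int n;
 *
 *	n = vsc_tp_xfer(tp, 0x01, req, sizeof(req), reply, sizeof(reply));
 *	if (n < 0)
 *		return n;
 *
 * On success, n is the number of reply bytes copied into the caller's buffer.
 */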

/**
 * vsc_tp_rom_xfer - transfer data to rom code
 * @tp: vsc_tp device handle
 * @obuf: the data buffer to be sent to the device
 * @ibuf: the buffer to receive data from the device
 * @len: the length of tx buffer and rx buffer
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len)
{
	size_t words = len / sizeof(__be32);
	int ret;

	if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE)
		return -EINVAL;

	guard(mutex)(&tp->mutex);

	/* rom xfer is big endian */
	cpu_to_be32_array(tp->tx_buf, obuf, words);

	ret = read_poll_timeout(gpiod_get_value_cansleep, ret,
				!ret, VSC_TP_ROM_XFER_POLL_DELAY_US,
				VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false,
				tp->wakeuphost);
	if (ret) {
		dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret);
		return ret;
	}

	ret = vsc_tp_dev_xfer(tp, tp->tx_buf, tp->rx_buf, len);
	if (ret)
		return ret;

	if (ibuf)
		cpu_to_be32_array(ibuf, tp->rx_buf, words);

	return ret;
}
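
/*
 * Example ROM exchange, a sketch only (the opcode is hypothetical and the
 * buffers hold CPU-order words; the helper does the big-endian conversion):
 *
 *	u32 cmd = 0x1234;
 *	u32 ack;
 *	int err;
 *
 *	err = vsc_tp_rom_xfer(tp, &cmd, &ack, sizeof(cmd));
 */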

/**
 * vsc_tp_reset - reset vsc transport layer
 * @tp: vsc_tp device handle
 */
void vsc_tp_reset(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);

	/* toggle reset pin */
	gpiod_set_value_cansleep(tp->resetfw, 0);
	msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS);
	gpiod_set_value_cansleep(tp->resetfw, 1);

	/* wait for ROM */
	msleep(VSC_TP_ROM_BOOTUP_DELAY_MS);

	/*
	 * Set default host wakeup pin to non-active
	 * to avoid unexpected host irq interrupt.
	 */
	gpiod_set_value_cansleep(tp->wakeupfw, 1);

	atomic_set(&tp->assert_cnt, 0);

	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, VSC_TP);

/**
 * vsc_tp_need_read - check if device has data to send
 * @tp: vsc_tp device handle
 * Return: true if device has data to send, otherwise false
 */
bool vsc_tp_need_read(struct vsc_tp *tp)
{
	if (!atomic_read(&tp->assert_cnt))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeuphost))
		return false;
	if (!gpiod_get_value_cansleep(tp->wakeupfw))
		return false;

	return true;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, VSC_TP);

/**
 * vsc_tp_register_event_cb - register a callback function to receive event
 * @tp: vsc_tp device handle
 * @event_cb: callback function
 * @context: execution context of event callback
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb,
			     void *context)
{
	tp->event_notify = event_cb;
	tp->event_notify_context = context;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP);

/**
 * vsc_tp_request_irq - request irq for vsc_tp device
 * @tp: vsc_tp device handle
 * Return: 0 in case of success, negative value in case of error
 */
int vsc_tp_request_irq(struct vsc_tp *tp)
{
	struct spi_device *spi = tp->spi;
	struct device *dev = &spi->dev;
	int ret;

	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
	ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   dev_name(dev), tp);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_request_irq, VSC_TP);

/**
 * vsc_tp_free_irq - free irq for vsc_tp device
 * @tp: vsc_tp device handle
 */
void vsc_tp_free_irq(struct vsc_tp *tp)
{
	free_irq(tp->spi->irq, tp);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_free_irq, VSC_TP);
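
/*
 * vsc_tp_request_irq() and vsc_tp_free_irq() let the client driver drop and
 * re-acquire the wakeup interrupt, for example around a firmware reset; the
 * exact sequence is up to the caller.
 */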

/**
 * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_synchronize(struct vsc_tp *tp)
{
	synchronize_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, VSC_TP);

/**
 * vsc_tp_intr_enable - enable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_enable(struct vsc_tp *tp)
{
	enable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, VSC_TP);

/**
 * vsc_tp_intr_disable - disable vsc_tp interrupt
 * @tp: vsc_tp device handle
 */
void vsc_tp_intr_disable(struct vsc_tp *tp)
{
	disable_irq(tp->spi->irq);
}
EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP);

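/*
 * acpi_dev_for_each_child() callback: record the first child ACPI device
 * found and return nonzero to stop the iteration.
 */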
static int vsc_tp_match_any(struct acpi_device *adev, void *data)
{
	struct acpi_device **__adev = data;

	*__adev = adev;

	return 1;
}

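/*
 * Probe: allocate transfer buffers, map the ACPI-provided GPIOs, request
 * the wakeup interrupt and register the "intel_vsc" platform device that
 * the MEI glue driver binds to.
 */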
static int vsc_tp_probe(struct spi_device *spi)
{
	struct vsc_tp *tp;
	struct platform_device_info pinfo = {
		.name = "intel_vsc",
		.data = &tp,
		.size_data = sizeof(tp),
		.id = PLATFORM_DEVID_NONE,
	};
	struct device *dev = &spi->dev;
	struct platform_device *pdev;
	struct acpi_device *adev;
	int ret;

	tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return -ENOMEM;

	tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
	if (!tp->tx_buf)
		return -ENOMEM;

	tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL);
	if (!tp->rx_buf)
		return -ENOMEM;

	ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios);
	if (ret)
		return ret;

	tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN);
	if (IS_ERR(tp->wakeuphost))
		return PTR_ERR(tp->wakeuphost);

	tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->resetfw))
		return PTR_ERR(tp->resetfw);

	tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH);
	if (IS_ERR(tp->wakeupfw))
		return PTR_ERR(tp->wakeupfw);

	atomic_set(&tp->assert_cnt, 0);
	init_waitqueue_head(&tp->xfer_wait);
	tp->spi = spi;

	irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY);
	ret = request_threaded_irq(spi->irq, vsc_tp_isr, vsc_tp_thread_isr,
				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				   dev_name(dev), tp);
	if (ret)
		return ret;

	mutex_init(&tp->mutex);

	/* only one child acpi device */
	ret = acpi_dev_for_each_child(ACPI_COMPANION(dev),
				      vsc_tp_match_any, &adev);
	if (!ret) {
		ret = -ENODEV;
		goto err_destroy_lock;
	}

	pinfo.fwnode = acpi_fwnode_handle(adev);
	pdev = platform_device_register_full(&pinfo);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		goto err_destroy_lock;
	}

	tp->pdev = pdev;
	spi_set_drvdata(spi, tp);

	return 0;

err_destroy_lock:
	mutex_destroy(&tp->mutex);

	free_irq(spi->irq, tp);

	return ret;
}

static void vsc_tp_remove(struct spi_device *spi)
{
	struct vsc_tp *tp = spi_get_drvdata(spi);

	platform_device_unregister(tp->pdev);

	mutex_destroy(&tp->mutex);

	free_irq(spi->irq, tp);
}

static const struct acpi_device_id vsc_tp_acpi_ids[] = {
	{ "INTC1009" }, /* Raptor Lake */
	{ "INTC1058" }, /* Tiger Lake */
	{ "INTC1094" }, /* Alder Lake */
	{ "INTC10D0" }, /* Meteor Lake */
	{}
};
MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids);

static struct spi_driver vsc_tp_driver = {
	.probe = vsc_tp_probe,
	.remove = vsc_tp_remove,
	.driver = {
		.name = "vsc-tp",
		.acpi_match_table = vsc_tp_acpi_ids,
	},
};
module_spi_driver(vsc_tp_driver);

MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer");
MODULE_LICENSE("GPL");