/* SPDX-License-Identifier: BSD-3-Clause */
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * Common HCI stuff
 */

#ifndef HCI_H
#define HCI_H

#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/i3c/master.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Handy logging macro to save on line length */
#define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)

/* 32-bit word aware bit and mask macros */
#define W0_MASK(h, l)	GENMASK((h) - 0,  (l) - 0)
#define W1_MASK(h, l)	GENMASK((h) - 32, (l) - 32)
#define W2_MASK(h, l)	GENMASK((h) - 64, (l) - 64)
#define W3_MASK(h, l)	GENMASK((h) - 96, (l) - 96)

/* Same for single bit macros (trailing _ to align with W*_MASK width) */
#define W0_BIT_(x)	BIT((x) - 0)
#define W1_BIT_(x)	BIT((x) - 32)
#define W2_BIT_(x)	BIT((x) - 64)
#define W3_BIT_(x)	BIT((x) - 96)
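
/*
 * Usage sketch with a made-up field: a field occupying bits 37:32 of a
 * four-word descriptor lives in word 1, so it is declared with W1_MASK()
 * and accessed against that word using the FIELD_GET()/FIELD_PREP()
 * helpers from <linux/bitfield.h>:
 *
 *	#define CMD_1_SOME_FIELD	W1_MASK(37, 32)
 *
 *	v = FIELD_GET(CMD_1_SOME_FIELD, xfer->cmd_desc[1]);
 *	xfer->cmd_desc[1] |= FIELD_PREP(CMD_1_SOME_FIELD, v);
 */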

#define reg_read(r)	readl(hci->base_regs + (r))
#define reg_write(r, v)	writel(v, hci->base_regs + (r))
#define reg_set(r, v)	reg_write(r, reg_read(r) | (v))
#define reg_clear(r, v)	reg_write(r, reg_read(r) & ~(v))
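
/*
 * These accessors expect a local variable named hci to be in scope.
 * Usage sketch, with a hypothetical register offset and bit:
 *
 *	#define HC_CONTROL		0x00
 *	#define HC_CONTROL_BUS_ENABLE	W0_BIT_(31)
 *
 *	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
 *	if (reg_read(HC_CONTROL) & HC_CONTROL_BUS_ENABLE)
 *		DBG("bus enabled");
 */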

struct hci_cmd_ops;

/* Our main structure */
struct i3c_hci {
	struct i3c_master_controller master;
	void __iomem *base_regs;
	void __iomem *DAT_regs;
	void __iomem *DCT_regs;
	void __iomem *RHS_regs;
	void __iomem *PIO_regs;
	void __iomem *EXTCAPS_regs;
	void __iomem *AUTOCMD_regs;
	void __iomem *DEBUG_regs;
	const struct hci_io_ops *io;
	void *io_data;
	const struct hci_cmd_ops *cmd;
	atomic_t next_cmd_tid;
	u32 caps;
	unsigned int quirks;
	unsigned int DAT_entries;
	unsigned int DAT_entry_size;
	void *DAT_data;
	unsigned int DCT_entries;
	unsigned int DCT_entry_size;
	u8 version_major;
	u8 version_minor;
	u8 revision;
	u32 vendor_mipi_id;
	u32 vendor_version_id;
	u32 vendor_product_id;
	void *vendor_data;
};


/*
 * Structure to represent a master-initiated transfer.
 * The rnw, data and data_len fields must be initialized before calling any
 * hci->cmd->*() method. The cmd method will initialize cmd_desc[] and
 * possibly modify (clear) the data field. Then xfer->cmd_desc[0] can
 * be augmented with CMD_0_ROC and/or CMD_0_TOC.
 * The completion field needs to be initialized before queueing with
 * hci->io->queue_xfer(), and requires CMD_0_ROC to be set.
 */
struct hci_xfer {
	u32 cmd_desc[4];
	u32 response;
	bool rnw;
	void *data;
	unsigned int data_len;
	unsigned int cmd_tid;
	struct completion *completion;
	union {
		struct {
			/* PIO specific */
			struct hci_xfer *next_xfer;
			struct hci_xfer *next_data;
			struct hci_xfer *next_resp;
			unsigned int data_left;
			u32 data_word_before_partial;
		};
		struct {
			/* DMA specific */
			dma_addr_t data_dma;
			void *bounce_buf;
			int ring_number;
			int ring_entry;
		};
	};
};

static inline struct hci_xfer *hci_alloc_xfer(unsigned int n)
{
	return kcalloc(n, sizeof(struct hci_xfer), GFP_KERNEL);
}

static inline void hci_free_xfer(struct hci_xfer *xfer, unsigned int n)
{
	kfree(xfer);
}
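
/*
 * Typical lifecycle for a single transfer, following the rules in the
 * hci_xfer comment above. A sketch only: the hci_cmd_ops method name is
 * hypothetical (the ops are defined elsewhere) and error handling is
 * omitted:
 *
 *	struct hci_xfer *xfer = hci_alloc_xfer(1);
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	xfer->rnw = true;
 *	xfer->data = buf;
 *	xfer->data_len = len;
 *	hci->cmd->prep_xfer(hci, dev, xfer);	/* hypothetical method */
 *	xfer->cmd_desc[0] |= CMD_0_ROC | CMD_0_TOC;
 *	xfer->completion = &done;
 *	hci->io->queue_xfer(hci, xfer, 1);
 *	wait_for_completion(&done);
 *	hci_free_xfer(xfer, 1);
 */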


/* This abstracts PIO vs DMA operations */
struct hci_io_ops {
	bool (*irq_handler)(struct i3c_hci *hci);
	int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
	bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
	int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
			   const struct i3c_ibi_setup *req);
	void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev);
	void (*recycle_ibi_slot)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
				 struct i3c_ibi_slot *slot);
	int (*init)(struct i3c_hci *hci);
	void (*cleanup)(struct i3c_hci *hci);
};

extern const struct hci_io_ops mipi_i3c_hci_pio;
extern const struct hci_io_ops mipi_i3c_hci_dma;
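
/*
 * Probe code selects one of these backends and initializes it. A sketch,
 * assuming both register blocks were discovered (the core driver's real
 * selection logic is more involved); HCI_QUIRK_PIO_MODE is defined below:
 *
 *	if (hci->quirks & HCI_QUIRK_PIO_MODE)
 *		hci->io = &mipi_i3c_hci_pio;
 *	else
 *		hci->io = &mipi_i3c_hci_dma;
 *	ret = hci->io->init(hci);
 */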


/* Our per-device master private data */
struct i3c_hci_dev_data {
	int dat_idx;
	void *ibi_data;
};
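
/*
 * A sketch of how a device attach hook might populate this, using the
 * standard i3c_dev_set_master_data() helper; the DAT entry allocator
 * shown is hypothetical:
 *
 *	struct i3c_hci_dev_data *dev_data;
 *
 *	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
 *	if (!dev_data)
 *		return -ENOMEM;
 *	dev_data->dat_idx = dat_alloc_entry(hci);	/* hypothetical */
 *	i3c_dev_set_master_data(dev, dev_data);
 */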


/* list of quirks */
#define HCI_QUIRK_RAW_CCC	BIT(1)	/* CCC framing must be explicit */
#define HCI_QUIRK_PIO_MODE	BIT(2)	/* Set PIO mode for AMD platforms */
#define HCI_QUIRK_OD_PP_TIMING	BIT(3)	/* Set OD and PP timings for AMD platforms */
#define HCI_QUIRK_RESP_BUF_THLD	BIT(4)	/* Set resp buf thld to 0 for AMD platforms */


/* global functions */
void mipi_i3c_hci_resume(struct i3c_hci *hci);
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci);
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);
void amd_set_od_pp_timing(struct i3c_hci *hci);
void amd_set_resp_buf_thld(struct i3c_hci *hci);
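
/*
 * Example quirk handling during controller setup (a sketch; the real
 * call sites are in the core and DMA code):
 *
 *	if (hci->quirks & HCI_QUIRK_OD_PP_TIMING)
 *		amd_set_od_pp_timing(hci);
 *	if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
 *		amd_set_resp_buf_thld(hci);
 */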

#endif