1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. |
4 | */ |
/*
 * QCOM BAM DMA engine driver
 *
 * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
 * peripherals on the MSM 8x74. The configuration of the channels is dependent
 * on the way they are hard wired to that specific peripheral. The peripheral
 * device tree entries specify the configuration of each channel.
 *
 * The DMA controller requires the use of external memory for storage of the
 * hardware descriptors for each channel. The descriptor FIFO is accessed as a
 * circular buffer and operations are managed according to the offset within the
 * FIFO. After pipe/channel reset, all of the pipe registers and internal state
 * are back to defaults.
 *
 * During DMA operations, we write descriptors to the FIFO, being careful to
 * handle wrapping and then write the last FIFO offset to that channel's
 * P_EVNT_REG register to kick off the transaction. The P_SW_OFSTS register
 * indicates the current FIFO offset that is being processed, so there is some
 * indication of where the hardware is currently working.
 */
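/*
 * Worked example of the offset handshake described above (values are
 * illustrative only): with 8-byte descriptors, queueing descriptors into FIFO
 * slots 0..3 and then writing 4 * sizeof(struct bam_desc_hw) = 32 to
 * P_EVNT_REG tells the hardware to process everything up to byte offset 32.
 * Reading back P_SW_OFSTS and seeing e.g. 16 means the first two descriptors
 * have been consumed and the third is being worked on.
 */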
25 | |
26 | #include <linux/kernel.h> |
27 | #include <linux/io.h> |
28 | #include <linux/init.h> |
29 | #include <linux/slab.h> |
30 | #include <linux/module.h> |
31 | #include <linux/interrupt.h> |
32 | #include <linux/dma-mapping.h> |
33 | #include <linux/scatterlist.h> |
34 | #include <linux/device.h> |
35 | #include <linux/platform_device.h> |
36 | #include <linux/of.h> |
37 | #include <linux/of_address.h> |
38 | #include <linux/of_irq.h> |
39 | #include <linux/of_dma.h> |
40 | #include <linux/circ_buf.h> |
41 | #include <linux/clk.h> |
42 | #include <linux/dmaengine.h> |
43 | #include <linux/pm_runtime.h> |
44 | |
45 | #include "../dmaengine.h" |
46 | #include "../virt-dma.h" |
47 | |
48 | struct bam_desc_hw { |
49 | __le32 addr; /* Buffer physical address */ |
50 | __le16 size; /* Buffer size in bytes */ |
51 | __le16 flags; |
52 | }; |
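/*
 * Note: each hardware descriptor is 8 bytes (one __le32 address plus two
 * __le16 fields), stored little-endian as seen by the BAM; the driver sizes
 * and indexes the descriptor FIFO in units of sizeof(struct bam_desc_hw).
 */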
53 | |
54 | #define BAM_DMA_AUTOSUSPEND_DELAY 100 |
55 | |
56 | #define DESC_FLAG_INT BIT(15) |
57 | #define DESC_FLAG_EOT BIT(14) |
58 | #define DESC_FLAG_EOB BIT(13) |
59 | #define DESC_FLAG_NWD BIT(12) |
60 | #define DESC_FLAG_CMD BIT(11) |
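/*
 * Descriptor flag summary (bits in bam_desc_hw.flags), using the conventional
 * expansions of the names: INT requests a pipe interrupt when the descriptor
 * completes, EOT marks end-of-transfer, EOB marks end-of-block, NWD requests
 * "notify when done" ordering, and CMD marks a command descriptor rather than
 * a data descriptor.
 */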
61 | |
62 | struct bam_async_desc { |
63 | struct virt_dma_desc vd; |
64 | |
65 | u32 num_desc; |
66 | u32 xfer_len; |
67 | |
68 | /* transaction flags, EOT|EOB|NWD */ |
69 | u16 flags; |
70 | |
71 | struct bam_desc_hw *curr_desc; |
72 | |
73 | /* list node for the desc in the bam_chan list of descriptors */ |
74 | struct list_head desc_node; |
75 | enum dma_transfer_direction dir; |
76 | size_t length; |
77 | struct bam_desc_hw desc[] __counted_by(num_desc); |
78 | }; |
79 | |
80 | enum bam_reg { |
81 | BAM_CTRL, |
82 | BAM_REVISION, |
83 | BAM_NUM_PIPES, |
84 | BAM_DESC_CNT_TRSHLD, |
85 | BAM_IRQ_SRCS, |
86 | BAM_IRQ_SRCS_MSK, |
87 | BAM_IRQ_SRCS_UNMASKED, |
88 | BAM_IRQ_STTS, |
89 | BAM_IRQ_CLR, |
90 | BAM_IRQ_EN, |
91 | BAM_CNFG_BITS, |
92 | BAM_IRQ_SRCS_EE, |
93 | BAM_IRQ_SRCS_MSK_EE, |
94 | BAM_P_CTRL, |
95 | BAM_P_RST, |
96 | BAM_P_HALT, |
97 | BAM_P_IRQ_STTS, |
98 | BAM_P_IRQ_CLR, |
99 | BAM_P_IRQ_EN, |
100 | BAM_P_EVNT_DEST_ADDR, |
101 | BAM_P_EVNT_REG, |
102 | BAM_P_SW_OFSTS, |
103 | BAM_P_DATA_FIFO_ADDR, |
104 | BAM_P_DESC_FIFO_ADDR, |
105 | BAM_P_EVNT_GEN_TRSHLD, |
106 | BAM_P_FIFO_SIZES, |
107 | }; |
108 | |
109 | struct reg_offset_data { |
110 | u32 base_offset; |
111 | unsigned int pipe_mult, evnt_mult, ee_mult; |
112 | }; |
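/*
 * bam_addr() computes a register address as:
 *   regs + base_offset + pipe_mult * pipe + evnt_mult * pipe + ee_mult * ee
 * For example, with the v1.4 layout below, BAM_P_CTRL for pipe 3 resolves to
 * 0x1000 + 0x1000 * 3 = 0x4000 from the start of the register block.
 */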
113 | |
static const struct reg_offset_data bam_v1_3_reg_info[] = {
	[BAM_CTRL]		= { 0x0F80, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x0F84, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x0FBC, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x0F88, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x0F8C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x0F90, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0FB0, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x0F94, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x0F98, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x0F9C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x0FFC, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x1800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x1804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL]		= { 0x0000, 0x80, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x0004, 0x80, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x0008, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x0010, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x0014, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x0018, 0x80, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x102C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x1018, 0x00, 0x40, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x1000, 0x00, 0x40, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x1024, 0x00, 0x40, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x101C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1028, 0x00, 0x40, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x1020, 0x00, 0x40, 0x00 },
};

static const struct reg_offset_data bam_v1_4_reg_info[] = {
	[BAM_CTRL]		= { 0x0000, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x0004, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x003C, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x0008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x000C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x0010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0030, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x0014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x0018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x0800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x0804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL]		= { 0x1000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x1004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x1008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x1010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x1014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x1018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x182C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x1818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x1800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x1824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x181C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x1820, 0x00, 0x1000, 0x00 },
};

static const struct reg_offset_data bam_v1_7_reg_info[] = {
	[BAM_CTRL]		= { 0x00000, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x01000, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x01008, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x00008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x03010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x03014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x03018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x00014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x00018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x0001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x0007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x03000, 0x00, 0x00, 0x1000 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x03004, 0x00, 0x00, 0x1000 },
	[BAM_P_CTRL]		= { 0x13000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x13004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x13008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x13010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x13014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x13018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x1382C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x13818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x13800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x13824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x1381C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x13828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x13820, 0x00, 0x1000, 0x00 },
};
200 | |
201 | /* BAM CTRL */ |
202 | #define BAM_SW_RST BIT(0) |
203 | #define BAM_EN BIT(1) |
204 | #define BAM_EN_ACCUM BIT(4) |
205 | #define BAM_TESTBUS_SEL_SHIFT 5 |
206 | #define BAM_TESTBUS_SEL_MASK 0x3F |
207 | #define BAM_DESC_CACHE_SEL_SHIFT 13 |
208 | #define BAM_DESC_CACHE_SEL_MASK 0x3 |
209 | #define BAM_CACHED_DESC_STORE BIT(15) |
210 | #define IBC_DISABLE BIT(16) |
211 | |
212 | /* BAM REVISION */ |
213 | #define REVISION_SHIFT 0 |
214 | #define REVISION_MASK 0xFF |
215 | #define NUM_EES_SHIFT 8 |
216 | #define NUM_EES_MASK 0xF |
217 | #define CE_BUFFER_SIZE BIT(13) |
218 | #define AXI_ACTIVE BIT(14) |
219 | #define USE_VMIDMT BIT(15) |
220 | #define SECURED BIT(16) |
221 | #define BAM_HAS_NO_BYPASS BIT(17) |
222 | #define HIGH_FREQUENCY_BAM BIT(18) |
223 | #define INACTIV_TMRS_EXST BIT(19) |
224 | #define NUM_INACTIV_TMRS BIT(20) |
225 | #define DESC_CACHE_DEPTH_SHIFT 21 |
226 | #define DESC_CACHE_DEPTH_1 (0 << DESC_CACHE_DEPTH_SHIFT) |
227 | #define DESC_CACHE_DEPTH_2 (1 << DESC_CACHE_DEPTH_SHIFT) |
228 | #define DESC_CACHE_DEPTH_3 (2 << DESC_CACHE_DEPTH_SHIFT) |
229 | #define DESC_CACHE_DEPTH_4 (3 << DESC_CACHE_DEPTH_SHIFT) |
230 | #define CMD_DESC_EN BIT(23) |
231 | #define INACTIV_TMR_BASE_SHIFT 24 |
232 | #define INACTIV_TMR_BASE_MASK 0xFF |
233 | |
234 | /* BAM NUM PIPES */ |
235 | #define BAM_NUM_PIPES_SHIFT 0 |
236 | #define BAM_NUM_PIPES_MASK 0xFF |
237 | #define PERIPH_NON_PIPE_GRP_SHIFT 16 |
238 | #define PERIPH_NON_PIP_GRP_MASK 0xFF |
239 | #define BAM_NON_PIPE_GRP_SHIFT 24 |
240 | #define BAM_NON_PIPE_GRP_MASK 0xFF |
241 | |
242 | /* BAM CNFG BITS */ |
243 | #define BAM_PIPE_CNFG BIT(2) |
244 | #define BAM_FULL_PIPE BIT(11) |
245 | #define BAM_NO_EXT_P_RST BIT(12) |
246 | #define BAM_IBC_DISABLE BIT(13) |
247 | #define BAM_SB_CLK_REQ BIT(14) |
248 | #define BAM_PSM_CSW_REQ BIT(15) |
249 | #define BAM_PSM_P_RES BIT(16) |
250 | #define BAM_AU_P_RES BIT(17) |
251 | #define BAM_SI_P_RES BIT(18) |
252 | #define BAM_WB_P_RES BIT(19) |
253 | #define BAM_WB_BLK_CSW BIT(20) |
254 | #define BAM_WB_CSW_ACK_IDL BIT(21) |
255 | #define BAM_WB_RETR_SVPNT BIT(22) |
256 | #define BAM_WB_DSC_AVL_P_RST BIT(23) |
257 | #define BAM_REG_P_EN BIT(24) |
258 | #define BAM_PSM_P_HD_DATA BIT(25) |
259 | #define BAM_AU_ACCUMED BIT(26) |
260 | #define BAM_CMD_ENABLE BIT(27) |
261 | |
262 | #define BAM_CNFG_BITS_DEFAULT (BAM_PIPE_CNFG | \ |
263 | BAM_NO_EXT_P_RST | \ |
264 | BAM_IBC_DISABLE | \ |
265 | BAM_SB_CLK_REQ | \ |
266 | BAM_PSM_CSW_REQ | \ |
267 | BAM_PSM_P_RES | \ |
268 | BAM_AU_P_RES | \ |
269 | BAM_SI_P_RES | \ |
270 | BAM_WB_P_RES | \ |
271 | BAM_WB_BLK_CSW | \ |
272 | BAM_WB_CSW_ACK_IDL | \ |
273 | BAM_WB_RETR_SVPNT | \ |
274 | BAM_WB_DSC_AVL_P_RST | \ |
275 | BAM_REG_P_EN | \ |
276 | BAM_PSM_P_HD_DATA | \ |
277 | BAM_AU_ACCUMED | \ |
278 | BAM_CMD_ENABLE) |
279 | |
280 | /* PIPE CTRL */ |
281 | #define P_EN BIT(1) |
282 | #define P_DIRECTION BIT(3) |
283 | #define P_SYS_STRM BIT(4) |
284 | #define P_SYS_MODE BIT(5) |
285 | #define P_AUTO_EOB BIT(6) |
286 | #define P_AUTO_EOB_SEL_SHIFT 7 |
287 | #define P_AUTO_EOB_SEL_512 (0 << P_AUTO_EOB_SEL_SHIFT) |
288 | #define P_AUTO_EOB_SEL_256 (1 << P_AUTO_EOB_SEL_SHIFT) |
289 | #define P_AUTO_EOB_SEL_128 (2 << P_AUTO_EOB_SEL_SHIFT) |
290 | #define P_AUTO_EOB_SEL_64 (3 << P_AUTO_EOB_SEL_SHIFT) |
291 | #define P_PREFETCH_LIMIT_SHIFT 9 |
292 | #define P_PREFETCH_LIMIT_32 (0 << P_PREFETCH_LIMIT_SHIFT) |
293 | #define P_PREFETCH_LIMIT_16 (1 << P_PREFETCH_LIMIT_SHIFT) |
294 | #define P_PREFETCH_LIMIT_4 (2 << P_PREFETCH_LIMIT_SHIFT) |
295 | #define P_WRITE_NWD BIT(11) |
296 | #define P_LOCK_GROUP_SHIFT 16 |
297 | #define P_LOCK_GROUP_MASK 0x1F |
298 | |
299 | /* BAM_DESC_CNT_TRSHLD */ |
300 | #define CNT_TRSHLD 0xffff |
301 | #define DEFAULT_CNT_THRSHLD 0x4 |
302 | |
303 | /* BAM_IRQ_SRCS */ |
304 | #define BAM_IRQ BIT(31) |
305 | #define P_IRQ 0x7fffffff |
306 | |
307 | /* BAM_IRQ_SRCS_MSK */ |
308 | #define BAM_IRQ_MSK BAM_IRQ |
309 | #define P_IRQ_MSK P_IRQ |
310 | |
311 | /* BAM_IRQ_STTS */ |
312 | #define BAM_TIMER_IRQ BIT(4) |
313 | #define BAM_EMPTY_IRQ BIT(3) |
314 | #define BAM_ERROR_IRQ BIT(2) |
315 | #define BAM_HRESP_ERR_IRQ BIT(1) |
316 | |
317 | /* BAM_IRQ_CLR */ |
318 | #define BAM_TIMER_CLR BIT(4) |
319 | #define BAM_EMPTY_CLR BIT(3) |
320 | #define BAM_ERROR_CLR BIT(2) |
321 | #define BAM_HRESP_ERR_CLR BIT(1) |
322 | |
323 | /* BAM_IRQ_EN */ |
324 | #define BAM_TIMER_EN BIT(4) |
325 | #define BAM_EMPTY_EN BIT(3) |
326 | #define BAM_ERROR_EN BIT(2) |
327 | #define BAM_HRESP_ERR_EN BIT(1) |
328 | |
329 | /* BAM_P_IRQ_EN */ |
330 | #define P_PRCSD_DESC_EN BIT(0) |
331 | #define P_TIMER_EN BIT(1) |
332 | #define P_WAKE_EN BIT(2) |
333 | #define P_OUT_OF_DESC_EN BIT(3) |
334 | #define P_ERR_EN BIT(4) |
335 | #define P_TRNSFR_END_EN BIT(5) |
336 | #define P_DEFAULT_IRQS_EN (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN) |
337 | |
338 | /* BAM_P_SW_OFSTS */ |
339 | #define P_SW_OFSTS_MASK 0xffff |
340 | |
341 | #define BAM_DESC_FIFO_SIZE SZ_32K |
342 | #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1) |
343 | #define BAM_FIFO_SIZE (SZ_32K - 8) |
344 | #define IS_BUSY(chan) (CIRC_SPACE(bchan->tail, bchan->head,\ |
345 | MAX_DESCRIPTORS + 1) == 0) |
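/*
 * FIFO accounting: the 32 KiB allocation holds 4096 8-byte descriptors, but
 * one slot is kept unused so that head == tail unambiguously means "empty";
 * hence MAX_DESCRIPTORS is 32768 / 8 - 1 = 4095, the size programmed into the
 * pipe (BAM_FIFO_SIZE) is 32768 - 8 bytes, and the circular buffer helpers
 * are called with a ring size of MAX_DESCRIPTORS + 1. IS_BUSY() is true when
 * no free slot remains between tail and head.
 */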
346 | |
347 | struct bam_chan { |
348 | struct virt_dma_chan vc; |
349 | |
350 | struct bam_device *bdev; |
351 | |
352 | /* configuration from device tree */ |
353 | u32 id; |
354 | |
355 | /* runtime configuration */ |
356 | struct dma_slave_config slave; |
357 | |
358 | /* fifo storage */ |
359 | struct bam_desc_hw *fifo_virt; |
360 | dma_addr_t fifo_phys; |
361 | |
362 | /* fifo markers */ |
363 | unsigned short head; /* start of active descriptor entries */ |
364 | unsigned short tail; /* end of active descriptor entries */ |
365 | |
366 | unsigned int initialized; /* is the channel hw initialized? */ |
367 | unsigned int paused; /* is the channel paused? */ |
368 | unsigned int reconfigure; /* new slave config? */ |
369 | /* list of descriptors currently processed */ |
370 | struct list_head desc_list; |
371 | |
372 | struct list_head node; |
373 | }; |
374 | |
375 | static inline struct bam_chan *to_bam_chan(struct dma_chan *common) |
376 | { |
377 | return container_of(common, struct bam_chan, vc.chan); |
378 | } |
379 | |
380 | struct bam_device { |
381 | void __iomem *regs; |
382 | struct device *dev; |
383 | struct dma_device common; |
384 | struct bam_chan *channels; |
385 | u32 num_channels; |
386 | u32 num_ees; |
387 | |
388 | /* execution environment ID, from DT */ |
389 | u32 ee; |
390 | bool controlled_remotely; |
391 | bool powered_remotely; |
392 | u32 active_channels; |
393 | |
394 | const struct reg_offset_data *layout; |
395 | |
396 | struct clk *bamclk; |
397 | int irq; |
398 | |
399 | /* dma start transaction tasklet */ |
400 | struct tasklet_struct task; |
401 | }; |
402 | |
403 | /** |
404 | * bam_addr - returns BAM register address |
405 | * @bdev: bam device |
406 | * @pipe: pipe instance (ignored when register doesn't have multiple instances) |
407 | * @reg: register enum |
408 | */ |
409 | static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe, |
410 | enum bam_reg reg) |
411 | { |
412 | const struct reg_offset_data r = bdev->layout[reg]; |
413 | |
414 | return bdev->regs + r.base_offset + |
415 | r.pipe_mult * pipe + |
416 | r.evnt_mult * pipe + |
417 | r.ee_mult * bdev->ee; |
418 | } |
419 | |
420 | /** |
421 | * bam_reset() - reset and initialize BAM registers |
422 | * @bdev: bam device |
423 | */ |
424 | static void bam_reset(struct bam_device *bdev) |
425 | { |
426 | u32 val; |
427 | |
428 | /* s/w reset bam */ |
429 | /* after reset all pipes are disabled and idle */ |
430 | val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL)); |
431 | val |= BAM_SW_RST; |
432 | writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); |
433 | val &= ~BAM_SW_RST; |
434 | writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); |
435 | |
436 | /* make sure previous stores are visible before enabling BAM */ |
437 | wmb(); |
438 | |
439 | /* enable bam */ |
440 | val |= BAM_EN; |
441 | writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); |
442 | |
	/* set descriptor threshold, start with 4 bytes */
444 | writel_relaxed(DEFAULT_CNT_THRSHLD, |
445 | bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); |
446 | |
447 | /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */ |
448 | writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS)); |
449 | |
450 | /* enable irqs for errors */ |
451 | writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN, |
452 | bam_addr(bdev, 0, BAM_IRQ_EN)); |
453 | |
454 | /* unmask global bam interrupt */ |
455 | writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); |
456 | } |
457 | |
458 | /** |
459 | * bam_reset_channel - Reset individual BAM DMA channel |
460 | * @bchan: bam channel |
461 | * |
462 | * This function resets a specific BAM channel |
463 | */ |
464 | static void bam_reset_channel(struct bam_chan *bchan) |
465 | { |
466 | struct bam_device *bdev = bchan->bdev; |
467 | |
468 | lockdep_assert_held(&bchan->vc.lock); |
469 | |
470 | /* reset channel */ |
471 | writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST)); |
472 | writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST)); |
473 | |
474 | /* don't allow cpu to reorder BAM register accesses done after this */ |
475 | wmb(); |
476 | |
477 | /* make sure hw is initialized when channel is used the first time */ |
478 | bchan->initialized = 0; |
479 | } |
480 | |
481 | /** |
482 | * bam_chan_init_hw - Initialize channel hardware |
483 | * @bchan: bam channel |
484 | * @dir: DMA transfer direction |
485 | * |
486 | * This function resets and initializes the BAM channel |
487 | */ |
488 | static void bam_chan_init_hw(struct bam_chan *bchan, |
489 | enum dma_transfer_direction dir) |
490 | { |
491 | struct bam_device *bdev = bchan->bdev; |
492 | u32 val; |
493 | |
494 | /* Reset the channel to clear internal state of the FIFO */ |
495 | bam_reset_channel(bchan); |
496 | |
497 | /* |
498 | * write out 8 byte aligned address. We have enough space for this |
499 | * because we allocated 1 more descriptor (8 bytes) than we can use |
500 | */ |
501 | writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), |
502 | bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR)); |
503 | writel_relaxed(BAM_FIFO_SIZE, |
504 | bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES)); |
505 | |
506 | /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */ |
507 | writel_relaxed(P_DEFAULT_IRQS_EN, |
508 | bam_addr(bdev, bchan->id, BAM_P_IRQ_EN)); |
509 | |
510 | /* unmask the specific pipe and EE combo */ |
511 | val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); |
512 | val |= BIT(bchan->id); |
513 | writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE)); |
514 | |
515 | /* don't allow cpu to reorder the channel enable done below */ |
516 | wmb(); |
517 | |
518 | /* set fixed direction and mode, then enable channel */ |
519 | val = P_EN | P_SYS_MODE; |
520 | if (dir == DMA_DEV_TO_MEM) |
521 | val |= P_DIRECTION; |
522 | |
523 | writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL)); |
524 | |
525 | bchan->initialized = 1; |
526 | |
527 | /* init FIFO pointers */ |
528 | bchan->head = 0; |
529 | bchan->tail = 0; |
530 | } |
531 | |
532 | /** |
533 | * bam_alloc_chan - Allocate channel resources for DMA channel. |
534 | * @chan: specified channel |
535 | * |
536 | * This function allocates the FIFO descriptor memory |
537 | */ |
static int bam_alloc_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;

	if (bchan->fifo_virt)
		return 0;

	/* allocate FIFO descriptor space, but only if necessary */
	bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
					&bchan->fifo_phys, GFP_KERNEL);

	if (!bchan->fifo_virt) {
		dev_err(bdev->dev, "Failed to allocate desc fifo\n");
		return -ENOMEM;
	}

	if (bdev->active_channels++ == 0 && bdev->powered_remotely)
		bam_reset(bdev);

	return 0;
}

/**
 * bam_free_chan - Frees dma resources associated with specific channel
 * @chan: specified channel
 *
 * Free the allocated fifo descriptor memory and channel resources
 *
 */
static void bam_free_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	u32 val;
	unsigned long flags;
	int ret;

	ret = pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return;

	vchan_free_chan_resources(to_virt_chan(chan));

	if (!list_empty(&bchan->desc_list)) {
		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
		goto err;
	}

	spin_lock_irqsave(&bchan->vc.lock, flags);
	bam_reset_channel(bchan);
	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
		    bchan->fifo_phys);
	bchan->fifo_virt = NULL;

	/* mask irq for pipe/channel */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val &= ~BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	/* disable irq */
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));

	if (--bdev->active_channels == 0 && bdev->powered_remotely) {
		/* s/w reset bam */
		val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
		val |= BAM_SW_RST;
		writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
	}

err:
	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}
614 | |
/**
 * bam_slave_config - set slave configuration for channel
 * @chan: dma channel
 * @cfg: slave configuration
 *
 * Sets slave configuration for channel
 *
 */
static int bam_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *cfg)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flag;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	memcpy(&bchan->slave, cfg, sizeof(*cfg));
	bchan->reconfigure = 1;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	return 0;
}

/**
 * bam_prep_slave_sg - Prep slave sg transaction
 *
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct scatterlist *sg;
	u32 i;
	struct bam_desc_hw *desc;
	unsigned int num_alloc = 0;

	if (!is_slave_direction(direction)) {
		dev_err(bdev->dev, "invalid dma direction\n");
		return NULL;
	}

	/* calculate number of required entries */
	for_each_sg(sgl, sg, sg_len, i)
		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);

	/* allocate enough room to accommodate the number of entries */
	async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
			     GFP_NOWAIT);

	if (!async_desc)
		return NULL;

	if (flags & DMA_PREP_FENCE)
		async_desc->flags |= DESC_FLAG_NWD;

	if (flags & DMA_PREP_INTERRUPT)
		async_desc->flags |= DESC_FLAG_EOT;

	async_desc->num_desc = num_alloc;
	async_desc->curr_desc = async_desc->desc;
	async_desc->dir = direction;

	/* fill in temporary descriptors */
	desc = async_desc->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		unsigned int remainder = sg_dma_len(sg);
		unsigned int curr_offset = 0;

		do {
			if (flags & DMA_PREP_CMD)
				desc->flags |= cpu_to_le16(DESC_FLAG_CMD);

			desc->addr = cpu_to_le32(sg_dma_address(sg) +
						 curr_offset);

			if (remainder > BAM_FIFO_SIZE) {
				desc->size = cpu_to_le16(BAM_FIFO_SIZE);
				remainder -= BAM_FIFO_SIZE;
				curr_offset += BAM_FIFO_SIZE;
			} else {
				desc->size = cpu_to_le16(remainder);
				remainder = 0;
			}

			async_desc->length += le16_to_cpu(desc->size);
			desc++;
		} while (remainder > 0);
	}

	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
}
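/*
 * Sizing example for bam_prep_slave_sg() (illustrative numbers): a single
 * 100 KiB (102400 byte) scatterlist entry needs
 * DIV_ROUND_UP(102400, BAM_FIFO_SIZE = 32760) = 4 hardware descriptors,
 * i.e. three 32760-byte descriptors followed by one 4120-byte descriptor.
 */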
716 | |
/**
 * bam_dma_terminate_all - terminate all transactions on a channel
 * @chan: bam dma channel
 *
 * Dequeues and frees all transactions
 * No callbacks are done
 *
 */
static int bam_dma_terminate_all(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_async_desc *async_desc, *tmp;
	unsigned long flag;
	LIST_HEAD(head);

	/* remove all transactions, including active transaction */
	spin_lock_irqsave(&bchan->vc.lock, flag);
	/*
	 * If we have transactions queued, then some might be committed to the
	 * hardware in the desc fifo. The only way to reset the desc fifo is
	 * to do a hardware reset (either by pipe or the entire block).
	 * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
	 * pipe. If the pipe is left disabled (default state after pipe reset)
	 * and is accessed by a connected hardware engine, a fatal error in
	 * the BAM will occur. There is a small window where this could happen
	 * with bam_chan_init_hw(), but it is assumed that the caller has
	 * stopped activity on any attached hardware engine. Make sure to do
	 * this first so that the BAM hardware doesn't cause memory corruption
	 * by accessing freed resources.
	 */
	if (!list_empty(&bchan->desc_list)) {
		async_desc = list_first_entry(&bchan->desc_list,
					      struct bam_async_desc, desc_node);
		bam_chan_init_hw(bchan, async_desc->dir);
	}

	list_for_each_entry_safe(async_desc, tmp,
				 &bchan->desc_list, desc_node) {
		list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
		list_del(&async_desc->desc_node);
	}

	vchan_get_all_descriptors(&bchan->vc, &head);
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	vchan_dma_desc_free_list(&bchan->vc, &head);

	return 0;
}

/**
 * bam_pause - Pause DMA channel
 * @chan: dma channel
 *
 */
static int bam_pause(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	unsigned long flag;
	int ret;

	ret = pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
	bchan->paused = 1;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);
	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);

	return 0;
}

/**
 * bam_resume - Resume DMA channel operations
 * @chan: dma channel
 *
 */
static int bam_resume(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	unsigned long flag;
	int ret;

	ret = pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
	bchan->paused = 0;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);
	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);

	return 0;
}
818 | |
/**
 * process_channel_irqs - processes the channel interrupts
 * @bdev: bam controller
 *
 * This function processes the channel interrupts
 *
 */
static u32 process_channel_irqs(struct bam_device *bdev)
{
	u32 i, srcs, pipe_stts, offset, avail;
	unsigned long flags;
	struct bam_async_desc *async_desc, *tmp;

	srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));

	/* return early if no pipe/channel interrupts are present */
	if (!(srcs & P_IRQ))
		return srcs;

	for (i = 0; i < bdev->num_channels; i++) {
		struct bam_chan *bchan = &bdev->channels[i];

		if (!(srcs & BIT(i)))
			continue;

		/* clear pipe irq */
		pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));

		writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));

		spin_lock_irqsave(&bchan->vc.lock, flags);

		offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
				       P_SW_OFSTS_MASK;
		offset /= sizeof(struct bam_desc_hw);

		/* Number of processed descriptors available to collect */
		avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);

		if (offset < bchan->head)
			avail--;

		list_for_each_entry_safe(async_desc, tmp,
					 &bchan->desc_list, desc_node) {
			/* Not enough data to read */
			if (avail < async_desc->xfer_len)
				break;

			/* manage FIFO */
			bchan->head += async_desc->xfer_len;
			bchan->head %= MAX_DESCRIPTORS;

			async_desc->num_desc -= async_desc->xfer_len;
			async_desc->curr_desc += async_desc->xfer_len;
			avail -= async_desc->xfer_len;

			/*
			 * if complete, process cookie. Otherwise
			 * push back to front of desc_issued so that
			 * it gets restarted by the tasklet
			 */
			if (!async_desc->num_desc) {
				vchan_cookie_complete(&async_desc->vd);
			} else {
				list_add(&async_desc->vd.node,
					 &bchan->vc.desc_issued);
			}
			list_del(&async_desc->desc_node);
		}

		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}

	return srcs;
}
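/*
 * Note on the "offset < bchan->head" adjustment in process_channel_irqs():
 * the pipe is programmed with BAM_FIFO_SIZE (one descriptor less than the
 * 32 KiB allocation), so the hardware offset appears to wrap after
 * MAX_DESCRIPTORS slots while CIRC_CNT() is evaluated with a ring size of
 * MAX_DESCRIPTORS + 1; after a hardware wrap the count would be one too high
 * and is corrected here. This rationale is inferred from the FIFO sizing
 * above rather than stated by hardware documentation.
 */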
894 | |
/**
 * bam_dma_irq - irq handler for bam controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * IRQ handler for the bam controller
 */
static irqreturn_t bam_dma_irq(int irq, void *data)
{
	struct bam_device *bdev = data;
	u32 clr_mask = 0, srcs = 0;
	int ret;

	srcs |= process_channel_irqs(bdev);

	/* kick off tasklet to start next dma transfer */
	if (srcs & P_IRQ)
		tasklet_schedule(&bdev->task);

	ret = pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return IRQ_NONE;

	if (srcs & BAM_IRQ) {
		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));

		/*
		 * don't allow reorder of the various accesses to the BAM
		 * registers
		 */
		mb();

		writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
	}

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);

	return IRQ_HANDLED;
}
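/*
 * Interrupt flow summary: bam_dma_irq() first walks the per-pipe sources via
 * process_channel_irqs() (completing or re-queueing descriptors), schedules
 * the tasklet to push any remaining work to the hardware, and finally
 * acknowledges the top-level BAM error/empty/timer interrupts by copying
 * BAM_IRQ_STTS into BAM_IRQ_CLR.
 */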
935 | |
/**
 * bam_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction
 */
static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_async_desc *async_desc;
	struct virt_dma_desc *vd;
	int ret;
	size_t residue = 0;
	unsigned int i;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return bchan->paused ? DMA_PAUSED : ret;

	spin_lock_irqsave(&bchan->vc.lock, flags);
	vd = vchan_find_desc(&bchan->vc, cookie);
	if (vd) {
		residue = container_of(vd, struct bam_async_desc, vd)->length;
	} else {
		list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
			if (async_desc->vd.tx.cookie != cookie)
				continue;

			for (i = 0; i < async_desc->num_desc; i++)
				residue += le16_to_cpu(
						async_desc->curr_desc[i].size);
		}
	}

	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	if (ret == DMA_IN_PROGRESS && bchan->paused)
		ret = DMA_PAUSED;

	return ret;
}

/**
 * bam_apply_new_config - apply any queued slave configuration to the hardware
 * @bchan: bam dma channel
 * @dir: DMA direction
 */
static void bam_apply_new_config(struct bam_chan *bchan,
				 enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 maxburst;

	if (!bdev->controlled_remotely) {
		if (dir == DMA_DEV_TO_MEM)
			maxburst = bchan->slave.src_maxburst;
		else
			maxburst = bchan->slave.dst_maxburst;

		writel_relaxed(maxburst,
			       bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
	}

	bchan->reconfigure = 0;
}
1010 | |
/**
 * bam_start_dma - start next transaction
 * @bchan: bam dma channel
 */
static void bam_start_dma(struct bam_chan *bchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc = NULL;
	struct bam_desc_hw *desc;
	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
					     sizeof(struct bam_desc_hw));
	int ret;
	unsigned int avail;
	struct dmaengine_desc_callback cb;

	lockdep_assert_held(&bchan->vc.lock);

	if (!vd)
		return;

	ret = pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return;

	while (vd && !IS_BUSY(bchan)) {
		list_del(&vd->node);

		async_desc = container_of(vd, struct bam_async_desc, vd);

		/* on first use, initialize the channel hardware */
		if (!bchan->initialized)
			bam_chan_init_hw(bchan, async_desc->dir);

		/* apply new slave config changes, if necessary */
		if (bchan->reconfigure)
			bam_apply_new_config(bchan, async_desc->dir);

		desc = async_desc->curr_desc;
		avail = CIRC_SPACE(bchan->tail, bchan->head,
				   MAX_DESCRIPTORS + 1);

		if (async_desc->num_desc > avail)
			async_desc->xfer_len = avail;
		else
			async_desc->xfer_len = async_desc->num_desc;

		/* set any special flags on the last descriptor */
		if (async_desc->num_desc == async_desc->xfer_len)
			desc[async_desc->xfer_len - 1].flags |=
						cpu_to_le16(async_desc->flags);

		vd = vchan_next_desc(&bchan->vc);

		dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);

		/*
		 * An interrupt is generated at this desc, if
		 *  - FIFO is FULL.
		 *  - No more descriptors to add.
		 *  - If a callback completion was requested for this DESC,
		 *    In this case, BAM will deliver the completion callback
		 *    for this desc and continue processing the next desc.
		 */
		if (((avail <= async_desc->xfer_len) || !vd ||
		     dmaengine_desc_callback_valid(&cb)) &&
		    !(async_desc->flags & DESC_FLAG_EOT))
			desc[async_desc->xfer_len - 1].flags |=
				cpu_to_le16(DESC_FLAG_INT);

		if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
			u32 partial = MAX_DESCRIPTORS - bchan->tail;

			memcpy(&fifo[bchan->tail], desc,
			       partial * sizeof(struct bam_desc_hw));
			memcpy(fifo, &desc[partial],
			       (async_desc->xfer_len - partial) *
			       sizeof(struct bam_desc_hw));
		} else {
			memcpy(&fifo[bchan->tail], desc,
			       async_desc->xfer_len *
			       sizeof(struct bam_desc_hw));
		}

		bchan->tail += async_desc->xfer_len;
		bchan->tail %= MAX_DESCRIPTORS;
		list_add_tail(&async_desc->desc_node, &bchan->desc_list);
	}

	/* ensure descriptor writes and dma start not reordered */
	wmb();
	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
		       bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}
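/*
 * Wrap-around example for the FIFO copy in bam_start_dma() (illustrative
 * numbers): with MAX_DESCRIPTORS = 4095, tail = 4090 and xfer_len = 10, the
 * first memcpy() writes partial = 4095 - 4090 = 5 descriptors at the end of
 * the FIFO and the second writes the remaining 5 starting at slot 0; tail
 * then becomes (4090 + 10) % 4095 = 5.
 */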
1108 | |
/**
 * dma_tasklet - DMA IRQ tasklet
 * @t: tasklet argument (bam controller structure)
 *
 * Walks the channels and starts any pending transactions on those that are
 * idle
 */
static void dma_tasklet(struct tasklet_struct *t)
{
	struct bam_device *bdev = from_tasklet(bdev, t, task);
	struct bam_chan *bchan;
	unsigned long flags;
	unsigned int i;

	/* go through the channels and kick off transactions */
	for (i = 0; i < bdev->num_channels; i++) {
		bchan = &bdev->channels[i];
		spin_lock_irqsave(&bchan->vc.lock, flags);

		if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
			bam_start_dma(bchan);
		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}
}

/**
 * bam_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Moves submitted descriptors to the issued list and starts a transaction
 * directly if the channel is idle
 */
static void bam_issue_pending(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&bchan->vc.lock, flags);

	/* if work pending and idle, start a transaction */
	if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
		bam_start_dma(bchan);

	spin_unlock_irqrestore(&bchan->vc.lock, flags);
}

/**
 * bam_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 *
 */
static void bam_dma_free_desc(struct virt_dma_desc *vd)
{
	struct bam_async_desc *async_desc = container_of(vd,
			struct bam_async_desc, vd);

	kfree(async_desc);
}

static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *of)
{
	struct bam_device *bdev = container_of(of->of_dma_data,
					       struct bam_device, common);
	unsigned int request;

	if (dma_spec->args_count != 1)
		return NULL;

	request = dma_spec->args[0];
	if (request >= bdev->num_channels)
		return NULL;

	return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
}
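/*
 * bam_dma_xlate() consumes a single #dma-cells argument: the pipe index,
 * bounds-checked against num_channels. A client node would therefore
 * reference a pipe roughly as follows (hypothetical node names):
 *
 *	dmas = <&blsp_dma 7>, <&blsp_dma 8>;
 *	dma-names = "tx", "rx";
 */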
1183 | |
/**
 * bam_init
 * @bdev: bam device
 *
 * Initialization helper for global bam registers
 */
static int bam_init(struct bam_device *bdev)
{
	u32 val;

	/* read revision and configuration information */
	if (!bdev->num_ees) {
		val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
		bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
	}

	/* check that configured EE is within range */
	if (bdev->ee >= bdev->num_ees)
		return -EINVAL;

	if (!bdev->num_channels) {
		val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
		bdev->num_channels = val & BAM_NUM_PIPES_MASK;
	}

	/* Reset BAM now if fully controlled locally */
	if (!bdev->controlled_remotely && !bdev->powered_remotely)
		bam_reset(bdev);

	return 0;
}

static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
			     u32 index)
{
	bchan->id = index;
	bchan->bdev = bdev;

	vchan_init(&bchan->vc, &bdev->common);
	bchan->vc.desc_free = bam_dma_free_desc;
	INIT_LIST_HEAD(&bchan->desc_list);
}

static const struct of_device_id bam_of_match[] = {
	{ .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
	{ .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
	{ .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info },
	{}
};

MODULE_DEVICE_TABLE(of, bam_of_match);
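/*
 * Minimal device tree sketch for this controller (property names are taken
 * from the probe code below; the unit address, register, interrupt and clock
 * values are placeholders):
 *
 *	dma-controller@f9944000 {
 *		compatible = "qcom,bam-v1.4.0";
 *		reg = <0xf9944000 0x15000>;
 *		interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&gcc GCC_BLSP2_AHB_CLK>;
 *		clock-names = "bam_clk";
 *		#dma-cells = <1>;
 *		qcom,ee = <0>;
 *	};
 */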
1235 | |
static int bam_dma_probe(struct platform_device *pdev)
{
	struct bam_device *bdev;
	const struct of_device_id *match;
	int ret, i;

	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->dev = &pdev->dev;

	match = of_match_node(bam_of_match, pdev->dev.of_node);
	if (!match) {
		dev_err(&pdev->dev, "Unsupported BAM module\n");
		return -ENODEV;
	}

	bdev->layout = match->data;

	bdev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bdev->regs))
		return PTR_ERR(bdev->regs);

	bdev->irq = platform_get_irq(pdev, 0);
	if (bdev->irq < 0)
		return bdev->irq;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
	if (ret) {
		dev_err(bdev->dev, "Execution environment unspecified\n");
		return ret;
	}

	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
						"qcom,controlled-remotely");
	bdev->powered_remotely = of_property_read_bool(pdev->dev.of_node,
						"qcom,powered-remotely");

	if (bdev->controlled_remotely || bdev->powered_remotely)
		bdev->bamclk = devm_clk_get_optional(bdev->dev, "bam_clk");
	else
		bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");

	if (IS_ERR(bdev->bamclk))
		return PTR_ERR(bdev->bamclk);

	if (!bdev->bamclk) {
		ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
					   &bdev->num_channels);
		if (ret)
			dev_err(bdev->dev, "num-channels unspecified in dt\n");

		ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
					   &bdev->num_ees);
		if (ret)
			dev_err(bdev->dev, "num-ees unspecified in dt\n");
	}

	ret = clk_prepare_enable(bdev->bamclk);
	if (ret) {
		dev_err(bdev->dev, "failed to prepare/enable clock\n");
		return ret;
	}

	ret = bam_init(bdev);
	if (ret)
		goto err_disable_clk;

	tasklet_setup(&bdev->task, dma_tasklet);

	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
				      sizeof(*bdev->channels), GFP_KERNEL);

	if (!bdev->channels) {
		ret = -ENOMEM;
		goto err_tasklet_kill;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&bdev->common.channels);

	for (i = 0; i < bdev->num_channels; i++)
		bam_channel_init(bdev, &bdev->channels[i], i);

	ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
			       IRQF_TRIGGER_HIGH, "bam_dma", bdev);
	if (ret)
		goto err_bam_channel_exit;

	/* set max dma segment size */
	bdev->common.dev = bdev->dev;
	ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
	if (ret) {
		dev_err(bdev->dev, "cannot set maximum segment size\n");
		goto err_bam_channel_exit;
	}

	platform_set_drvdata(pdev, bdev);

	/* set capabilities */
	dma_cap_zero(bdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);

	/* initialize dmaengine apis */
	bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
	bdev->common.device_free_chan_resources = bam_free_chan;
	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
	bdev->common.device_config = bam_slave_config;
	bdev->common.device_pause = bam_pause;
	bdev->common.device_resume = bam_resume;
	bdev->common.device_terminate_all = bam_dma_terminate_all;
	bdev->common.device_issue_pending = bam_issue_pending;
	bdev->common.device_tx_status = bam_tx_status;
	bdev->common.dev = bdev->dev;

	ret = dma_async_device_register(&bdev->common);
	if (ret) {
		dev_err(bdev->dev, "failed to register dma async device\n");
		goto err_bam_channel_exit;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
					 &bdev->common);
	if (ret)
		goto err_unregister_dma;

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&bdev->common);
err_bam_channel_exit:
	for (i = 0; i < bdev->num_channels; i++)
		tasklet_kill(&bdev->channels[i].vc.task);
err_tasklet_kill:
	tasklet_kill(&bdev->task);
err_disable_clk:
	clk_disable_unprepare(bdev->bamclk);

	return ret;
}
1388 | |
static void bam_dma_remove(struct platform_device *pdev)
{
	struct bam_device *bdev = platform_get_drvdata(pdev);
	u32 i;

	pm_runtime_force_suspend(&pdev->dev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&bdev->common);

	/* mask all interrupts for this execution environment */
	writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	devm_free_irq(bdev->dev, bdev->irq, bdev);

	for (i = 0; i < bdev->num_channels; i++) {
		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
		tasklet_kill(&bdev->channels[i].vc.task);

		if (!bdev->channels[i].fifo_virt)
			continue;

		dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
			    bdev->channels[i].fifo_virt,
			    bdev->channels[i].fifo_phys);
	}

	tasklet_kill(&bdev->task);

	clk_disable_unprepare(bdev->bamclk);
}

static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);

	clk_disable(bdev->bamclk);

	return 0;
}

static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(bdev->bamclk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}

	return 0;
}

static int __maybe_unused bam_dma_suspend(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);

	pm_runtime_force_suspend(dev);
	clk_unprepare(bdev->bamclk);

	return 0;
}

static int __maybe_unused bam_dma_resume(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare(bdev->bamclk);
	if (ret)
		return ret;

	pm_runtime_force_resume(dev);

	return 0;
}

static const struct dev_pm_ops bam_dma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
	SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
			   NULL)
};

static struct platform_driver bam_dma_driver = {
	.probe = bam_dma_probe,
	.remove_new = bam_dma_remove,
	.driver = {
		.name = "bam-dma-engine",
		.pm = &bam_dma_pm_ops,
		.of_match_table = bam_of_match,
	},
};

module_platform_driver(bam_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
MODULE_LICENSE("GPL v2");
1489 | |