// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2023 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "reg.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"
/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core. The modem uses the GSI layer as well.
 *
 *      --------             ---------
 *      |      |             |       |
 *      |  AP  +<---.   .----+ Modem |
 *      |      +--. |   | .->+       |
 *      |      |  | |   | |  |       |
 *      --------  | |   | |  ---------
 *                v |   v |
 *              --+-+---+-+--
 *              |    GSI    |
 *              |-----------|
 *              |           |
 *              |    IPA    |
 *              |           |
 *              -------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA. A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA. A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM. After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE) writes
 * a doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it. An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted. Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags. One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring. Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order. Completion of one entry implies the completion of all preceding
 * entries. A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses. The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
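
/* For orientation only: a simplified sketch of a transfer element as
 * described in the DOC text above. This is illustrative, not authoritative;
 * the real layout is the struct gsi_tre definition used by the transaction
 * code (see gsi_trans.c), not this comment:
 *
 *	struct gsi_tre {
 *		__le64 addr;		// DMA address of the data block
 *		__le16 len_opcode;	// transfer length or immediate command
 *		__le16 reserved;
 *		__le32 flags;		// chain/interrupt-control flags
 *	};
 */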

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			50	/* milliseconds */

#define GSI_CHANNEL_STOP_RETRIES	10
#define GSI_CHANNEL_MODEM_HALT_RETRIES	10
#define GSI_CHANNEL_MODEM_FLOW_RETRIES	5	/* disable flow control only */

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};
/** gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum amount of TRE data allowed in a single
 *	transaction on a channel, expressed in bytes (the TRE count times
 *	the ring element size). This determines the amount of prefetch
 *	performed by the hardware. We configure this to equal the size of
 *	the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell. We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/** gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring element are
	 * the same (and fixed). Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size. We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

/* An initialized channel has a non-null GSI pointer */
static bool gsi_channel_initialized(struct gsi_channel *channel)
{
	return !!channel->gsi;
}

/* Encode the channel protocol for the CH_C_CNTXT_0 register */
static u32 ch_c_cntxt_0_type_encode(enum ipa_version version,
				    const struct reg *reg,
				    enum gsi_channel_type type)
{
	u32 val;

	val = reg_encode(reg, CHTYPE_PROTOCOL, type);
	if (version < IPA_VERSION_4_5 || version >= IPA_VERSION_5_0)
		return val;

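	/* For IPA v4.5 through v4.11, the protocol value doesn't fit
	 * entirely within the CHTYPE_PROTOCOL field; shift off the bits
	 * just encoded and put the remainder in CHTYPE_PROTOCOL_MSB.
	 */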
	type >>= hweight32(reg_fmask(reg, CHTYPE_PROTOCOL));

	return val | reg_encode(reg, CHTYPE_PROTOCOL_MSB, type);
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
	const struct reg *reg = gsi_reg(gsi, CNTXT_TYPE_IRQ_MSK);

	gsi->type_enabled_bitmap = val;
	iowrite32(val, gsi->virt + reg_offset(reg));
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | type_id);
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~type_id);
}

/* Event ring commands are performed one at a time. Their completion
 * is signaled by the event ring control GSI interrupt type, which is
 * only enabled when we issue an event ring command. Only the event
 * ring being operated on has this interrupt enabled.
 */
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val = BIT(evt_ring_id);
	const struct reg *reg;

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
	iowrite32(~0, gsi->virt + reg_offset(reg));

	reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
	iowrite32(val, gsi->virt + reg_offset(reg));
	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
	const struct reg *reg;

	gsi_irq_type_disable(gsi, GSI_EV_CTRL);

	reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
	iowrite32(0, gsi->virt + reg_offset(reg));
}

/* Channel commands are performed one at a time. Their completion is
 * signaled by the channel control GSI interrupt type, which is only
 * enabled when we issue a channel command. Only the channel being
 * operated on has this interrupt enabled.
 */
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
	u32 val = BIT(channel_id);
	const struct reg *reg;

	/* There's a small chance that a previous command completed
	 * after the interrupt was disabled, so make sure we have no
	 * pending interrupts before we enable them.
	 */
	reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
	iowrite32(~0, gsi->virt + reg_offset(reg));

	reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
	iowrite32(val, gsi->virt + reg_offset(reg));

	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}

/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
	const struct reg *reg;

	gsi_irq_type_disable(gsi, GSI_CH_CTRL);

	reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
	iowrite32(0, gsi->virt + reg_offset(reg));
}

static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
	bool enable_ieob = !gsi->ieob_enabled_bitmap;
	const struct reg *reg;
	u32 val;

	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);

	reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + reg_offset(reg));

	/* Enable the interrupt type if this is the first channel enabled */
	if (enable_ieob)
		gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
	const struct reg *reg;
	u32 val;

	gsi->ieob_enabled_bitmap &= ~event_mask;

	/* Disable the interrupt type if this was the last enabled channel */
	if (!gsi->ieob_enabled_bitmap)
		gsi_irq_type_disable(gsi, GSI_IEOB);

	reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
	val = gsi->ieob_enabled_bitmap;
	iowrite32(val, gsi->virt + reg_offset(reg));
}

static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
{
	gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	const struct reg *reg;
	u32 val;

	/* Global interrupts include hardware error reports. Enable
	 * that so we can at least report the error should it occur.
	 */
	reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
	iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));

	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GLOB_EE);

	/* General GSI interrupts are reported to all EEs; if they occur
	 * they are unrecoverable (without reset). A breakpoint interrupt
	 * also exists, but we don't support that. We want to be notified
	 * of errors so we can report them, even if they can't be handled.
	 */
	reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
	val = BUS_ERROR;
	val |= CMD_FIFO_OVRFLOW;
	val |= MCS_STACK_OVRFLOW;
	iowrite32(val, gsi->virt + reg_offset(reg));

	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GENERAL);
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	const struct reg *reg;

	gsi_irq_type_update(gsi, 0);

	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
	reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
	iowrite32(0, gsi->virt + reg_offset(reg));

	reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
	iowrite32(0, gsi->virt + reg_offset(reg));
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}

/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled. Returns true if the command completes
 * or false if it times out.
 */
static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
	struct completion *completion = &gsi->completion;

	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, timeout);
}

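/* Note that callers of gsi_command() enable the completion interrupt
 * for the specific command beforehand, and disable it again afterward;
 * see gsi_evt_ring_command() and gsi_channel_command() below.
 */
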
/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	const struct reg *reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
	u32 val;

	val = ioread32(gsi->virt + reg_n_offset(reg, evt_ring_id));

	return reg_decode(reg, EV_CHSTATE, val);
}

/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
				 enum gsi_evt_cmd_opcode opcode)
{
	struct device *dev = gsi->dev;
	const struct reg *reg;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

	reg = gsi_reg(gsi, EV_CH_CMD);
	val = reg_encode(reg, EV_CHID, evt_ring_id);
	val |= reg_encode(reg, EV_OPCODE, opcode);

	timeout = !gsi_command(gsi, reg_offset(reg), val);

	gsi_irq_ev_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
		opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	/* Get initial event ring state */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
			evt_ring_id, state);
		return -EINVAL;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return 0;

	dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
		evt_ring_id, state);

	return -EIO;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
		evt_ring_id, state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	enum gsi_evt_ring_state state;

	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
			evt_ring_id, state);
		return;
	}

	gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

	/* If successful the event ring state will have changed */
	state = gsi_evt_ring_state(gsi, evt_ring_id);
	if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
		return;

	dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
		evt_ring_id, state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	void __iomem *virt = gsi->virt;
	const struct reg *reg;
	u32 val;

	reg = gsi_reg(gsi, CH_C_CNTXT_0);
	val = ioread32(virt + reg_n_offset(reg, channel_id));

	return reg_decode(reg, CHSTATE, val);
}

/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	struct device *dev = gsi->dev;
	const struct reg *reg;
	bool timeout;
	u32 val;

	/* Enable the completion interrupt for the command */
	gsi_irq_ch_ctrl_enable(gsi, channel_id);

	reg = gsi_reg(gsi, CH_CMD);
	val = reg_encode(reg, CH_CHID, channel_id);
	val |= reg_encode(reg, CH_OPCODE, opcode);

	timeout = !gsi_command(gsi, reg_offset(reg), val);

	gsi_irq_ch_ctrl_disable(gsi);

	if (!timeout)
		return;

	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
		opcode, channel_id, gsi_channel_state(channel));
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before alloc\n",
			channel_id, state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_ALLOCATED)
		return 0;

	dev_err(dev, "channel %u bad state %u after alloc\n",
		channel_id, state);

	return -EIO;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
		dev_err(dev, "channel %u bad state %u before start\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_START);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STARTED)
		return 0;

	dev_err(dev, "channel %u bad state %u after start\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);

	/* Channel could have entered STOPPED state since last call
	 * if it timed out. If so, we're done.
	 */
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
		dev_err(dev, "channel %u bad state %u before stop\n",
			gsi_channel_id(channel), state);
		return -EINVAL;
	}

	gsi_channel_command(channel, GSI_CH_STOP);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(dev, "channel %u bad state %u after stop\n",
		gsi_channel_id(channel), state);

	return -EIO;
}

/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	struct device *dev = channel->gsi->dev;
	enum gsi_channel_state state;

	/* A short delay is required before a RESET command */
	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		/* No need to reset a channel already in ALLOCATED state */
		if (state != GSI_CHANNEL_STATE_ALLOCATED)
			dev_err(dev, "channel %u bad state %u before reset\n",
				gsi_channel_id(channel), state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_RESET);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after reset\n",
			gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct device *dev = gsi->dev;
	enum gsi_channel_state state;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(dev, "channel %u bad state %u before dealloc\n",
			channel_id, state);
		return;
	}

	gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* If successful the channel state will have changed */
	state = gsi_channel_state(channel);

	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(dev, "channel %u bad state %u after dealloc\n",
			channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell. Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	const struct reg *reg = gsi_reg(gsi, EV_CH_E_DOORBELL_0);
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
}

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct gsi_ring *ring = &evt_ring->ring;
	const struct reg *reg;
	u32 val;

	reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
	/* We program all event rings as GPI type/protocol */
	val = reg_encode(reg, EV_CHTYPE, GSI_CHANNEL_TYPE_GPI);
	/* EV_EE field is 0 (GSI_EE_AP) */
	val |= reg_bit(reg, EV_INTYPE);
	val |= reg_encode(reg, EV_ELEMENT_SIZE, GSI_RING_ELEMENT_SIZE);
	iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));

	reg = gsi_reg(gsi, EV_CH_E_CNTXT_1);
	val = reg_encode(reg, R_LENGTH, ring->count * GSI_RING_ELEMENT_SIZE);
	iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	reg = gsi_reg(gsi, EV_CH_E_CNTXT_2);
	val = lower_32_bits(ring->addr);
	iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));

	reg = gsi_reg(gsi, EV_CH_E_CNTXT_3);
	val = upper_32_bits(ring->addr);
	iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	reg = gsi_reg(gsi, EV_CH_E_CNTXT_8);
	val = reg_encode(reg, EV_MODT, GSI_EVT_RING_INT_MODT);
	val |= reg_encode(reg, EV_MODC, 1);	/* comes from channel */
	/* EV_MOD_CNT is 0 (no counter-based interrupt coalescing) */
	iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));

	/* No MSI write data, and MSI high and low address is 0 */
	reg = gsi_reg(gsi, EV_CH_E_CNTXT_9);
	iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));

	reg = gsi_reg(gsi, EV_CH_E_CNTXT_10);
	iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));

	reg = gsi_reg(gsi, EV_CH_E_CNTXT_11);
	iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));

	/* We don't need to get event read pointer updates */
	reg = gsi_reg(gsi, EV_CH_E_CNTXT_12);
	iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));

	reg = gsi_reg(gsi, EV_CH_E_CNTXT_13);
	iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));

	/* Finally, tell the hardware our "last processed" event (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
}

/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	u32 pending_id = trans_info->pending_id;
	struct gsi_trans *trans;
	u16 trans_id;

	if (channel->toward_ipa && pending_id != trans_info->free_id) {
		/* There is a small chance a TX transaction got allocated
		 * just before we disabled transmits, so check for that.
		 * The last allocated, committed, or pending transaction
		 * precedes the first free transaction.
		 */
		trans_id = trans_info->free_id - 1;
	} else if (trans_info->polled_id != pending_id) {
		/* Otherwise (TX or RX) we want to wait for anything that
		 * has completed, or has been polled but not released yet.
		 *
		 * The last completed or polled transaction precedes the
		 * first pending transaction.
		 */
		trans_id = pending_id - 1;
	} else {
		return NULL;
	}

	/* Caller will wait for this, so take a reference */
	trans = &trans_info->trans[trans_id % channel->tre_count];
	refcount_inc(&trans->refcount);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Program a channel for use; there is no gsi_channel_deprogram() */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	const struct reg *reg;
	u32 wrr_weight = 0;
	u32 offset;
	u32 val;

	reg = gsi_reg(gsi, CH_C_CNTXT_0);

	/* We program all channels as GPI type/protocol */
	val = ch_c_cntxt_0_type_encode(gsi->version, reg, GSI_CHANNEL_TYPE_GPI);
	if (channel->toward_ipa)
		val |= reg_bit(reg, CHTYPE_DIR);
	if (gsi->version < IPA_VERSION_5_0)
		val |= reg_encode(reg, ERINDEX, channel->evt_ring_id);
	val |= reg_encode(reg, ELEMENT_SIZE, GSI_RING_ELEMENT_SIZE);
	iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

	reg = gsi_reg(gsi, CH_C_CNTXT_1);
	val = reg_encode(reg, CH_R_LENGTH, size);
	if (gsi->version >= IPA_VERSION_5_0)
		val |= reg_encode(reg, CH_ERINDEX, channel->evt_ring_id);
	iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	reg = gsi_reg(gsi, CH_C_CNTXT_2);
	val = lower_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

	reg = gsi_reg(gsi, CH_C_CNTXT_3);
	val = upper_32_bits(channel->tre_ring.addr);
	iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

	reg = gsi_reg(gsi, CH_C_QOS);

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = reg_field_max(reg, WRR_WEIGHT);
	val = reg_encode(reg, WRR_WEIGHT, wrr_weight);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* No need to use the doorbell engine starting at IPA v4.0 */
	if (gsi->version < IPA_VERSION_4_0 && doorbell)
		val |= reg_bit(reg, USE_DB_ENG);

	/* v4.0 introduces an escape buffer for prefetch. We use it
	 * on all but the AP command channel.
	 */
	if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
		/* If not otherwise set, prefetch buffers are used */
		if (gsi->version < IPA_VERSION_4_5)
			val |= reg_bit(reg, USE_ESCAPE_BUF_ONLY);
		else
			val |= reg_encode(reg, PREFETCH_MODE, ESCAPE_BUF_ONLY);
	}
	/* All channels set DB_IN_BYTES */
	if (gsi->version >= IPA_VERSION_4_9)
		val |= reg_bit(reg, DB_IN_BYTES);

	iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = channel->trans_tre_max *
		GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	reg = gsi_reg(gsi, CH_C_SCRATCH_0);
	val = scr.data.word1;
	iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

	reg = gsi_reg(gsi, CH_C_SCRATCH_1);
	val = scr.data.word2;
	iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

	reg = gsi_reg(gsi, CH_C_SCRATCH_2);
	val = scr.data.word3;
	iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

	/* We must preserve the upper 16 bits of the last scratch register.
	 * The next sequence assumes those bits remain unchanged between the
	 * read and the write.
	 */
	reg = gsi_reg(gsi, CH_C_SCRATCH_3);
	offset = reg_n_offset(reg, channel_id);
	val = ioread32(gsi->virt + offset);
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + offset);

	/* All done! */
}

static int __gsi_channel_start(struct gsi_channel *channel, bool resume)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
	if (resume && gsi->version < IPA_VERSION_4_0)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	/* Enable NAPI and the completion interrupt */
	napi_enable(&channel->napi);
	gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);

	ret = __gsi_channel_start(channel, false);
	if (ret) {
		gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
		napi_disable(&channel->napi);
	}

	return ret;
}

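/* Issue the stop command, retrying with a short delay for as long as the
 * channel remains in STOP_IN_PROC state (gsi_channel_stop_command()
 * returns -EAGAIN in that case).
 */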
static int gsi_channel_stop_retry(struct gsi_channel *channel)
{
	u32 retries = GSI_CHANNEL_STOP_RETRIES;
	int ret;

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
	} while (retries--);

	return ret;
}

static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend)
{
	struct gsi *gsi = channel->gsi;
	int ret;

	/* Wait for any underway transactions to complete before stopping. */
	gsi_channel_trans_quiesce(channel);

	/* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
	if (suspend && gsi->version < IPA_VERSION_4_0)
		return 0;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_stop_retry(channel);

	mutex_unlock(&gsi->mutex);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, false);
	if (ret)
		return ret;

	/* Disable the completion interrupt and NAPI if successful */
	gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
	napi_disable(&channel->napi);

	return 0;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	gsi_channel_reset_command(channel);
	/* Due to a hardware quirk we may need to reset RX channels twice. */
	if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
		gsi_channel_reset_command(channel);

	/* Hardware assumes this is 0 following reset */
	channel->tre_ring.index = 0;
	gsi_channel_program(channel, doorbell);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a started channel for suspend */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	int ret;

	ret = __gsi_channel_stop(channel, true);
	if (ret)
		return ret;

	/* Ensure NAPI polling has finished. */
	napi_synchronize(&channel->napi);

	return 0;
}

/* Resume a suspended channel (starting if stopped) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return __gsi_channel_start(channel, true);
}

/* Prevent all GSI interrupts while suspended */
void gsi_suspend(struct gsi *gsi)
{
	disable_irq(gsi->irq);
}

/* Allow all GSI interrupts again when resuming */
void gsi_resume(struct gsi *gsi)
{
	enable_irq(gsi->irq);
}

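/* Record the channel's cumulative byte and transaction counts in a newly
 * committed TX transaction; gsi_trans_tx_completed() later uses these
 * snapshots to work out how much was transferred while the transaction
 * was pending.
 */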
void gsi_trans_tx_committed(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];

	channel->trans_count++;
	channel->byte_count += trans->len;

	trans->trans_count = channel->trans_count;
	trans->byte_count = channel->byte_count;
}

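/* Report to the network stack the number of bytes and transactions that
 * have been queued to hardware on a TX channel since this was last called.
 */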
void gsi_trans_tx_queued(struct gsi_trans *trans)
{
	u32 channel_id = trans->channel_id;
	struct gsi *gsi = trans->gsi;
	struct gsi_channel *channel;
	u32 trans_count;
	u32 byte_count;

	channel = &gsi->channel[channel_id];

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
}

/**
 * gsi_trans_tx_completed() - Report completed TX transactions
 * @trans:	TX channel transaction that has completed
 *
 * Report that a transaction on a TX channel has completed. At the time a
 * transaction is committed, we record *in the transaction* its channel's
 * committed transaction and byte counts. Transactions are completed in
 * order, and the difference between the channel's byte/transaction count
 * when the transaction was committed and when it completes tells us
 * exactly how much data has been transferred while the transaction was
 * pending.
 *
 * We report this information to the network stack, which uses it to manage
 * the rate at which data is sent to hardware.
 */
static void gsi_trans_tx_completed(struct gsi_trans *trans)
{
	u32 channel_id = trans->channel_id;
	struct gsi *gsi = trans->gsi;
	struct gsi_channel *channel;
	u32 trans_count;
	u32 byte_count;

	channel = &gsi->channel[channel_id];
	trans_count = trans->trans_count - channel->compl_trans_count;
	byte_count = trans->byte_count - channel->compl_byte_count;

	channel->compl_trans_count += trans_count;
	channel->compl_byte_count += byte_count;

	ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
}

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	const struct reg *reg;
	u32 channel_mask;

	reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ);
	channel_mask = ioread32(gsi->virt + reg_offset(reg));

	reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
	iowrite32(channel_mask, gsi->virt + reg_offset(reg));

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);

		channel_mask ^= BIT(channel_id);

		complete(&gsi->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	const struct reg *reg;
	u32 event_mask;

	reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ);
	event_mask = ioread32(gsi->virt + reg_offset(reg));

	reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
	iowrite32(event_mask, gsi->virt + reg_offset(reg));

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		complete(&gsi->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&gsi->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	const struct reg *log_reg;
	const struct reg *clr_reg;
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 offset;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	log_reg = gsi_reg(gsi, ERROR_LOG);
	offset = reg_offset(log_reg);
	val = ioread32(gsi->virt + offset);
	iowrite32(0, gsi->virt + offset);

	clr_reg = gsi_reg(gsi, ERROR_LOG_CLR);
	iowrite32(~0, gsi->virt + reg_offset(clr_reg));

	/* Parse the error value */
	ee = reg_decode(log_reg, ERR_EE, val);
	type = reg_decode(log_reg, ERR_TYPE, val);
	which = reg_decode(log_reg, ERR_VIRT_IDX, val);
	code = reg_decode(log_reg, ERR_CODE, val);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	const struct reg *reg;
	u32 result;
	u32 val;

	/* This interrupt is used to handle completions of GENERIC GSI
	 * commands. We use these to allocate and halt channels on the
	 * modem's behalf due to a hardware quirk on IPA v4.2. The modem
	 * "owns" channels even when the AP allocates them, and we have no
	 * way of knowing whether a modem channel's state has been changed.
	 *
	 * We also use GENERIC commands to enable/disable channel flow
	 * control for IPA v4.2+.
	 *
	 * It is recommended that we halt the modem channels we allocated
	 * when shutting down, but it's possible the channel isn't running
	 * at the time we issue the HALT command. We'll get an error in
	 * that case, but it's harmless (the channel is already halted).
	 * Similarly, we could get an error back when updating flow control
	 * on a channel because it's not in the proper state.
	 *
	 * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
	 * error if we receive it.
	 */
	reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
	val = ioread32(gsi->virt + reg_offset(reg));
	result = reg_decode(reg, GENERIC_EE_RESULT, val);

	switch (result) {
	case GENERIC_EE_SUCCESS:
	case GENERIC_EE_INCORRECT_CHANNEL_STATE:
		gsi->result = 0;
		break;

	case GENERIC_EE_RETRY:
		gsi->result = -EAGAIN;
		break;

	default:
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
		gsi->result = -EIO;
		break;
	}

	complete(&gsi->completion);
}

/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	const struct reg *reg;
	u32 val;

	reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_STTS);
	val = ioread32(gsi->virt + reg_offset(reg));

	if (val & ERROR_INT)
		gsi_isr_glob_err(gsi);

	reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_CLR);
	iowrite32(val, gsi->virt + reg_offset(reg));

	val &= ~ERROR_INT;

	if (val & GP_INT1) {
		val ^= GP_INT1;
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	const struct reg *reg;
	u32 event_mask;

	reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ);
	event_mask = ioread32(gsi->virt + reg_offset(reg));

	gsi_irq_ieob_disable(gsi, event_mask);

	reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_CLR);
	iowrite32(event_mask, gsi->virt + reg_offset(reg));

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	const struct reg *reg;
	u32 val;

	reg = gsi_reg(gsi, CNTXT_GSI_IRQ_STTS);
	val = ioread32(gsi->virt + reg_offset(reg));

	reg = gsi_reg(gsi, CNTXT_GSI_IRQ_CLR);
	iowrite32(val, gsi->virt + reg_offset(reg));

	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:	Interrupt number (ignored)
 * @dev_id:	GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	const struct reg *reg;
	u32 intr_mask;
	u32 cnt = 0;
	u32 offset;

	reg = gsi_reg(gsi, CNTXT_TYPE_IRQ);
	offset = reg_offset(reg);

	/* enum gsi_irq_type_id defines GSI interrupt types */
	while ((intr_mask = ioread32(gsi->virt + offset))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			/* Note: the IRQ condition for each type is cleared
			 * when the type-specific register is updated.
			 */
			switch (gsi_intr) {
			case GSI_CH_CTRL:
				gsi_isr_chan_ctrl(gsi);
				break;
			case GSI_EV_CTRL:
				gsi_isr_evt_ctrl(gsi);
				break;
			case GSI_GLOB_EE:
				gsi_isr_glob_ee(gsi);
				break;
			case GSI_IEOB:
				gsi_isr_ieob(gsi);
				break;
			case GSI_GENERAL:
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"unrecognized interrupt type 0x%08x\n",
					gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

/* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
	int ret;

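	/* platform_get_irq_byname() returns a negative errno on failure;
	 * also treat an (unexpected) zero IRQ as an error
	 */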
1427 | ret = platform_get_irq_byname(pdev, "gsi" ); |
1428 | if (ret <= 0) |
1429 | return ret ? : -EINVAL; |
1430 | |
1431 | gsi->irq = ret; |
1432 | |
1433 | return 0; |
1434 | } |
1435 | |
1436 | /* Return the transaction associated with a transfer completion event */ |
1437 | static struct gsi_trans * |
1438 | gsi_event_trans(struct gsi *gsi, struct gsi_event *event) |
1439 | { |
1440 | u32 channel_id = event->chid; |
1441 | struct gsi_channel *channel; |
1442 | struct gsi_trans *trans; |
1443 | u32 tre_offset; |
1444 | u32 tre_index; |
1445 | |
1446 | channel = &gsi->channel[channel_id]; |
1447 | if (WARN(!channel->gsi, "event has bad channel %u\n" , channel_id)) |
1448 | return NULL; |
1449 | |
1450 | /* Event xfer_ptr records the TRE it's associated with */ |
1451 | tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr)); |
1452 | tre_index = gsi_ring_index(ring: &channel->tre_ring, offset: tre_offset); |
1453 | |
1454 | trans = gsi_channel_trans_mapped(channel, index: tre_index); |
1455 | |
1456 | if (WARN(!trans, "channel %u event with no transaction\n" , channel_id)) |
1457 | return NULL; |
1458 | |
1459 | return trans; |
1460 | } |
1461 | |
1462 | /** |
1463 | * gsi_evt_ring_update() - Update transaction state from hardware |
1464 | * @gsi: GSI pointer |
1465 | * @evt_ring_id: Event ring ID |
1466 | * @index: Event index in ring reported by hardware |
1467 | * |
1468 | * Events for RX channels contain the actual number of bytes received into |
1469 | * the buffer. Every event has a transaction associated with it, and here |
1470 | * we update transactions to record their actual received lengths. |
1471 | * |
1472 | * When an event for a TX channel arrives we use information in the |
1473 | * transaction to report the number of requests and bytes that have |
1474 | * been transferred. |
1475 | * |
1476 | * This function is called whenever we learn that the GSI hardware has filled |
1477 | * new events since the last time we checked. The ring's index field tells |
1478 | * the first entry in need of processing. The index provided is the |
1479 | * first *unfilled* event in the ring (following the last filled one). |
1480 | * |
1481 | * Events are sequential within the event ring, and transactions are |
1482 | * sequential within the transaction array. |
1483 | * |
1484 | * Note that @index always refers to an element *within* the event ring. |
1485 | */ |
static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_event *event_done;
	struct gsi_event *event;
	u32 event_avail;
	u32 old_index;

	/* Starting with the oldest un-processed event, determine which
	 * transaction (and which channel) is associated with the event.
	 * For RX channels, update each completed transaction with the
	 * number of bytes that were actually received. For TX channels
	 * associated with a network device, report to the network stack
	 * the number of transfers and bytes this completion represents.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		struct gsi_trans *trans;

		trans = gsi_event_trans(gsi, event);
		if (!trans)
			return;

		if (trans->direction == DMA_FROM_DEVICE)
			trans->len = __le16_to_cpu(event->len);
		else
			gsi_trans_tx_completed(trans);

		gsi_trans_move_complete(trans);

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
	} while (event != event_done);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
}
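/* Worked example with hypothetical numbers: if ring->count is 8,
 * old_index is 6, and index is 2, then event_avail starts at
 * 8 - (6 % 8) = 2. The loop processes slots 6 and 7, wraps back to
 * slot 0 when event_avail is exhausted, processes slots 0 and 1, and
 * stops when the event pointer reaches gsi_ring_virt(ring, 2) --
 * four events in all.
 */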

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	u32 size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size.
	 * The DMA address returned by dma_alloc_coherent() is guaranteed to
	 * be a power-of-2 number of pages, which satisfies the requirement.
	 */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (!ring->virt)
		return -ENOMEM;

	ring->addr = addr;
	ring->count = count;
	ring->index = 0;

	return 0;
}
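/* Minimal usage sketch (hypothetical caller, mirroring the real ones
 * later in this file): allocate a 64-element ring, then free it.
 * Per the comment above, the count must be a power of 2.
 *
 *	struct gsi_ring ring;
 *	int ret;
 *
 *	ret = gsi_ring_alloc(gsi, &ring, 64);
 *	if (ret)
 *		return ret;
 *	...
 *	gsi_ring_free(gsi, &ring);
 */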

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}
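/* Worked example with a hypothetical bitmap value: if event_bitmap is
 * 0xffff001f, rings 0-4 are in use and rings 16-31 are reserved or
 * unsupported. ffz() returns the lowest clear bit position, 5, so
 * that ring is handed out and marked busy:
 *
 *	evt_ring_id = ffz(0xffff001f);		(yields 5)
 *	event_bitmap |= BIT(5);			(now 0xffff003f)
 */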

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	const struct reg *reg;
	u32 val;

	reg = gsi_reg(gsi, CH_C_DOORBELL_0);
	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
}
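/* Worked example with hypothetical numbers, assuming the ring index
 * counts filled TREs without wrapping (hence the modulo above): with
 * a 16-element TRE ring and tre_ring->index == 19, the first
 * un-filled slot is 19 % 16 = 3, and the value written to the
 * doorbell register is the DMA address of that slot:
 *
 *	val = gsi_ring_addr(tre_ring, 3);
 */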

/* Consult hardware, move newly completed transactions to completed state */
void gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	const struct reg *reg;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done. Note
	 * that index always refers to an entry *within* the event ring.
	 */
	reg = gsi_reg(gsi, EV_CH_E_CNTXT_4);
	offset = reg_n_offset(reg, evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return;

	/* Get the transaction for the latest completed event. */
	trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
	if (!trans)
		return;

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received. For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	gsi_evt_ring_update(gsi, evt_ring_id, index);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel: Channel to be polled
 *
 * Return: Transaction pointer, or null if none are available
 *
 * This function returns the first of a channel's completed transactions.
 * If no transactions are in completed state, the hardware is consulted to
 * determine whether any new transactions have completed. If so, they're
 * moved to completed state and the first such transaction is returned.
 * If there are no more completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first completed transaction */
	trans = gsi_channel_trans_complete(channel);
	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi: NAPI structure for the channel
 * @budget: Budget supplied by NAPI core
 *
 * Return: Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more. Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count;

	channel = container_of(napi, struct gsi_channel, napi);
	for (count = 0; count < budget; count++) {
		struct gsi_trans *trans;

		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		gsi_trans_complete(trans);
	}

	if (count < budget && napi_complete(napi))
		gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);

	return count;
}
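/* This follows the standard NAPI contract: the I/O completion (IEOB)
 * interrupt is re-enabled only when a poll consumes less than its
 * budget and napi_complete() confirms polling is done. The other half
 * of the contract is the interrupt handler, which (as elsewhere in
 * this driver) disables the IEOB interrupt and schedules NAPI, along
 * these lines:
 *
 *	gsi_irq_ieob_disable_one(gsi, evt_ring_id);
 *	napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
 */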

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available; clear bits can be used. This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}
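/* Worked example with hypothetical numbers: on a 64-bit build with
 * evt_ring_max == 20, GENMASK(63, 20) truncated into the u32 bitmap
 * sets bits 20-31, marking rings 20-31 unavailable and leaving rings
 * 0-19 allocatable. If the reserved MHI range were, say, rings 12-13,
 * the second GENMASK() would additionally set bits 12 and 13:
 *
 *	event_bitmap = 0xfff00000;
 *	event_bitmap |= GENMASK(13, 12);	(now 0xfff03000)
 */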

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!gsi_channel_initialized(channel))
		return 0;

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, true);

	if (channel->toward_ipa)
		netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!gsi_channel_initialized(channel))
		return;

	netif_napi_del(&channel->napi);

	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

/* We use generic commands only to operate on modem channels. We don't have
 * the ability to determine channel state for a modem channel, so we simply
 * issue the command and wait for it to complete.
 */
static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode,
			       u8 params)
{
	const struct reg *reg;
	bool timeout;
	u32 offset;
	u32 val;

	/* The error global interrupt type is always enabled (until we tear
	 * down), so we will keep it enabled.
	 *
	 * A generic EE command completes with a GSI global interrupt of
	 * type GP_INT1. We only perform one generic command at a time
	 * (to allocate, halt, or enable/disable flow control on a modem
	 * channel), and only from this function. So we enable the GP_INT1
	 * IRQ type here, and disable it again after the command completes.
	 */
	reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
	val = ERROR_INT | GP_INT1;
	iowrite32(val, gsi->virt + reg_offset(reg));

	/* First zero the result code field */
	reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
	offset = reg_offset(reg);
	val = ioread32(gsi->virt + offset);

	val &= ~reg_fmask(reg, GENERIC_EE_RESULT);
	iowrite32(val, gsi->virt + offset);

	/* Now issue the command */
	reg = gsi_reg(gsi, GENERIC_CMD);
	val = reg_encode(reg, GENERIC_OPCODE, opcode);
	val |= reg_encode(reg, GENERIC_CHID, channel_id);
	val |= reg_encode(reg, GENERIC_EE, GSI_EE_MODEM);
	if (gsi->version >= IPA_VERSION_4_11)
		val |= reg_encode(reg, GENERIC_PARAMS, params);

	timeout = !gsi_command(gsi, reg_offset(reg), val);

	/* Disable the GP_INT1 IRQ type again */
	reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
	iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));

	if (!timeout)
		return gsi->result;

	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
		opcode, channel_id);

	return -ETIMEDOUT;
}

static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
	return gsi_generic_command(gsi, channel_id,
				   GSI_GENERIC_ALLOCATE_CHANNEL, 0);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
	int ret;

	do
		ret = gsi_generic_command(gsi, channel_id,
					  GSI_GENERIC_HALT_CHANNEL, 0);
	while (ret == -EAGAIN && retries--);

	if (ret)
		dev_err(gsi->dev, "error %d halting modem channel %u\n",
			ret, channel_id);
}

/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
void
gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
{
	u32 retries = 0;
	u32 command;
	int ret;

	command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
			 : GSI_GENERIC_DISABLE_FLOW_CONTROL;
	/* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable
	 * is underway. In this case we need to retry the command.
	 */
	if (!enable && gsi->version >= IPA_VERSION_4_11)
		retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;

	do
		ret = gsi_generic_command(gsi, channel_id, command, 0);
	while (ret == -EAGAIN && retries--);

	if (ret)
		dev_err(gsi->dev,
			"error %d %sabling modem channel %u flow control\n",
			ret, enable ? "en" : "dis", channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
	u32 channel_id = 0;
	u32 mask;
	int ret;

	gsi_irq_enable(gsi);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_setup_one(gsi, channel_id);
		if (ret)
			goto err_unwind;
	} while (++channel_id < gsi->channel_count);

	/* Make sure no channels were defined that hardware does not support */
	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
		struct gsi_channel *channel = &gsi->channel[channel_id++];

		if (!gsi_channel_initialized(channel))
			continue;

		ret = -EINVAL;
		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;
		goto err_unwind;
	}

	/* Allocate modem channels if necessary */
	mask = gsi->modem_channel_bitmap;
	while (mask) {
		u32 modem_channel_id = __ffs(mask);

		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
		if (ret)
			goto err_unwind_modem;

		/* Clear bit from mask only after success (for unwind) */
		mask ^= BIT(modem_channel_id);
	}

	mutex_unlock(&gsi->mutex);

	return 0;

err_unwind_modem:
	/* Compute which modem channels need to be deallocated */
	mask ^= gsi->modem_channel_bitmap;
	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

err_unwind:
	while (channel_id--)
		gsi_channel_teardown_one(gsi, channel_id);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);

	return ret;
}

/* Inverse of gsi_channel_setup() */
static void gsi_channel_teardown(struct gsi *gsi)
{
	u32 mask = gsi->modem_channel_bitmap;
	u32 channel_id;

	mutex_lock(&gsi->mutex);

	while (mask) {
		channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
}

/* Turn off all GSI interrupts initially */
static int gsi_irq_setup(struct gsi *gsi)
{
	const struct reg *reg;
	int ret;

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	reg = gsi_reg(gsi, CNTXT_INTSET);
	iowrite32(reg_bit(reg, INTYPE), gsi->virt + reg_offset(reg));

	/* Disable all interrupt types */
	gsi_irq_type_update(gsi, 0);

	/* Clear all type-specific interrupt masks */
	reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
	iowrite32(0, gsi->virt + reg_offset(reg));

	reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
	iowrite32(0, gsi->virt + reg_offset(reg));

	reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
	iowrite32(0, gsi->virt + reg_offset(reg));

	reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
	iowrite32(0, gsi->virt + reg_offset(reg));

	/* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
	if (gsi->version > IPA_VERSION_3_1) {
		reg = gsi_reg(gsi, INTER_EE_SRC_CH_IRQ_MSK);
		iowrite32(0, gsi->virt + reg_offset(reg));

		reg = gsi_reg(gsi, INTER_EE_SRC_EV_CH_IRQ_MSK);
		iowrite32(0, gsi->virt + reg_offset(reg));
	}

	reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
	iowrite32(0, gsi->virt + reg_offset(reg));

	ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
	if (ret)
		dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);

	return ret;
}

static void gsi_irq_teardown(struct gsi *gsi)
{
	free_irq(gsi->irq, gsi);
}

/* Get # supported channel and event rings; there is no gsi_ring_teardown() */
static int gsi_ring_setup(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	const struct reg *reg;
	u32 count;
	u32 val;

	if (gsi->version < IPA_VERSION_3_5_1) {
		/* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;

		return 0;
	}

	reg = gsi_reg(gsi, HW_PARAM_2);
	val = ioread32(gsi->virt + reg_offset(reg));

	count = reg_decode(reg, NUM_CH_PER_EE, val);
	if (!count) {
		dev_err(dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
			 GSI_CHANNEL_COUNT_MAX, count);
		count = GSI_CHANNEL_COUNT_MAX;
	}
	gsi->channel_count = count;

	if (gsi->version < IPA_VERSION_5_0) {
		count = reg_decode(reg, NUM_EV_PER_EE, val);
	} else {
		/* IPA v5.0+ reports the event ring count in HW_PARAM_4,
		 * so that register must be read before decoding.
		 */
		reg = gsi_reg(gsi, HW_PARAM_4);
		val = ioread32(gsi->virt + reg_offset(reg));
		count = reg_decode(reg, EV_PER_EE, val);
	}
	if (!count) {
		dev_err(dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(dev,
			 "limiting to %u event rings; hardware supports %u\n",
			 GSI_EVT_RING_COUNT_MAX, count);
		count = GSI_EVT_RING_COUNT_MAX;
	}
	gsi->evt_ring_count = count;

	return 0;
}

/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
	const struct reg *reg;
	u32 val;
	int ret;

	/* Here is where we first touch the GSI hardware */
	reg = gsi_reg(gsi, GSI_STATUS);
	val = ioread32(gsi->virt + reg_offset(reg));
	if (!(val & reg_bit(reg, ENABLED))) {
		dev_err(gsi->dev, "GSI has not been enabled\n");
		return -EIO;
	}

	ret = gsi_irq_setup(gsi);
	if (ret)
		return ret;

	ret = gsi_ring_setup(gsi);	/* No matching teardown required */
	if (ret)
		goto err_irq_teardown;

	/* Initialize the error log */
	reg = gsi_reg(gsi, ERROR_LOG);
	iowrite32(0, gsi->virt + reg_offset(reg));

	ret = gsi_channel_setup(gsi);
	if (ret)
		goto err_irq_teardown;

	return 0;

err_irq_teardown:
	gsi_irq_teardown(gsi);

	return ret;
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
	gsi_irq_teardown(gsi);
}

/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}

static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
				   const struct ipa_gsi_endpoint_data *data)
{
	const struct gsi_channel_data *channel_data;
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

	/* Make sure channel ids are in the range driver supports */
	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
		dev_err(dev, "bad channel id %u; must be less than %u\n",
			channel_id, GSI_CHANNEL_COUNT_MAX);
		return false;
	}

	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
		dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
		return false;
	}

	if (command && !data->toward_ipa) {
		dev_err(dev, "command channel %u is not TX\n", channel_id);
		return false;
	}

	channel_data = &data->channel;

	if (!channel_data->tlv_count ||
	    channel_data->tlv_count > GSI_TLV_MAX) {
		dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
			channel_id, channel_data->tlv_count, GSI_TLV_MAX);
		return false;
	}

	if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
		dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
			channel_id, IPA_COMMAND_TRANS_TRE_MAX,
			channel_data->tlv_count);
		return false;
	}

	/* We have to allow at least one maximally-sized transaction to
	 * be outstanding (which would use tlv_count TREs). Given how
	 * gsi_channel_tre_max() is computed, tre_count has to be almost
	 * twice the TLV FIFO size to satisfy this requirement.
	 */
	if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
		dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
			channel_id, channel_data->tlv_count,
			channel_data->tre_count);
		return false;
	}

	if (!is_power_of_2(channel_data->tre_count)) {
		dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
			channel_id, channel_data->tre_count);
		return false;
	}

	if (!is_power_of_2(channel_data->event_count)) {
		dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
			channel_id, channel_data->event_count);
		return false;
	}

	return true;
}
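/* Worked example with hypothetical numbers: for tlv_count == 8, the
 * check above requires tre_count >= 2 * 8 - 1 = 15, and the
 * power-of-2 check then pushes the smallest legal value up to 16.
 * With tre_count == 16, gsi_channel_tre_max() permits
 * 16 - (8 - 1) = 9 outstanding TREs, so one maximally-sized (8-TRE)
 * transaction can always be outstanding, as required.
 */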

/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, command, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->trans_tre_max = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}

/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!gsi_channel_initialized(channel))
		return;

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}

/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	bool modem_alloc;
	int ret = 0;
	u32 i;

	/* IPA v4.2 requires the AP to allocate channels for the modem */
	modem_alloc = gsi->version == IPA_VERSION_4_2;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->ieob_enabled_bitmap = 0;

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		/* Tear down the channel for entry i, not entry 0 */
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}

	return ret;
}

/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;
}

/* Init function for GSI. GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev,
	     enum ipa_version version, u32 count,
	     const struct ipa_gsi_endpoint_data *data)
{
	int ret;

	gsi_validate_build();

	gsi->dev = &pdev->dev;
	gsi->version = version;

	/* GSI uses NAPI on all channels. Create a dummy network device
	 * for the channel NAPI contexts to be associated with.
	 */
	init_dummy_netdev(&gsi->dummy_dev);
	init_completion(&gsi->completion);

	ret = gsi_reg_init(gsi, pdev);
	if (ret)
		return ret;

	ret = gsi_irq_init(gsi, pdev);	/* No matching exit required */
	if (ret)
		goto err_reg_exit;

	ret = gsi_channel_init(gsi, count, data);
	if (ret)
		goto err_reg_exit;

	mutex_init(&gsi->mutex);

	return 0;

err_reg_exit:
	gsi_reg_exit(gsi);

	return ret;
}

/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	gsi_reg_exit(gsi);
}

/* The maximum number of outstanding TREs on a channel. This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them. But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information. So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though. Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous. The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it. Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements. The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
 */
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->trans_tre_max - 1);
}

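/* Worked example with hypothetical numbers: a channel with
 * tre_count == 256 and trans_tre_max == 16 yields a limit of
 * 256 - (16 - 1) = 241 outstanding TREs, 14 below the hardware limit
 * of 255. Assuming the pool adds trans_tre_max - 1 extra entries (as
 * the comment above suggests), a pool sized for 241 tops out at
 * 241 + 15 = 256 entries rather than crossing past that power-of-2
 * boundary, keeping its memory footprint from nearly doubling.
 */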