1 | // SPDX-License-Identifier: GPL-2.0 |
---|---|
2 | |
3 | /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. |
4 | * Copyright (C) 2018-2024 Linaro Ltd. |
5 | */ |
6 | |
7 | #include <linux/bits.h> |
8 | #include <linux/bug.h> |
9 | #include <linux/completion.h> |
10 | #include <linux/interrupt.h> |
11 | #include <linux/mutex.h> |
12 | #include <linux/netdevice.h> |
13 | #include <linux/platform_device.h> |
14 | #include <linux/types.h> |
15 | |
16 | #include "gsi.h" |
17 | #include "gsi_private.h" |
18 | #include "gsi_reg.h" |
19 | #include "gsi_trans.h" |
20 | #include "ipa_data.h" |
21 | #include "ipa_gsi.h" |
22 | #include "ipa_version.h" |
23 | #include "reg.h" |
24 | |
25 | /** |
26 | * DOC: The IPA Generic Software Interface |
27 | * |
28 | * The generic software interface (GSI) is an integral component of the IPA, |
29 | * providing a well-defined communication layer between the AP subsystem |
30 | * and the IPA core. The modem uses the GSI layer as well. |
31 | * |
32 | * -------- --------- |
33 | * | | | | |
34 | * | AP +<---. .----+ Modem | |
35 | * | +--. | | .->+ | |
36 | * | | | | | | | | |
37 | * -------- | | | | --------- |
38 | * v | v | |
39 | * --+-+---+-+-- |
40 | * | GSI | |
41 | * |-----------| |
42 | * | | |
43 | * | IPA | |
44 | * | | |
45 | * ------------- |
46 | * |
47 | * In the above diagram, the AP and Modem represent "execution environments" |
48 | * (EEs), which are independent operating environments that use the IPA for |
49 | * data transfer. |
50 | * |
51 | * Each EE uses a set of unidirectional GSI "channels," which allow transfer |
52 | * of data to or from the IPA. A channel is implemented as a ring buffer, |
53 | * with a DRAM-resident array of "transfer elements" (TREs) available to |
54 | * describe transfers to or from other EEs through the IPA. A transfer |
55 | * element can also contain an immediate command, requesting the IPA perform |
56 | * actions other than data transfer. |
57 | * |
58 | * Each TRE refers to a block of data--also located in DRAM. After writing |
59 | * one or more TREs to a channel, the writer (either the IPA or an EE) writes |
60 | * a doorbell register to inform the receiving side how many elements have |
61 | * been written. |
62 | * |
63 | * Each channel has a GSI "event ring" associated with it. An event ring |
64 | * is implemented very much like a channel ring, but is always directed from |
65 | * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel |
66 | * events by adding an entry to the event ring associated with the channel. |
67 | * The GSI then writes its doorbell for the event ring, causing the target |
68 | * EE to be interrupted. Each entry in an event ring contains a pointer |
69 | * to the channel TRE whose completion the event represents. |
70 | * |
71 | * Each TRE in a channel ring has a set of flags. One flag indicates whether |
72 | * the completion of the transfer operation generates an entry (and possibly |
73 | * an interrupt) in the channel's event ring. Other flags allow transfer |
74 | * elements to be chained together, forming a single logical transaction. |
75 | * TRE flags are used to control whether and when interrupts are generated |
76 | * to signal completion of channel transfers. |
77 | * |
78 | * Elements in channel and event rings are completed (or consumed) strictly |
79 | * in order. Completion of one entry implies the completion of all preceding |
80 | * entries. A single completion interrupt can therefore communicate the |
81 | * completion of many transfers. |
82 | * |
83 | * Note that all GSI registers are little-endian, which is the assumed |
84 | * endianness of I/O space accesses. The accessor functions perform byte |
85 | * swapping if needed (i.e., for a big endian CPU). |
86 | */ |
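/* Example (illustrative sketch, not part of the upstream code): the doorbell
 * protocol described above amounts to "fill ring elements, then tell the
 * other side how far you got".  For an AP TX channel this driver roughly
 * does:
 *
 *	// fill one or more TREs in channel->tre_ring, advancing its index
 *	gsi_channel_doorbell(channel);	// write the DRAM address of the
 *					// first unfilled TRE to CH_C_DOORBELL_0
 *
 * The IPA responds by appending entries to the channel's event ring and
 * ringing that ring's doorbell, which raises an IEOB interrupt handled by
 * gsi_isr_ieob() below.
 */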
87 | |
88 | /* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */ |
89 | #define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */ |
90 | |
91 | #define GSI_CMD_TIMEOUT 50 /* milliseconds */ |
92 | |
93 | #define GSI_CHANNEL_STOP_RETRIES 10 |
94 | #define GSI_CHANNEL_MODEM_HALT_RETRIES 10 |
95 | #define GSI_CHANNEL_MODEM_FLOW_RETRIES 5 /* disable flow control only */ |
96 | |
97 | #define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */ |
98 | #define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */ |
99 | |
100 | #define GSI_ISR_MAX_ITER 50 /* Detect interrupt storms */ |
101 | |
102 | /* An entry in an event ring */ |
103 | struct gsi_event { |
104 | __le64 xfer_ptr; |
105 | __le16 len; |
106 | u8 reserved1; |
107 | u8 code; |
108 | __le16 reserved2; |
109 | u8 type; |
110 | u8 chid; |
111 | }; |
112 | |
113 | /** struct gsi_channel_scratch_gpi - GPI protocol scratch register |
114 | * @max_outstanding_tre: |
115 | * Defines the maximum number of TREs allowed in a single transaction |
116 | * on a channel, expressed in bytes. This determines the amount of prefetch |
117 | * performed by the hardware. We configure this to equal the size of |
118 | * the TLV FIFO for the channel. |
119 | * @outstanding_threshold: |
120 | * Defines the threshold (in bytes) determining when the sequencer |
121 | * should update the channel doorbell. We configure this to equal |
122 | * the size of two TREs. |
123 | */ |
124 | struct gsi_channel_scratch_gpi { |
125 | u64 reserved1; |
126 | u16 reserved2; |
127 | u16 max_outstanding_tre; |
128 | u16 reserved3; |
129 | u16 outstanding_threshold; |
130 | }; |
131 | |
132 | /** union gsi_channel_scratch - channel scratch configuration area |
133 | * |
134 | * The exact interpretation of this register is protocol-specific. |
135 | * We only use GPI channels; see struct gsi_channel_scratch_gpi, above. |
136 | */ |
137 | union gsi_channel_scratch { |
138 | struct gsi_channel_scratch_gpi gpi; |
139 | struct { |
140 | u32 word1; |
141 | u32 word2; |
142 | u32 word3; |
143 | u32 word4; |
144 | } data; |
145 | }; |
146 | |
147 | /* Check things that can be validated at build time. */ |
148 | static void gsi_validate_build(void) |
149 | { |
150 | /* This is used as a divisor */ |
151 | BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE); |
152 | |
153 | /* Code assumes the sizes of channel and event ring elements are |
154 | * the same (and fixed). Make sure the size of an event ring |
155 | * element is what's expected. |
156 | */ |
157 | BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE); |
158 | |
159 | /* Hardware requires a 2^n ring size. We ensure the number of |
160 | * elements in an event ring is a power of 2 elsewhere; this |
161 | * ensures the elements themselves meet the requirement. |
162 | */ |
163 | BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE)); |
164 | } |
165 | |
166 | /* Return the channel id associated with a given channel */ |
167 | static u32 gsi_channel_id(struct gsi_channel *channel) |
168 | { |
169 | return channel - &channel->gsi->channel[0]; |
170 | } |
171 | |
172 | /* An initialized channel has a non-null GSI pointer */ |
173 | static bool gsi_channel_initialized(struct gsi_channel *channel) |
174 | { |
175 | return !!channel->gsi; |
176 | } |
177 | |
178 | /* Encode the channel protocol for the CH_C_CNTXT_0 register */ |
179 | static u32 ch_c_cntxt_0_type_encode(enum ipa_version version, |
180 | const struct reg *reg, |
181 | enum gsi_channel_type type) |
182 | { |
183 | u32 val; |
184 | |
185 | val = reg_encode(reg, field_id: CHTYPE_PROTOCOL, val: type); |
186 | if (version < IPA_VERSION_4_5 || version >= IPA_VERSION_5_0) |
187 | return val; |
188 | |
189 | type >>= hweight32(reg_fmask(reg, CHTYPE_PROTOCOL)); |
190 | |
191 | return val | reg_encode(reg, field_id: CHTYPE_PROTOCOL_MSB, val: type); |
192 | } |
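/* Worked example for the split above (the 3-bit field width is hypothetical):
 * if CHTYPE_PROTOCOL were a 3-bit field, hweight32() of its mask is 3, so a
 * protocol value of 0xa (0b1010) has its low three bits encoded into
 * CHTYPE_PROTOCOL and 0b1010 >> 3 == 0b1 encoded into CHTYPE_PROTOCOL_MSB.
 * Versions below IPA v4.5, and v5.0 and later, have a single field wide
 * enough for the whole value, so only the first encode is performed.
 */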
193 | |
194 | /* Update the GSI IRQ type register with the cached value */ |
195 | static void gsi_irq_type_update(struct gsi *gsi, u32 val) |
196 | { |
197 | const struct reg *reg = gsi_reg(gsi, reg_id: CNTXT_TYPE_IRQ_MSK); |
198 | |
199 | gsi->type_enabled_bitmap = val; |
200 | iowrite32(val, gsi->virt + reg_offset(reg)); |
201 | } |
202 | |
203 | static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id) |
204 | { |
205 | gsi_irq_type_update(gsi, val: gsi->type_enabled_bitmap | type_id); |
206 | } |
207 | |
208 | static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id) |
209 | { |
210 | gsi_irq_type_update(gsi, val: gsi->type_enabled_bitmap & ~type_id); |
211 | } |
212 | |
213 | /* Event ring commands are performed one at a time. Their completion |
214 | * is signaled by the event ring control GSI interrupt type, which is |
215 | * only enabled when we issue an event ring command. Only the event |
216 | * ring being operated on has this interrupt enabled. |
217 | */ |
218 | static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id) |
219 | { |
220 | u32 val = BIT(evt_ring_id); |
221 | const struct reg *reg; |
222 | |
223 | /* There's a small chance that a previous command completed |
224 | * after the interrupt was disabled, so make sure we have no |
225 | * pending interrupts before we enable them. |
226 | */ |
227 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_EV_CH_IRQ_CLR); |
228 | iowrite32(~0, gsi->virt + reg_offset(reg)); |
229 | |
230 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_EV_CH_IRQ_MSK); |
231 | iowrite32(val, gsi->virt + reg_offset(reg)); |
232 | gsi_irq_type_enable(gsi, type_id: GSI_EV_CTRL); |
233 | } |
234 | |
235 | /* Disable event ring control interrupts */ |
236 | static void gsi_irq_ev_ctrl_disable(struct gsi *gsi) |
237 | { |
238 | const struct reg *reg; |
239 | |
240 | gsi_irq_type_disable(gsi, type_id: GSI_EV_CTRL); |
241 | |
242 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_EV_CH_IRQ_MSK); |
243 | iowrite32(0, gsi->virt + reg_offset(reg)); |
244 | } |
245 | |
246 | /* Channel commands are performed one at a time. Their completion is |
247 | * signaled by the channel control GSI interrupt type, which is only |
248 | * enabled when we issue a channel command. Only the channel being |
249 | * operated on has this interrupt enabled. |
250 | */ |
251 | static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id) |
252 | { |
253 | u32 val = BIT(channel_id); |
254 | const struct reg *reg; |
255 | |
256 | /* There's a small chance that a previous command completed |
257 | * after the interrupt was disabled, so make sure we have no |
258 | * pending interrupts before we enable them. |
259 | */ |
260 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_CH_IRQ_CLR); |
261 | iowrite32(~0, gsi->virt + reg_offset(reg)); |
262 | |
263 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_CH_IRQ_MSK); |
264 | iowrite32(val, gsi->virt + reg_offset(reg)); |
265 | |
266 | gsi_irq_type_enable(gsi, type_id: GSI_CH_CTRL); |
267 | } |
268 | |
269 | /* Disable channel control interrupts */ |
270 | static void gsi_irq_ch_ctrl_disable(struct gsi *gsi) |
271 | { |
272 | const struct reg *reg; |
273 | |
274 | gsi_irq_type_disable(gsi, type_id: GSI_CH_CTRL); |
275 | |
276 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_CH_IRQ_MSK); |
277 | iowrite32(0, gsi->virt + reg_offset(reg)); |
278 | } |
279 | |
280 | static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id) |
281 | { |
282 | bool enable_ieob = !gsi->ieob_enabled_bitmap; |
283 | const struct reg *reg; |
284 | u32 val; |
285 | |
286 | gsi->ieob_enabled_bitmap |= BIT(evt_ring_id); |
287 | |
288 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_IEOB_IRQ_MSK); |
289 | val = gsi->ieob_enabled_bitmap; |
290 | iowrite32(val, gsi->virt + reg_offset(reg)); |
291 | |
292 | /* Enable the interrupt type if this is the first channel enabled */ |
293 | if (enable_ieob) |
294 | gsi_irq_type_enable(gsi, type_id: GSI_IEOB); |
295 | } |
296 | |
297 | static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask) |
298 | { |
299 | const struct reg *reg; |
300 | u32 val; |
301 | |
302 | gsi->ieob_enabled_bitmap &= ~event_mask; |
303 | |
304 | /* Disable the interrupt type if this was the last enabled channel */ |
305 | if (!gsi->ieob_enabled_bitmap) |
306 | gsi_irq_type_disable(gsi, type_id: GSI_IEOB); |
307 | |
308 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_IEOB_IRQ_MSK); |
309 | val = gsi->ieob_enabled_bitmap; |
310 | iowrite32(val, gsi->virt + reg_offset(reg)); |
311 | } |
312 | |
313 | static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id) |
314 | { |
315 | gsi_irq_ieob_disable(gsi, BIT(evt_ring_id)); |
316 | } |
317 | |
318 | /* Enable all GSI interrupt types */ |
319 | static void gsi_irq_enable(struct gsi *gsi) |
320 | { |
321 | const struct reg *reg; |
322 | u32 val; |
323 | |
324 | /* Global interrupts include hardware error reports. Enable |
325 | * that so we can at least report the error should it occur. |
326 | */ |
327 | reg = gsi_reg(gsi, reg_id: CNTXT_GLOB_IRQ_EN); |
328 | iowrite32(ERROR_INT, gsi->virt + reg_offset(reg)); |
329 | |
330 | gsi_irq_type_update(gsi, val: gsi->type_enabled_bitmap | GSI_GLOB_EE); |
331 | |
332 | /* General GSI interrupts are reported to all EEs; if they occur |
333 | * they are unrecoverable (without reset). A breakpoint interrupt |
334 | * also exists, but we don't support that. We want to be notified |
335 | * of errors so we can report them, even if they can't be handled. |
336 | */ |
337 | reg = gsi_reg(gsi, reg_id: CNTXT_GSI_IRQ_EN); |
338 | val = BUS_ERROR; |
339 | val |= CMD_FIFO_OVRFLOW; |
340 | val |= MCS_STACK_OVRFLOW; |
341 | iowrite32(val, gsi->virt + reg_offset(reg)); |
342 | |
343 | gsi_irq_type_update(gsi, val: gsi->type_enabled_bitmap | GSI_GENERAL); |
344 | } |
345 | |
346 | /* Disable all GSI interrupt types */ |
347 | static void gsi_irq_disable(struct gsi *gsi) |
348 | { |
349 | const struct reg *reg; |
350 | |
351 | gsi_irq_type_update(gsi, val: 0); |
352 | |
353 | /* Clear the type-specific interrupt masks set by gsi_irq_enable() */ |
354 | reg = gsi_reg(gsi, reg_id: CNTXT_GSI_IRQ_EN); |
355 | iowrite32(0, gsi->virt + reg_offset(reg)); |
356 | |
357 | reg = gsi_reg(gsi, reg_id: CNTXT_GLOB_IRQ_EN); |
358 | iowrite32(0, gsi->virt + reg_offset(reg)); |
359 | } |
360 | |
361 | /* Return the virtual address associated with a ring index */ |
362 | void *gsi_ring_virt(struct gsi_ring *ring, u32 index) |
363 | { |
364 | /* Note: index *must* be used modulo the ring count here */ |
365 | return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE; |
366 | } |
367 | |
368 | /* Return the 32-bit DMA address associated with a ring index */ |
369 | static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index) |
370 | { |
371 | return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE; |
372 | } |
373 | |
374 | /* Return the ring index of a 32-bit ring offset */ |
375 | static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset) |
376 | { |
377 | return (offset - gsi_ring_addr(ring, index: 0)) / GSI_RING_ELEMENT_SIZE; |
378 | } |
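/* Example (illustrative only): the helpers above are inverses for indices
 * within the ring, and gsi_ring_virt() applies the same arithmetic to the
 * CPU mapping:
 *
 *	u32 addr = gsi_ring_addr(ring, index);	 // base + index * element size
 *	u32 same = gsi_ring_index(ring, addr);	 // == index, for index < count
 *	void *virt = gsi_ring_virt(ring, index); // same offset from ring->virt
 *
 * GSI_RING_ELEMENT_SIZE is the size of struct gsi_event (16 bytes), as
 * enforced by gsi_validate_build().
 */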
379 | |
380 | /* Issue a GSI command by writing a value to a register, then wait for |
381 | * completion to be signaled. Returns true if the command completes |
382 | * or false if it times out. |
383 | */ |
384 | static bool gsi_command(struct gsi *gsi, u32 reg, u32 val) |
385 | { |
386 | unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT); |
387 | struct completion *completion = &gsi->completion; |
388 | |
389 | reinit_completion(x: completion); |
390 | |
391 | iowrite32(val, gsi->virt + reg); |
392 | |
393 | return !!wait_for_completion_timeout(x: completion, timeout); |
394 | } |
395 | |
396 | /* Return the hardware's notion of the current state of an event ring */ |
397 | static enum gsi_evt_ring_state |
398 | gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id) |
399 | { |
400 | const struct reg *reg = gsi_reg(gsi, reg_id: EV_CH_E_CNTXT_0); |
401 | u32 val; |
402 | |
403 | val = ioread32(gsi->virt + reg_n_offset(reg, n: evt_ring_id)); |
404 | |
405 | return reg_decode(reg, field_id: EV_CHSTATE, val); |
406 | } |
407 | |
408 | /* Issue an event ring command and wait for it to complete */ |
409 | static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id, |
410 | enum gsi_evt_cmd_opcode opcode) |
411 | { |
412 | struct device *dev = gsi->dev; |
413 | const struct reg *reg; |
414 | bool timeout; |
415 | u32 val; |
416 | |
417 | /* Enable the completion interrupt for the command */ |
418 | gsi_irq_ev_ctrl_enable(gsi, evt_ring_id); |
419 | |
420 | reg = gsi_reg(gsi, reg_id: EV_CH_CMD); |
421 | val = reg_encode(reg, field_id: EV_CHID, val: evt_ring_id); |
422 | val |= reg_encode(reg, field_id: EV_OPCODE, val: opcode); |
423 | |
424 | timeout = !gsi_command(gsi, reg: reg_offset(reg), val); |
425 | |
426 | gsi_irq_ev_ctrl_disable(gsi); |
427 | |
428 | if (!timeout) |
429 | return; |
430 | |
431 | dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n", |
432 | opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id)); |
433 | } |
434 | |
435 | /* Allocate an event ring in NOT_ALLOCATED state */ |
436 | static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id) |
437 | { |
438 | enum gsi_evt_ring_state state; |
439 | |
440 | /* Get initial event ring state */ |
441 | state = gsi_evt_ring_state(gsi, evt_ring_id); |
442 | if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) { |
443 | dev_err(gsi->dev, "event ring %u bad state %u before alloc\n", |
444 | evt_ring_id, state); |
445 | return -EINVAL; |
446 | } |
447 | |
448 | gsi_evt_ring_command(gsi, evt_ring_id, opcode: GSI_EVT_ALLOCATE); |
449 | |
450 | /* If successful the event ring state will have changed */ |
451 | state = gsi_evt_ring_state(gsi, evt_ring_id); |
452 | if (state == GSI_EVT_RING_STATE_ALLOCATED) |
453 | return 0; |
454 | |
455 | dev_err(gsi->dev, "event ring %u bad state %u after alloc\n", |
456 | evt_ring_id, state); |
457 | |
458 | return -EIO; |
459 | } |
460 | |
461 | /* Reset a GSI event ring in ALLOCATED or ERROR state. */ |
462 | static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id) |
463 | { |
464 | enum gsi_evt_ring_state state; |
465 | |
466 | state = gsi_evt_ring_state(gsi, evt_ring_id); |
467 | if (state != GSI_EVT_RING_STATE_ALLOCATED && |
468 | state != GSI_EVT_RING_STATE_ERROR) { |
469 | dev_err(gsi->dev, "event ring %u bad state %u before reset\n", |
470 | evt_ring_id, state); |
471 | return; |
472 | } |
473 | |
474 | gsi_evt_ring_command(gsi, evt_ring_id, opcode: GSI_EVT_RESET); |
475 | |
476 | /* If successful the event ring state will have changed */ |
477 | state = gsi_evt_ring_state(gsi, evt_ring_id); |
478 | if (state == GSI_EVT_RING_STATE_ALLOCATED) |
479 | return; |
480 | |
481 | dev_err(gsi->dev, "event ring %u bad state %u after reset\n", |
482 | evt_ring_id, state); |
483 | } |
484 | |
485 | /* Issue a hardware de-allocation request for an allocated event ring */ |
486 | static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id) |
487 | { |
488 | enum gsi_evt_ring_state state; |
489 | |
490 | state = gsi_evt_ring_state(gsi, evt_ring_id); |
491 | if (state != GSI_EVT_RING_STATE_ALLOCATED) { |
492 | dev_err(gsi->dev, "event ring %u state %u before dealloc\n", |
493 | evt_ring_id, state); |
494 | return; |
495 | } |
496 | |
497 | gsi_evt_ring_command(gsi, evt_ring_id, opcode: GSI_EVT_DE_ALLOC); |
498 | |
499 | /* If successful the event ring state will have changed */ |
500 | state = gsi_evt_ring_state(gsi, evt_ring_id); |
501 | if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED) |
502 | return; |
503 | |
504 | dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n", |
505 | evt_ring_id, state); |
506 | } |
507 | |
508 | /* Fetch the current state of a channel from hardware */ |
509 | static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel) |
510 | { |
511 | const struct reg *reg = gsi_reg(gsi: channel->gsi, reg_id: CH_C_CNTXT_0); |
512 | u32 channel_id = gsi_channel_id(channel); |
513 | struct gsi *gsi = channel->gsi; |
514 | void __iomem *virt = gsi->virt; |
515 | u32 val; |
516 | |
517 | reg = gsi_reg(gsi, reg_id: CH_C_CNTXT_0); |
518 | val = ioread32(virt + reg_n_offset(reg, n: channel_id)); |
519 | |
520 | return reg_decode(reg, field_id: CHSTATE, val); |
521 | } |
522 | |
523 | /* Issue a channel command and wait for it to complete */ |
524 | static void |
525 | gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode) |
526 | { |
527 | u32 channel_id = gsi_channel_id(channel); |
528 | struct gsi *gsi = channel->gsi; |
529 | struct device *dev = gsi->dev; |
530 | const struct reg *reg; |
531 | bool timeout; |
532 | u32 val; |
533 | |
534 | /* Enable the completion interrupt for the command */ |
535 | gsi_irq_ch_ctrl_enable(gsi, channel_id); |
536 | |
537 | reg = gsi_reg(gsi, reg_id: CH_CMD); |
538 | val = reg_encode(reg, field_id: CH_CHID, val: channel_id); |
539 | val |= reg_encode(reg, field_id: CH_OPCODE, val: opcode); |
540 | |
541 | timeout = !gsi_command(gsi, reg: reg_offset(reg), val); |
542 | |
543 | gsi_irq_ch_ctrl_disable(gsi); |
544 | |
545 | if (!timeout) |
546 | return; |
547 | |
548 | dev_err(dev, "GSI command %u for channel %u timed out, state %u\n", |
549 | opcode, channel_id, gsi_channel_state(channel)); |
550 | } |
551 | |
552 | /* Allocate GSI channel in NOT_ALLOCATED state */ |
553 | static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id) |
554 | { |
555 | struct gsi_channel *channel = &gsi->channel[channel_id]; |
556 | struct device *dev = gsi->dev; |
557 | enum gsi_channel_state state; |
558 | |
559 | /* Get initial channel state */ |
560 | state = gsi_channel_state(channel); |
561 | if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) { |
562 | dev_err(dev, "channel %u bad state %u before alloc\n", |
563 | channel_id, state); |
564 | return -EINVAL; |
565 | } |
566 | |
567 | gsi_channel_command(channel, opcode: GSI_CH_ALLOCATE); |
568 | |
569 | /* If successful the channel state will have changed */ |
570 | state = gsi_channel_state(channel); |
571 | if (state == GSI_CHANNEL_STATE_ALLOCATED) |
572 | return 0; |
573 | |
574 | dev_err(dev, "channel %u bad state %u after alloc\n", |
575 | channel_id, state); |
576 | |
577 | return -EIO; |
578 | } |
579 | |
580 | /* Start an ALLOCATED channel */ |
581 | static int gsi_channel_start_command(struct gsi_channel *channel) |
582 | { |
583 | struct device *dev = channel->gsi->dev; |
584 | enum gsi_channel_state state; |
585 | |
586 | state = gsi_channel_state(channel); |
587 | if (state != GSI_CHANNEL_STATE_ALLOCATED && |
588 | state != GSI_CHANNEL_STATE_STOPPED) { |
589 | dev_err(dev, "channel %u bad state %u before start\n", |
590 | gsi_channel_id(channel), state); |
591 | return -EINVAL; |
592 | } |
593 | |
594 | gsi_channel_command(channel, opcode: GSI_CH_START); |
595 | |
596 | /* If successful the channel state will have changed */ |
597 | state = gsi_channel_state(channel); |
598 | if (state == GSI_CHANNEL_STATE_STARTED) |
599 | return 0; |
600 | |
601 | dev_err(dev, "channel %u bad state %u after start\n", |
602 | gsi_channel_id(channel), state); |
603 | |
604 | return -EIO; |
605 | } |
606 | |
607 | /* Stop a GSI channel in STARTED state */ |
608 | static int gsi_channel_stop_command(struct gsi_channel *channel) |
609 | { |
610 | struct device *dev = channel->gsi->dev; |
611 | enum gsi_channel_state state; |
612 | |
613 | state = gsi_channel_state(channel); |
614 | |
615 | /* Channel could have entered STOPPED state since last call |
616 | * if it timed out. If so, we're done. |
617 | */ |
618 | if (state == GSI_CHANNEL_STATE_STOPPED) |
619 | return 0; |
620 | |
621 | if (state != GSI_CHANNEL_STATE_STARTED && |
622 | state != GSI_CHANNEL_STATE_STOP_IN_PROC) { |
623 | dev_err(dev, "channel %u bad state %u before stop\n", |
624 | gsi_channel_id(channel), state); |
625 | return -EINVAL; |
626 | } |
627 | |
628 | gsi_channel_command(channel, opcode: GSI_CH_STOP); |
629 | |
630 | /* If successful the channel state will have changed */ |
631 | state = gsi_channel_state(channel); |
632 | if (state == GSI_CHANNEL_STATE_STOPPED) |
633 | return 0; |
634 | |
635 | /* We may have to try again if stop is in progress */ |
636 | if (state == GSI_CHANNEL_STATE_STOP_IN_PROC) |
637 | return -EAGAIN; |
638 | |
639 | dev_err(dev, "channel %u bad state %u after stop\n", |
640 | gsi_channel_id(channel), state); |
641 | |
642 | return -EIO; |
643 | } |
644 | |
645 | /* Reset a GSI channel in ALLOCATED or ERROR state. */ |
646 | static void gsi_channel_reset_command(struct gsi_channel *channel) |
647 | { |
648 | struct device *dev = channel->gsi->dev; |
649 | enum gsi_channel_state state; |
650 | |
651 | /* A short delay is required before a RESET command */ |
652 | usleep_range(USEC_PER_MSEC, max: 2 * USEC_PER_MSEC); |
653 | |
654 | state = gsi_channel_state(channel); |
655 | if (state != GSI_CHANNEL_STATE_STOPPED && |
656 | state != GSI_CHANNEL_STATE_ERROR) { |
657 | /* No need to reset a channel already in ALLOCATED state */ |
658 | if (state != GSI_CHANNEL_STATE_ALLOCATED) |
659 | dev_err(dev, "channel %u bad state %u before reset\n", |
660 | gsi_channel_id(channel), state); |
661 | return; |
662 | } |
663 | |
664 | gsi_channel_command(channel, opcode: GSI_CH_RESET); |
665 | |
666 | /* If successful the channel state will have changed */ |
667 | state = gsi_channel_state(channel); |
668 | if (state != GSI_CHANNEL_STATE_ALLOCATED) |
669 | dev_err(dev, "channel %u bad state %u after reset\n", |
670 | gsi_channel_id(channel), state); |
671 | } |
672 | |
673 | /* Deallocate an ALLOCATED GSI channel */ |
674 | static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id) |
675 | { |
676 | struct gsi_channel *channel = &gsi->channel[channel_id]; |
677 | struct device *dev = gsi->dev; |
678 | enum gsi_channel_state state; |
679 | |
680 | state = gsi_channel_state(channel); |
681 | if (state != GSI_CHANNEL_STATE_ALLOCATED) { |
682 | dev_err(dev, "channel %u bad state %u before dealloc\n", |
683 | channel_id, state); |
684 | return; |
685 | } |
686 | |
687 | gsi_channel_command(channel, opcode: GSI_CH_DE_ALLOC); |
688 | |
689 | /* If successful the channel state will have changed */ |
690 | state = gsi_channel_state(channel); |
691 | |
692 | if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) |
693 | dev_err(dev, "channel %u bad state %u after dealloc\n", |
694 | channel_id, state); |
695 | } |
696 | |
697 | /* Ring an event ring doorbell, reporting the last entry processed by the AP. |
698 | * The index argument (modulo the ring count) is the first unfilled entry, so |
699 | * we supply one less than that with the doorbell. Update the event ring |
700 | * index field with the value provided. |
701 | */ |
702 | static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index) |
703 | { |
704 | const struct reg *reg = gsi_reg(gsi, reg_id: EV_CH_E_DOORBELL_0); |
705 | struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring; |
706 | u32 val; |
707 | |
708 | ring->index = index; /* Next unused entry */ |
709 | |
710 | /* Note: index *must* be used modulo the ring count here */ |
711 | val = gsi_ring_addr(ring, index: (index - 1) % ring->count); |
712 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: evt_ring_id)); |
713 | } |
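/* Worked example (the ring size of 8 is hypothetical): with ring->count == 8
 * and index == 5, ring->index records 5 as the next unused entry and the
 * doorbell is written with the address of element (5 - 1) % 8 == 4, the last
 * event the AP has processed.
 */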
714 | |
715 | /* Program an event ring for use */ |
716 | static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id) |
717 | { |
718 | struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; |
719 | struct gsi_ring *ring = &evt_ring->ring; |
720 | const struct reg *reg; |
721 | u32 val; |
722 | |
723 | reg = gsi_reg(gsi, reg_id: EV_CH_E_CNTXT_0); |
724 | /* We program all event rings as GPI type/protocol */ |
725 | val = reg_encode(reg, field_id: EV_CHTYPE, val: GSI_CHANNEL_TYPE_GPI); |
726 | /* EV_EE field is 0 (GSI_EE_AP) */ |
727 | val |= reg_bit(reg, field_id: EV_INTYPE); |
728 | val |= reg_encode(reg, field_id: EV_ELEMENT_SIZE, GSI_RING_ELEMENT_SIZE); |
729 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: evt_ring_id)); |
730 | |
731 | reg = gsi_reg(gsi, reg_id: EV_CH_E_CNTXT_1); |
732 | val = reg_encode(reg, field_id: R_LENGTH, val: ring->count * GSI_RING_ELEMENT_SIZE); |
733 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: evt_ring_id)); |
734 | |
735 | /* The context 2 and 3 registers store the low-order and |
736 | * high-order 32 bits of the address of the event ring, |
737 | * respectively. |
738 | */ |
739 | reg = gsi_reg(gsi, reg_id: EV_CH_E_CNTXT_2); |
740 | val = lower_32_bits(ring->addr); |
741 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: evt_ring_id)); |
742 | |
743 | reg = gsi_reg(gsi, reg_id: EV_CH_E_CNTXT_3); |
744 | val = upper_32_bits(ring->addr); |
745 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: evt_ring_id)); |
746 | |
747 | /* Enable interrupt moderation by setting the moderation delay */ |
748 | reg = gsi_reg(gsi, reg_id: EV_CH_E_CNTXT_8); |
749 | val = reg_encode(reg, field_id: EV_MODT, GSI_EVT_RING_INT_MODT); |
750 | val |= reg_encode(reg, field_id: EV_MODC, val: 1); /* comes from channel */ |
751 | /* EV_MOD_CNT is 0 (no counter-based interrupt coalescing) */ |
752 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: evt_ring_id)); |
753 | |
754 | /* No MSI write data, and MSI high and low address is 0 */ |
755 | reg = gsi_reg(gsi, reg_id: EV_CH_E_CNTXT_9); |
756 | iowrite32(0, gsi->virt + reg_n_offset(reg, n: evt_ring_id)); |
757 | |
758 | reg = gsi_reg(gsi, reg_id: EV_CH_E_CNTXT_10); |
759 | iowrite32(0, gsi->virt + reg_n_offset(reg, n: evt_ring_id)); |
760 | |
761 | reg = gsi_reg(gsi, reg_id: EV_CH_E_CNTXT_11); |
762 | iowrite32(0, gsi->virt + reg_n_offset(reg, n: evt_ring_id)); |
763 | |
764 | /* We don't need to get event read pointer updates */ |
765 | reg = gsi_reg(gsi, reg_id: EV_CH_E_CNTXT_12); |
766 | iowrite32(0, gsi->virt + reg_n_offset(reg, n: evt_ring_id)); |
767 | |
768 | reg = gsi_reg(gsi, reg_id: EV_CH_E_CNTXT_13); |
769 | iowrite32(0, gsi->virt + reg_n_offset(reg, n: evt_ring_id)); |
770 | |
771 | /* Finally, tell the hardware our "last processed" event (arbitrary) */ |
772 | gsi_evt_ring_doorbell(gsi, evt_ring_id, index: ring->index); |
773 | } |
774 | |
775 | /* Find the transaction whose completion indicates a channel is quiesced */ |
776 | static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel) |
777 | { |
778 | struct gsi_trans_info *trans_info = &channel->trans_info; |
779 | u32 pending_id = trans_info->pending_id; |
780 | struct gsi_trans *trans; |
781 | u16 trans_id; |
782 | |
783 | if (channel->toward_ipa && pending_id != trans_info->free_id) { |
784 | /* There is a small chance a TX transaction got allocated |
785 | * just before we disabled transmits, so check for that. |
786 | * The last allocated, committed, or pending transaction |
787 | * precedes the first free transaction. |
788 | */ |
789 | trans_id = trans_info->free_id - 1; |
790 | } else if (trans_info->polled_id != pending_id) { |
791 | /* Otherwise (TX or RX) we want to wait for anything that |
792 | * has completed, or has been polled but not released yet. |
793 | * |
794 | * The last completed or polled transaction precedes the |
795 | * first pending transaction. |
796 | */ |
797 | trans_id = pending_id - 1; |
798 | } else { |
799 | return NULL; |
800 | } |
801 | |
802 | /* Caller will wait for this, so take a reference */ |
803 | trans = &trans_info->trans[trans_id % channel->tre_count]; |
804 | refcount_inc(r: &trans->refcount); |
805 | |
806 | return trans; |
807 | } |
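/* Example of the ID arithmetic above (the values are hypothetical): if
 * free_id is 12, the most recently allocated transaction has ID 11 and
 * occupies slot 11 % channel->tre_count of the trans[] array; similarly the
 * last completed or polled transaction is the one with ID pending_id - 1.
 * The IDs grow monotonically, and the modulo maps them onto array slots.
 */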
808 | |
809 | /* Wait for transaction activity on a channel to complete */ |
810 | static void gsi_channel_trans_quiesce(struct gsi_channel *channel) |
811 | { |
812 | struct gsi_trans *trans; |
813 | |
814 | /* Get the last transaction, and wait for it to complete */ |
815 | trans = gsi_channel_trans_last(channel); |
816 | if (trans) { |
817 | wait_for_completion(&trans->completion); |
818 | gsi_trans_free(trans); |
819 | } |
820 | } |
821 | |
822 | /* Program a channel for use; there is no gsi_channel_deprogram() */ |
823 | static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) |
824 | { |
825 | size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE; |
826 | u32 channel_id = gsi_channel_id(channel); |
827 | union gsi_channel_scratch scr = { }; |
828 | struct gsi_channel_scratch_gpi *gpi; |
829 | struct gsi *gsi = channel->gsi; |
830 | const struct reg *reg; |
831 | u32 wrr_weight = 0; |
832 | u32 offset; |
833 | u32 val; |
834 | |
835 | reg = gsi_reg(gsi, reg_id: CH_C_CNTXT_0); |
836 | |
837 | /* We program all channels as GPI type/protocol */ |
838 | val = ch_c_cntxt_0_type_encode(version: gsi->version, reg, type: GSI_CHANNEL_TYPE_GPI); |
839 | if (channel->toward_ipa) |
840 | val |= reg_bit(reg, field_id: CHTYPE_DIR); |
841 | if (gsi->version < IPA_VERSION_5_0) |
842 | val |= reg_encode(reg, field_id: ERINDEX, val: channel->evt_ring_id); |
843 | val |= reg_encode(reg, field_id: ELEMENT_SIZE, GSI_RING_ELEMENT_SIZE); |
844 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: channel_id)); |
845 | |
846 | reg = gsi_reg(gsi, reg_id: CH_C_CNTXT_1); |
847 | val = reg_encode(reg, field_id: CH_R_LENGTH, val: size); |
848 | if (gsi->version >= IPA_VERSION_5_0) |
849 | val |= reg_encode(reg, field_id: CH_ERINDEX, val: channel->evt_ring_id); |
850 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: channel_id)); |
851 | |
852 | /* The context 2 and 3 registers store the low-order and |
853 | * high-order 32 bits of the address of the channel ring, |
854 | * respectively. |
855 | */ |
856 | reg = gsi_reg(gsi, reg_id: CH_C_CNTXT_2); |
857 | val = lower_32_bits(channel->tre_ring.addr); |
858 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: channel_id)); |
859 | |
860 | reg = gsi_reg(gsi, reg_id: CH_C_CNTXT_3); |
861 | val = upper_32_bits(channel->tre_ring.addr); |
862 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: channel_id)); |
863 | |
864 | reg = gsi_reg(gsi, reg_id: CH_C_QOS); |
865 | |
866 | /* Command channel gets low weighted round-robin priority */ |
867 | if (channel->command) |
868 | wrr_weight = reg_field_max(reg, field_id: WRR_WEIGHT); |
869 | val = reg_encode(reg, field_id: WRR_WEIGHT, val: wrr_weight); |
870 | |
871 | /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */ |
872 | |
873 | /* No need to use the doorbell engine starting at IPA v4.0 */ |
874 | if (gsi->version < IPA_VERSION_4_0 && doorbell) |
875 | val |= reg_bit(reg, field_id: USE_DB_ENG); |
876 | |
877 | /* v4.0 introduces an escape buffer for prefetch. We use it |
878 | * on all but the AP command channel. |
879 | */ |
880 | if (gsi->version >= IPA_VERSION_4_0 && !channel->command) { |
881 | /* If not otherwise set, prefetch buffers are used */ |
882 | if (gsi->version < IPA_VERSION_4_5) |
883 | val |= reg_bit(reg, field_id: USE_ESCAPE_BUF_ONLY); |
884 | else |
885 | val |= reg_encode(reg, field_id: PREFETCH_MODE, val: ESCAPE_BUF_ONLY); |
886 | } |
887 | /* All channels set DB_IN_BYTES */ |
888 | if (gsi->version >= IPA_VERSION_4_9) |
889 | val |= reg_bit(reg, field_id: DB_IN_BYTES); |
890 | |
891 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: channel_id)); |
892 | |
893 | /* Now update the scratch registers for GPI protocol */ |
894 | gpi = &scr.gpi; |
895 | gpi->max_outstanding_tre = channel->trans_tre_max * |
896 | GSI_RING_ELEMENT_SIZE; |
897 | gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE; |
898 | |
899 | reg = gsi_reg(gsi, reg_id: CH_C_SCRATCH_0); |
900 | val = scr.data.word1; |
901 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: channel_id)); |
902 | |
903 | reg = gsi_reg(gsi, reg_id: CH_C_SCRATCH_1); |
904 | val = scr.data.word2; |
905 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: channel_id)); |
906 | |
907 | reg = gsi_reg(gsi, reg_id: CH_C_SCRATCH_2); |
908 | val = scr.data.word3; |
909 | iowrite32(val, gsi->virt + reg_n_offset(reg, n: channel_id)); |
910 | |
911 | /* We must preserve the upper 16 bits of the last scratch register. |
912 | * The next sequence assumes those bits remain unchanged between the |
913 | * read and the write. |
914 | */ |
915 | reg = gsi_reg(gsi, reg_id: CH_C_SCRATCH_3); |
916 | offset = reg_n_offset(reg, n: channel_id); |
917 | val = ioread32(gsi->virt + offset); |
918 | val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0)); |
919 | iowrite32(val, gsi->virt + offset); |
920 | |
921 | /* All done! */ |
922 | } |
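/* Example of the GPI scratch values computed above (the trans_tre_max value
 * is hypothetical): with GSI_RING_ELEMENT_SIZE == 16 and trans_tre_max == 8,
 * max_outstanding_tre is 8 * 16 == 128 bytes and outstanding_threshold is
 * 2 * 16 == 32 bytes, i.e. the size of two TREs, as described for
 * struct gsi_channel_scratch_gpi.
 */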
923 | |
924 | static int __gsi_channel_start(struct gsi_channel *channel, bool resume) |
925 | { |
926 | struct gsi *gsi = channel->gsi; |
927 | int ret; |
928 | |
929 | /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */ |
930 | if (resume && gsi->version < IPA_VERSION_4_0) |
931 | return 0; |
932 | |
933 | mutex_lock(&gsi->mutex); |
934 | |
935 | ret = gsi_channel_start_command(channel); |
936 | |
937 | mutex_unlock(lock: &gsi->mutex); |
938 | |
939 | return ret; |
940 | } |
941 | |
942 | /* Start an allocated GSI channel */ |
943 | int gsi_channel_start(struct gsi *gsi, u32 channel_id) |
944 | { |
945 | struct gsi_channel *channel = &gsi->channel[channel_id]; |
946 | int ret; |
947 | |
948 | /* Enable NAPI and the completion interrupt */ |
949 | napi_enable(n: &channel->napi); |
950 | gsi_irq_ieob_enable_one(gsi, evt_ring_id: channel->evt_ring_id); |
951 | |
952 | ret = __gsi_channel_start(channel, resume: false); |
953 | if (ret) { |
954 | gsi_irq_ieob_disable_one(gsi, evt_ring_id: channel->evt_ring_id); |
955 | napi_disable(n: &channel->napi); |
956 | } |
957 | |
958 | return ret; |
959 | } |
960 | |
961 | static int gsi_channel_stop_retry(struct gsi_channel *channel) |
962 | { |
963 | u32 retries = GSI_CHANNEL_STOP_RETRIES; |
964 | int ret; |
965 | |
966 | do { |
967 | ret = gsi_channel_stop_command(channel); |
968 | if (ret != -EAGAIN) |
969 | break; |
970 | usleep_range(min: 3 * USEC_PER_MSEC, max: 5 * USEC_PER_MSEC); |
971 | } while (retries--); |
972 | |
973 | return ret; |
974 | } |
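/* Note on the bound above: the do/while issues the stop command up to
 * 1 + GSI_CHANNEL_STOP_RETRIES times (11 attempts), sleeping 3-5 ms between
 * attempts, so a channel stuck in STOP_IN_PROC gets several tens of
 * milliseconds (plus any command timeouts) before -EAGAIN is returned.
 */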
975 | |
976 | static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend) |
977 | { |
978 | struct gsi *gsi = channel->gsi; |
979 | int ret; |
980 | |
981 | /* Wait for any underway transactions to complete before stopping. */ |
982 | gsi_channel_trans_quiesce(channel); |
983 | |
984 | /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */ |
985 | if (suspend && gsi->version < IPA_VERSION_4_0) |
986 | return 0; |
987 | |
988 | mutex_lock(&gsi->mutex); |
989 | |
990 | ret = gsi_channel_stop_retry(channel); |
991 | |
992 | mutex_unlock(lock: &gsi->mutex); |
993 | |
994 | return ret; |
995 | } |
996 | |
997 | /* Stop a started channel */ |
998 | int gsi_channel_stop(struct gsi *gsi, u32 channel_id) |
999 | { |
1000 | struct gsi_channel *channel = &gsi->channel[channel_id]; |
1001 | int ret; |
1002 | |
1003 | ret = __gsi_channel_stop(channel, suspend: false); |
1004 | if (ret) |
1005 | return ret; |
1006 | |
1007 | /* Disable the completion interrupt and NAPI if successful */ |
1008 | gsi_irq_ieob_disable_one(gsi, evt_ring_id: channel->evt_ring_id); |
1009 | napi_disable(n: &channel->napi); |
1010 | |
1011 | return 0; |
1012 | } |
1013 | |
1014 | /* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */ |
1015 | void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell) |
1016 | { |
1017 | struct gsi_channel *channel = &gsi->channel[channel_id]; |
1018 | |
1019 | mutex_lock(&gsi->mutex); |
1020 | |
1021 | gsi_channel_reset_command(channel); |
1022 | /* Due to a hardware quirk we may need to reset RX channels twice. */ |
1023 | if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa) |
1024 | gsi_channel_reset_command(channel); |
1025 | |
1026 | /* Hardware assumes this is 0 following reset */ |
1027 | channel->tre_ring.index = 0; |
1028 | gsi_channel_program(channel, doorbell); |
1029 | gsi_channel_trans_cancel_pending(channel); |
1030 | |
1031 | mutex_unlock(lock: &gsi->mutex); |
1032 | } |
1033 | |
1034 | /* Stop a started channel for suspend */ |
1035 | int gsi_channel_suspend(struct gsi *gsi, u32 channel_id) |
1036 | { |
1037 | struct gsi_channel *channel = &gsi->channel[channel_id]; |
1038 | int ret; |
1039 | |
1040 | ret = __gsi_channel_stop(channel, suspend: true); |
1041 | if (ret) |
1042 | return ret; |
1043 | |
1044 | /* Ensure NAPI polling has finished. */ |
1045 | napi_synchronize(n: &channel->napi); |
1046 | |
1047 | return 0; |
1048 | } |
1049 | |
1050 | /* Resume a suspended channel (starting if stopped) */ |
1051 | int gsi_channel_resume(struct gsi *gsi, u32 channel_id) |
1052 | { |
1053 | struct gsi_channel *channel = &gsi->channel[channel_id]; |
1054 | |
1055 | return __gsi_channel_start(channel, resume: true); |
1056 | } |
1057 | |
1058 | /* Prevent all GSI interrupts while suspended */ |
1059 | void gsi_suspend(struct gsi *gsi) |
1060 | { |
1061 | disable_irq(irq: gsi->irq); |
1062 | } |
1063 | |
1064 | /* Allow all GSI interrupts again when resuming */ |
1065 | void gsi_resume(struct gsi *gsi) |
1066 | { |
1067 | enable_irq(irq: gsi->irq); |
1068 | } |
1069 | |
1070 | void gsi_trans_tx_committed(struct gsi_trans *trans) |
1071 | { |
1072 | struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id]; |
1073 | |
1074 | channel->trans_count++; |
1075 | channel->byte_count += trans->len; |
1076 | |
1077 | trans->trans_count = channel->trans_count; |
1078 | trans->byte_count = channel->byte_count; |
1079 | } |
1080 | |
1081 | void gsi_trans_tx_queued(struct gsi_trans *trans) |
1082 | { |
1083 | u32 channel_id = trans->channel_id; |
1084 | struct gsi *gsi = trans->gsi; |
1085 | struct gsi_channel *channel; |
1086 | u32 trans_count; |
1087 | u32 byte_count; |
1088 | |
1089 | channel = &gsi->channel[channel_id]; |
1090 | |
1091 | byte_count = channel->byte_count - channel->queued_byte_count; |
1092 | trans_count = channel->trans_count - channel->queued_trans_count; |
1093 | channel->queued_byte_count = channel->byte_count; |
1094 | channel->queued_trans_count = channel->trans_count; |
1095 | |
1096 | ipa_gsi_channel_tx_queued(gsi, channel_id, count: trans_count, byte_count); |
1097 | } |
1098 | |
1099 | /** |
1100 | * gsi_trans_tx_completed() - Report completed TX transactions |
1101 | * @trans: TX channel transaction that has completed |
1102 | * |
1103 | * Report that a transaction on a TX channel has completed. At the time a |
1104 | * transaction is committed, we record *in the transaction* its channel's |
1105 | * committed transaction and byte counts. Transactions are completed in |
1106 | * order, and the difference between the channel's byte/transaction count |
1107 | * when the transaction was committed and when it completes tells us |
1108 | * exactly how much data has been transferred while the transaction was |
1109 | * pending. |
1110 | * |
1111 | * We report this information to the network stack, which uses it to manage |
1112 | * the rate at which data is sent to hardware. |
1113 | */ |
1114 | static void gsi_trans_tx_completed(struct gsi_trans *trans) |
1115 | { |
1116 | u32 channel_id = trans->channel_id; |
1117 | struct gsi *gsi = trans->gsi; |
1118 | struct gsi_channel *channel; |
1119 | u32 trans_count; |
1120 | u32 byte_count; |
1121 | |
1122 | channel = &gsi->channel[channel_id]; |
1123 | trans_count = trans->trans_count - channel->compl_trans_count; |
1124 | byte_count = trans->byte_count - channel->compl_byte_count; |
1125 | |
1126 | channel->compl_trans_count += trans_count; |
1127 | channel->compl_byte_count += byte_count; |
1128 | |
1129 | ipa_gsi_channel_tx_completed(gsi, channel_id, count: trans_count, byte_count); |
1130 | } |
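/* Worked example of the delta accounting (numbers hypothetical): if a
 * transaction was committed when the channel's trans_count was 10 and its
 * byte_count was 4000, and compl_trans_count/compl_byte_count are 7 and 2500
 * when it completes, this completion reports 10 - 7 == 3 transactions and
 * 4000 - 2500 == 1500 bytes to the network stack, and the compl_* counters
 * advance to 10 and 4000.
 */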
1131 | |
1132 | /* Channel control interrupt handler */ |
1133 | static void gsi_isr_chan_ctrl(struct gsi *gsi) |
1134 | { |
1135 | const struct reg *reg; |
1136 | u32 channel_mask; |
1137 | |
1138 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_CH_IRQ); |
1139 | channel_mask = ioread32(gsi->virt + reg_offset(reg)); |
1140 | |
1141 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_CH_IRQ_CLR); |
1142 | iowrite32(channel_mask, gsi->virt + reg_offset(reg)); |
1143 | |
1144 | while (channel_mask) { |
1145 | u32 channel_id = __ffs(channel_mask); |
1146 | |
1147 | channel_mask ^= BIT(channel_id); |
1148 | |
1149 | complete(&gsi->completion); |
1150 | } |
1151 | } |
1152 | |
1153 | /* Event ring control interrupt handler */ |
1154 | static void gsi_isr_evt_ctrl(struct gsi *gsi) |
1155 | { |
1156 | const struct reg *reg; |
1157 | u32 event_mask; |
1158 | |
1159 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_EV_CH_IRQ); |
1160 | event_mask = ioread32(gsi->virt + reg_offset(reg)); |
1161 | |
1162 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_EV_CH_IRQ_CLR); |
1163 | iowrite32(event_mask, gsi->virt + reg_offset(reg)); |
1164 | |
1165 | while (event_mask) { |
1166 | u32 evt_ring_id = __ffs(event_mask); |
1167 | |
1168 | event_mask ^= BIT(evt_ring_id); |
1169 | |
1170 | complete(&gsi->completion); |
1171 | } |
1172 | } |
1173 | |
1174 | /* Global channel error interrupt handler */ |
1175 | static void |
1176 | gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code) |
1177 | { |
1178 | if (code == GSI_OUT_OF_RESOURCES) { |
1179 | dev_err(gsi->dev, "channel %u out of resources\n", channel_id); |
1180 | complete(&gsi->completion); |
1181 | return; |
1182 | } |
1183 | |
1184 | /* Report, but otherwise ignore all other error codes */ |
1185 | dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n", |
1186 | channel_id, err_ee, code); |
1187 | } |
1188 | |
1189 | /* Global event error interrupt handler */ |
1190 | static void |
1191 | gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code) |
1192 | { |
1193 | if (code == GSI_OUT_OF_RESOURCES) { |
1194 | struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; |
1195 | u32 channel_id = gsi_channel_id(channel: evt_ring->channel); |
1196 | |
1197 | complete(&gsi->completion); |
1198 | dev_err(gsi->dev, "evt_ring for channel %u out of resources\n", |
1199 | channel_id); |
1200 | return; |
1201 | } |
1202 | |
1203 | /* Report, but otherwise ignore all other error codes */ |
1204 | dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n", |
1205 | evt_ring_id, err_ee, code); |
1206 | } |
1207 | |
1208 | /* Global error interrupt handler */ |
1209 | static void gsi_isr_glob_err(struct gsi *gsi) |
1210 | { |
1211 | const struct reg *log_reg; |
1212 | const struct reg *clr_reg; |
1213 | enum gsi_err_type type; |
1214 | enum gsi_err_code code; |
1215 | u32 offset; |
1216 | u32 which; |
1217 | u32 val; |
1218 | u32 ee; |
1219 | |
1220 | /* Get the logged error, then reinitialize the log */ |
1221 | log_reg = gsi_reg(gsi, reg_id: ERROR_LOG); |
1222 | offset = reg_offset(reg: log_reg); |
1223 | val = ioread32(gsi->virt + offset); |
1224 | iowrite32(0, gsi->virt + offset); |
1225 | |
1226 | clr_reg = gsi_reg(gsi, reg_id: ERROR_LOG_CLR); |
1227 | iowrite32(~0, gsi->virt + reg_offset(reg: clr_reg)); |
1228 | |
1229 | /* Parse the error value */ |
1230 | ee = reg_decode(reg: log_reg, field_id: ERR_EE, val); |
1231 | type = reg_decode(reg: log_reg, field_id: ERR_TYPE, val); |
1232 | which = reg_decode(reg: log_reg, field_id: ERR_VIRT_IDX, val); |
1233 | code = reg_decode(reg: log_reg, field_id: ERR_CODE, val); |
1234 | |
1235 | if (type == GSI_ERR_TYPE_CHAN) |
1236 | gsi_isr_glob_chan_err(gsi, err_ee: ee, channel_id: which, code); |
1237 | else if (type == GSI_ERR_TYPE_EVT) |
1238 | gsi_isr_glob_evt_err(gsi, err_ee: ee, evt_ring_id: which, code); |
1239 | else /* type GSI_ERR_TYPE_GLOB should be fatal */ |
1240 | dev_err(gsi->dev, "unexpected global error 0x%08x\n", type); |
1241 | } |
1242 | |
1243 | /* Generic EE interrupt handler */ |
1244 | static void gsi_isr_gp_int1(struct gsi *gsi) |
1245 | { |
1246 | const struct reg *reg; |
1247 | u32 result; |
1248 | u32 val; |
1249 | |
1250 | /* This interrupt is used to handle completions of GENERIC GSI |
1251 | * commands. We use these to allocate and halt channels on the |
1252 | * modem's behalf due to a hardware quirk on IPA v4.2. The modem |
1253 | * "owns" channels even when the AP allocates them, and have no |
1254 | * way of knowing whether a modem channel's state has been changed. |
1255 | * |
1256 | * We also use GENERIC commands to enable/disable channel flow |
1257 | * control for IPA v4.2+. |
1258 | * |
1259 | * It is recommended that we halt the modem channels we allocated |
1260 | * when shutting down, but it's possible the channel isn't running |
1261 | * at the time we issue the HALT command. We'll get an error in |
1262 | * that case, but it's harmless (the channel is already halted). |
1263 | * Similarly, we could get an error back when updating flow control |
1264 | * on a channel because it's not in the proper state. |
1265 | * |
1266 | * In either case, we silently ignore an INCORRECT_CHANNEL_STATE |
1267 | * error if we receive it. |
1268 | */ |
1269 | reg = gsi_reg(gsi, reg_id: CNTXT_SCRATCH_0); |
1270 | val = ioread32(gsi->virt + reg_offset(reg)); |
1271 | result = reg_decode(reg, field_id: GENERIC_EE_RESULT, val); |
1272 | |
1273 | switch (result) { |
1274 | case GENERIC_EE_SUCCESS: |
1275 | case GENERIC_EE_INCORRECT_CHANNEL_STATE: |
1276 | gsi->result = 0; |
1277 | break; |
1278 | |
1279 | case GENERIC_EE_RETRY: |
1280 | gsi->result = -EAGAIN; |
1281 | break; |
1282 | |
1283 | default: |
1284 | dev_err(gsi->dev, "global INT1 generic result %u\n", result); |
1285 | gsi->result = -EIO; |
1286 | break; |
1287 | } |
1288 | |
1289 | complete(&gsi->completion); |
1290 | } |
1291 | |
1292 | /* Inter-EE interrupt handler */ |
1293 | static void gsi_isr_glob_ee(struct gsi *gsi) |
1294 | { |
1295 | const struct reg *reg; |
1296 | u32 val; |
1297 | |
1298 | reg = gsi_reg(gsi, reg_id: CNTXT_GLOB_IRQ_STTS); |
1299 | val = ioread32(gsi->virt + reg_offset(reg)); |
1300 | |
1301 | if (val & ERROR_INT) |
1302 | gsi_isr_glob_err(gsi); |
1303 | |
1304 | reg = gsi_reg(gsi, reg_id: CNTXT_GLOB_IRQ_CLR); |
1305 | iowrite32(val, gsi->virt + reg_offset(reg)); |
1306 | |
1307 | val &= ~ERROR_INT; |
1308 | |
1309 | if (val & GP_INT1) { |
1310 | val ^= GP_INT1; |
1311 | gsi_isr_gp_int1(gsi); |
1312 | } |
1313 | |
1314 | if (val) |
1315 | dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val); |
1316 | } |
1317 | |
1318 | /* I/O completion interrupt event */ |
1319 | static void gsi_isr_ieob(struct gsi *gsi) |
1320 | { |
1321 | const struct reg *reg; |
1322 | u32 event_mask; |
1323 | |
1324 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_IEOB_IRQ); |
1325 | event_mask = ioread32(gsi->virt + reg_offset(reg)); |
1326 | |
1327 | gsi_irq_ieob_disable(gsi, event_mask); |
1328 | |
1329 | reg = gsi_reg(gsi, reg_id: CNTXT_SRC_IEOB_IRQ_CLR); |
1330 | iowrite32(event_mask, gsi->virt + reg_offset(reg)); |
1331 | |
1332 | while (event_mask) { |
1333 | u32 evt_ring_id = __ffs(event_mask); |
1334 | |
1335 | event_mask ^= BIT(evt_ring_id); |
1336 | |
1337 | napi_schedule(n: &gsi->evt_ring[evt_ring_id].channel->napi); |
1338 | } |
1339 | } |
1340 | |
1341 | /* General event interrupts represent serious problems, so report them */ |
1342 | static void gsi_isr_general(struct gsi *gsi) |
1343 | { |
1344 | struct device *dev = gsi->dev; |
1345 | const struct reg *reg; |
1346 | u32 val; |
1347 | |
1348 | reg = gsi_reg(gsi, reg_id: CNTXT_GSI_IRQ_STTS); |
1349 | val = ioread32(gsi->virt + reg_offset(reg)); |
1350 | |
1351 | reg = gsi_reg(gsi, reg_id: CNTXT_GSI_IRQ_CLR); |
1352 | iowrite32(val, gsi->virt + reg_offset(reg)); |
1353 | |
1354 | dev_err(dev, "unexpected general interrupt 0x%08x\n", val); |
1355 | } |
1356 | |
1357 | /** |
1358 | * gsi_isr() - Top level GSI interrupt service routine |
1359 | * @irq: Interrupt number (ignored) |
1360 | * @dev_id: GSI pointer supplied to request_irq() |
1361 | * |
1362 | * This is the main handler function registered for the GSI IRQ. Each type |
1363 | * of interrupt has a separate handler function that is called from here. |
1364 | */ |
1365 | static irqreturn_t gsi_isr(int irq, void *dev_id) |
1366 | { |
1367 | struct gsi *gsi = dev_id; |
1368 | const struct reg *reg; |
1369 | u32 intr_mask; |
1370 | u32 cnt = 0; |
1371 | u32 offset; |
1372 | |
1373 | reg = gsi_reg(gsi, reg_id: CNTXT_TYPE_IRQ); |
1374 | offset = reg_offset(reg); |
1375 | |
1376 | /* enum gsi_irq_type_id defines GSI interrupt types */ |
1377 | while ((intr_mask = ioread32(gsi->virt + offset))) { |
1378 | /* intr_mask contains bitmask of pending GSI interrupts */ |
1379 | do { |
1380 | u32 gsi_intr = BIT(__ffs(intr_mask)); |
1381 | |
1382 | intr_mask ^= gsi_intr; |
1383 | |
1384 | /* Note: the IRQ condition for each type is cleared |
1385 | * when the type-specific register is updated. |
1386 | */ |
1387 | switch (gsi_intr) { |
1388 | case GSI_CH_CTRL: |
1389 | gsi_isr_chan_ctrl(gsi); |
1390 | break; |
1391 | case GSI_EV_CTRL: |
1392 | gsi_isr_evt_ctrl(gsi); |
1393 | break; |
1394 | case GSI_GLOB_EE: |
1395 | gsi_isr_glob_ee(gsi); |
1396 | break; |
1397 | case GSI_IEOB: |
1398 | gsi_isr_ieob(gsi); |
1399 | break; |
1400 | case GSI_GENERAL: |
1401 | gsi_isr_general(gsi); |
1402 | break; |
1403 | default: |
1404 | dev_err(gsi->dev, |
1405 | "unrecognized interrupt type 0x%08x\n", |
1406 | gsi_intr); |
1407 | break; |
1408 | } |
1409 | } while (intr_mask); |
1410 | |
1411 | if (++cnt > GSI_ISR_MAX_ITER) { |
1412 | dev_err(gsi->dev, "interrupt flood\n"); |
1413 | break; |
1414 | } |
1415 | } |
1416 | |
1417 | return IRQ_HANDLED; |
1418 | } |
1419 | |
1420 | /* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */ |
1421 | static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev) |
1422 | { |
1423 | int ret; |
1424 | |
1425 | ret = platform_get_irq_byname(pdev, "gsi"); |
1426 | if (ret <= 0) |
1427 | return ret ? : -EINVAL; |
1428 | |
1429 | gsi->irq = ret; |
1430 | |
1431 | return 0; |
1432 | } |
1433 | |
1434 | /* Return the transaction associated with a transfer completion event */ |
1435 | static struct gsi_trans * |
1436 | gsi_event_trans(struct gsi *gsi, struct gsi_event *event) |
1437 | { |
1438 | u32 channel_id = event->chid; |
1439 | struct gsi_channel *channel; |
1440 | struct gsi_trans *trans; |
1441 | u32 tre_offset; |
1442 | u32 tre_index; |
1443 | |
1444 | channel = &gsi->channel[channel_id]; |
1445 | if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id)) |
1446 | return NULL; |
1447 | |
1448 | /* Event xfer_ptr records the TRE it's associated with */ |
1449 | tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr)); |
1450 | tre_index = gsi_ring_index(ring: &channel->tre_ring, offset: tre_offset); |
1451 | |
1452 | trans = gsi_channel_trans_mapped(channel, index: tre_index); |
1453 | |
1454 | if (WARN(!trans, "channel %u event with no transaction\n", channel_id)) |
1455 | return NULL; |
1456 | |
1457 | return trans; |
1458 | } |
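/* Example of the lookup above (addresses hypothetical): if the channel's TRE
 * ring starts at DMA address 0x1000 and an event's xfer_ptr is 0x1040, then
 * gsi_ring_index() yields (0x1040 - 0x1000) / 16 == 4 and the transaction
 * mapped to TRE slot 4 is returned.
 */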
1459 | |
1460 | /** |
1461 | * gsi_evt_ring_update() - Update transaction state from hardware |
1462 | * @gsi: GSI pointer |
1463 | * @evt_ring_id: Event ring ID |
1464 | * @index: Event index in ring reported by hardware |
1465 | * |
1466 | * Events for RX channels contain the actual number of bytes received into |
1467 | * the buffer. Every event has a transaction associated with it, and here |
1468 | * we update transactions to record their actual received lengths. |
1469 | * |
1470 | * When an event for a TX channel arrives we use information in the |
1471 | * transaction to report the number of requests and bytes that have |
1472 | * been transferred. |
1473 | * |
1474 | * This function is called whenever we learn that the GSI hardware has filled |
1475 | * new events since the last time we checked. The ring's index field identifies |
1476 | * the first entry in need of processing. The index provided is the |
1477 | * first *unfilled* event in the ring (following the last filled one). |
1478 | * |
1479 | * Events are sequential within the event ring, and transactions are |
1480 | * sequential within the transaction array. |
1481 | * |
1482 | * Note that @index always refers to an element *within* the event ring. |
1483 | */ |
1484 | static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index) |
1485 | { |
1486 | struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; |
1487 | struct gsi_ring *ring = &evt_ring->ring; |
1488 | struct gsi_event *event_done; |
1489 | struct gsi_event *event; |
1490 | u32 event_avail; |
1491 | u32 old_index; |
1492 | |
1493 | /* Starting with the oldest un-processed event, determine which |
1494 | * transaction (and which channel) is associated with the event. |
1495 | * For RX channels, update each completed transaction with the |
1496 | * number of bytes that were actually received. For TX channels |
1497 | * associated with a network device, report to the network stack |
1498 | * the number of transfers and bytes this completion represents. |
1499 | */ |
1500 | old_index = ring->index; |
1501 | event = gsi_ring_virt(ring, index: old_index); |
1502 | |
1503 | /* Compute the number of events to process before we wrap, |
1504 | * and determine when we'll be done processing events. |
1505 | */ |
1506 | event_avail = ring->count - old_index % ring->count; |
1507 | event_done = gsi_ring_virt(ring, index); |
1508 | do { |
1509 | struct gsi_trans *trans; |
1510 | |
1511 | trans = gsi_event_trans(gsi, event); |
1512 | if (!trans) |
1513 | return; |
1514 | |
1515 | if (trans->direction == DMA_FROM_DEVICE) |
1516 | trans->len = __le16_to_cpu(event->len); |
1517 | else |
1518 | gsi_trans_tx_completed(trans); |
1519 | |
1520 | gsi_trans_move_complete(trans); |
1521 | |
1522 | /* Move on to the next event and transaction */ |
1523 | if (--event_avail) |
1524 | event++; |
1525 | else |
1526 | event = gsi_ring_virt(ring, 0); |
1527 | } while (event != event_done); |
1528 | |
1529 | /* Tell the hardware we've handled these events */ |
1530 | gsi_evt_ring_doorbell(gsi, evt_ring_id, index); |
1531 | } |
1532 | |
1533 | /* Initialize a ring, including allocating DMA memory for its entries */ |
1534 | static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) |
1535 | { |
1536 | u32 size = count * GSI_RING_ELEMENT_SIZE; |
1537 | struct device *dev = gsi->dev; |
1538 | dma_addr_t addr; |
1539 | |
1540 | /* Hardware requires a 2^n ring size, with alignment equal to size. |
1541 | * The DMA address returned by dma_alloc_coherent() is guaranteed to |
1542 | * be a power-of-2 number of pages, which satisfies the requirement. |
1543 | */ |
1544 | ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); |
1545 | if (!ring->virt) |
1546 | return -ENOMEM; |
1547 | |
1548 | ring->addr = addr; |
1549 | ring->count = count; |
1550 | ring->index = 0; |
1551 | |
1552 | return 0; |
1553 | } |
1554 | |
1555 | /* Free a previously-allocated ring */ |
1556 | static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring) |
1557 | { |
1558 | size_t size = ring->count * GSI_RING_ELEMENT_SIZE; |
1559 | |
1560 | dma_free_coherent(gsi->dev, size, ring->virt, ring->addr); |
1561 | } |
1562 | |
1563 | /* Allocate an available event ring id */ |
1564 | static int gsi_evt_ring_id_alloc(struct gsi *gsi) |
1565 | { |
1566 | u32 evt_ring_id; |
1567 | |
1568 | if (gsi->event_bitmap == ~0U) { |
1569 | dev_err(gsi->dev, "event rings exhausted\n"); |
1570 | return -ENOSPC; |
1571 | } |
1572 | |
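/* ffz() returns the position of the first zero (available) bit */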
1573 | evt_ring_id = ffz(gsi->event_bitmap); |
1574 | gsi->event_bitmap |= BIT(evt_ring_id); |
1575 | |
1576 | return (int)evt_ring_id; |
1577 | } |
1578 | |
1579 | /* Free a previously-allocated event ring id */ |
1580 | static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id) |
1581 | { |
1582 | gsi->event_bitmap &= ~BIT(evt_ring_id); |
1583 | } |
1584 | |
1585 | /* Ring a channel doorbell, reporting the first un-filled entry */ |
1586 | void gsi_channel_doorbell(struct gsi_channel *channel) |
1587 | { |
1588 | struct gsi_ring *tre_ring = &channel->tre_ring; |
1589 | u32 channel_id = gsi_channel_id(channel); |
1590 | struct gsi *gsi = channel->gsi; |
1591 | const struct reg *reg; |
1592 | u32 val; |
1593 | |
1594 | reg = gsi_reg(gsi, CH_C_DOORBELL_0); |
1595 | /* Note: index *must* be used modulo the ring count here */ |
1596 | val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count); |
1597 | iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id)); |
1598 | } |
1599 | |
1600 | /* Consult hardware, move newly completed transactions to completed state */ |
1601 | void gsi_channel_update(struct gsi_channel *channel) |
1602 | { |
1603 | u32 evt_ring_id = channel->evt_ring_id; |
1604 | struct gsi *gsi = channel->gsi; |
1605 | struct gsi_evt_ring *evt_ring; |
1606 | struct gsi_trans *trans; |
1607 | struct gsi_ring *ring; |
1608 | const struct reg *reg; |
1609 | u32 offset; |
1610 | u32 index; |
1611 | |
1612 | evt_ring = &gsi->evt_ring[evt_ring_id]; |
1613 | ring = &evt_ring->ring; |
1614 | |
1615 | /* See if there's anything new to process; if not, we're done. Note |
1616 | * that index always refers to an entry *within* the event ring. |
1617 | */ |
1618 | reg = gsi_reg(gsi, EV_CH_E_CNTXT_4); |
1619 | offset = reg_n_offset(reg, evt_ring_id); |
1620 | index = gsi_ring_index(ring, ioread32(gsi->virt + offset)); |
1621 | if (index == ring->index % ring->count) |
1622 | return; |
1623 | |
1624 | /* Get the transaction for the latest completed event. */ |
1625 | trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1)); |
1626 | if (!trans) |
1627 | return; |
1628 | |
1629 | /* For RX channels, update each completed transaction with the number |
1630 | * of bytes that were actually received. For TX channels, report |
1631 | * the number of transactions and bytes this completion represents |
1632 | * up the network stack. |
1633 | */ |
1634 | gsi_evt_ring_update(gsi, evt_ring_id, index); |
1635 | } |
1636 | |
1637 | /** |
1638 | * gsi_channel_poll_one() - Return a single completed transaction on a channel |
1639 | * @channel: Channel to be polled |
1640 | * |
1641 | * Return: Transaction pointer, or null if none are available |
1642 | * |
1643 | * This function returns the first of a channel's completed transactions. |
1644 | * If no transactions are in completed state, the hardware is consulted to |
1645 | * determine whether any new transactions have completed. If so, they're |
1646 | * moved to completed state and the first such transaction is returned. |
1647 | * If there are no more completed transactions, a null pointer is returned. |
1648 | */ |
1649 | static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel) |
1650 | { |
1651 | struct gsi_trans *trans; |
1652 | |
1653 | /* Get the first completed transaction */ |
1654 | trans = gsi_channel_trans_complete(channel); |
1655 | if (trans) |
1656 | gsi_trans_move_polled(trans); |
1657 | |
1658 | return trans; |
1659 | } |
1660 | |
1661 | /** |
1662 | * gsi_channel_poll() - NAPI poll function for a channel |
1663 | * @napi: NAPI structure for the channel |
1664 | * @budget: Budget supplied by NAPI core |
1665 | * |
1666 | * Return: Number of items polled (<= budget) |
1667 | * |
1668 | * Single transactions completed by hardware are polled until either |
1669 | * the budget is exhausted, or there are no more. Each transaction |
1670 | * polled is passed to gsi_trans_complete(), to perform remaining |
1671 | * completion processing and retire/free the transaction. |
1672 | */ |
1673 | static int gsi_channel_poll(struct napi_struct *napi, int budget) |
1674 | { |
1675 | struct gsi_channel *channel; |
1676 | int count; |
1677 | |
1678 | channel = container_of(napi, struct gsi_channel, napi); |
1679 | for (count = 0; count < budget; count++) { |
1680 | struct gsi_trans *trans; |
1681 | |
1682 | trans = gsi_channel_poll_one(channel); |
1683 | if (!trans) |
1684 | break; |
1685 | gsi_trans_complete(trans); |
1686 | } |
1687 | |
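/* Processing fewer transactions than the budget means we've handled
 * everything available; finish NAPI polling and re-enable the IEOB
 * (event ring) interrupt for this channel.
 */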
1688 | if (count < budget && napi_complete(napi)) |
1689 | gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id); |
1690 | |
1691 | return count; |
1692 | } |
1693 | |
1694 | /* The event bitmap represents which event ids are available for allocation. |
1695 | * Set bits are not available, clear bits can be used. This function |
1696 | * initializes the map so all events supported by the hardware are available, |
1697 | * then precludes any reserved events from being allocated. |
1698 | */ |
1699 | static u32 gsi_event_bitmap_init(u32 evt_ring_max) |
1700 | { |
1701 | u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max); |
1702 | |
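/* Mark the event ring IDs reserved for MHI as unavailable */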
1703 | event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START); |
1704 | |
1705 | return event_bitmap; |
1706 | } |
1707 | |
1708 | /* Setup function for a single channel */ |
1709 | static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id) |
1710 | { |
1711 | struct gsi_channel *channel = &gsi->channel[channel_id]; |
1712 | u32 evt_ring_id = channel->evt_ring_id; |
1713 | int ret; |
1714 | |
1715 | if (!gsi_channel_initialized(channel)) |
1716 | return 0; |
1717 | |
1718 | ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id); |
1719 | if (ret) |
1720 | return ret; |
1721 | |
1722 | gsi_evt_ring_program(gsi, evt_ring_id); |
1723 | |
1724 | ret = gsi_channel_alloc_command(gsi, channel_id); |
1725 | if (ret) |
1726 | goto err_evt_ring_de_alloc; |
1727 | |
1728 | gsi_channel_program(channel, true); |
1729 | |
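/* TX (toward IPA) channels get a TX-specific NAPI context; RX channels get a normal one */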
1730 | if (channel->toward_ipa) |
1731 | netif_napi_add_tx(gsi->dummy_dev, &channel->napi, |
1732 | gsi_channel_poll); |
1733 | else |
1734 | netif_napi_add(gsi->dummy_dev, &channel->napi, |
1735 | gsi_channel_poll); |
1736 | |
1737 | return 0; |
1738 | |
1739 | err_evt_ring_de_alloc: |
1740 | /* We've done nothing with the event ring yet so don't reset */ |
1741 | gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); |
1742 | |
1743 | return ret; |
1744 | } |
1745 | |
1746 | /* Inverse of gsi_channel_setup_one() */ |
1747 | static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id) |
1748 | { |
1749 | struct gsi_channel *channel = &gsi->channel[channel_id]; |
1750 | u32 evt_ring_id = channel->evt_ring_id; |
1751 | |
1752 | if (!gsi_channel_initialized(channel)) |
1753 | return; |
1754 | |
1755 | netif_napi_del(&channel->napi); |
1756 | |
1757 | gsi_channel_de_alloc_command(gsi, channel_id); |
1758 | gsi_evt_ring_reset_command(gsi, evt_ring_id); |
1759 | gsi_evt_ring_de_alloc_command(gsi, evt_ring_id); |
1760 | } |
1761 | |
1762 | /* We use generic commands only to operate on modem channels. We don't have |
1763 | * the ability to determine channel state for a modem channel, so we simply |
1764 | * issue the command and wait for it to complete. |
1765 | */ |
1766 | static int gsi_generic_command(struct gsi *gsi, u32 channel_id, |
1767 | enum gsi_generic_cmd_opcode opcode, |
1768 | u8 params) |
1769 | { |
1770 | const struct reg *reg; |
1771 | bool timeout; |
1772 | u32 offset; |
1773 | u32 val; |
1774 | |
1775 | /* The error global interrupt type is always enabled (until we tear |
1776 | * down), so we will keep it enabled. |
1777 | * |
1778 | * A generic EE command completes with a GSI global interrupt of |
1779 | * type GP_INT1. We only perform one generic command at a time |
1780 | * (to allocate, halt, or enable/disable flow control on a modem |
1781 | * channel), and only from this function. So we enable the GP_INT1 |
1782 | * IRQ type here, and disable it again after the command completes. |
1783 | */ |
1784 | reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN); |
1785 | val = ERROR_INT | GP_INT1; |
1786 | iowrite32(val, gsi->virt + reg_offset(reg)); |
1787 | |
1788 | /* First zero the result code field */ |
1789 | reg = gsi_reg(gsi, CNTXT_SCRATCH_0); |
1790 | offset = reg_offset(reg); |
1791 | val = ioread32(gsi->virt + offset); |
1792 | |
1793 | val &= ~reg_fmask(reg, GENERIC_EE_RESULT); |
1794 | iowrite32(val, gsi->virt + offset); |
1795 | |
1796 | /* Now issue the command */ |
1797 | reg = gsi_reg(gsi, GENERIC_CMD); |
1798 | val = reg_encode(reg, GENERIC_OPCODE, opcode); |
1799 | val |= reg_encode(reg, GENERIC_CHID, channel_id); |
1800 | val |= reg_encode(reg, GENERIC_EE, GSI_EE_MODEM); |
1801 | if (gsi->version >= IPA_VERSION_4_11) |
1802 | val |= reg_encode(reg, GENERIC_PARAMS, params); |
1803 | |
1804 | timeout = !gsi_command(gsi, reg_offset(reg), val); |
1805 | |
1806 | /* Disable the GP_INT1 IRQ type again */ |
1807 | reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN); |
1808 | iowrite32(ERROR_INT, gsi->virt + reg_offset(reg)); |
1809 | |
1810 | if (!timeout) |
1811 | return gsi->result; |
1812 | |
1813 | dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n", |
1814 | opcode, channel_id); |
1815 | |
1816 | return -ETIMEDOUT; |
1817 | } |
1818 | |
1819 | static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id) |
1820 | { |
1821 | return gsi_generic_command(gsi, channel_id, |
1822 | GSI_GENERIC_ALLOCATE_CHANNEL, 0); |
1823 | } |
1824 | |
1825 | static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id) |
1826 | { |
1827 | u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES; |
1828 | int ret; |
1829 | |
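/* The halt command can return -EAGAIN; retry it a limited number of times */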
1830 | do |
1831 | ret = gsi_generic_command(gsi, channel_id, |
1832 | GSI_GENERIC_HALT_CHANNEL, 0); |
1833 | while (ret == -EAGAIN && retries--); |
1834 | |
1835 | if (ret) |
1836 | dev_err(gsi->dev, "error %d halting modem channel %u\n", |
1837 | ret, channel_id); |
1838 | } |
1839 | |
1840 | /* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */ |
1841 | void |
1842 | gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable) |
1843 | { |
1844 | u32 retries = 0; |
1845 | u32 command; |
1846 | int ret; |
1847 | |
1848 | command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL |
1849 | : GSI_GENERIC_DISABLE_FLOW_CONTROL; |
1850 | /* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable |
1851 | * is underway. In this case we need to retry the command. |
1852 | */ |
1853 | if (!enable && gsi->version >= IPA_VERSION_4_11) |
1854 | retries = GSI_CHANNEL_MODEM_FLOW_RETRIES; |
1855 | |
1856 | do |
1857 | ret = gsi_generic_command(gsi, channel_id, command, 0); |
1858 | while (ret == -EAGAIN && retries--); |
1859 | |
1860 | if (ret) |
1861 | dev_err(gsi->dev, |
1862 | "error %d %sabling mode channel %u flow control\n", |
1863 | ret, enable ? "en": "dis", channel_id); |
1864 | } |
1865 | |
1866 | /* Setup function for channels */ |
1867 | static int gsi_channel_setup(struct gsi *gsi) |
1868 | { |
1869 | u32 channel_id = 0; |
1870 | u32 mask; |
1871 | int ret; |
1872 | |
1873 | gsi_irq_enable(gsi); |
1874 | |
1875 | mutex_lock(&gsi->mutex); |
1876 | |
1877 | do { |
1878 | ret = gsi_channel_setup_one(gsi, channel_id); |
1879 | if (ret) |
1880 | goto err_unwind; |
1881 | } while (++channel_id < gsi->channel_count); |
1882 | |
1883 | /* Make sure no channels were defined that hardware does not support */ |
1884 | while (channel_id < GSI_CHANNEL_COUNT_MAX) { |
1885 | struct gsi_channel *channel = &gsi->channel[channel_id++]; |
1886 | |
1887 | if (!gsi_channel_initialized(channel)) |
1888 | continue; |
1889 | |
1890 | ret = -EINVAL; |
1891 | dev_err(gsi->dev, "channel %u not supported by hardware\n", |
1892 | channel_id - 1); |
1893 | channel_id = gsi->channel_count; |
1894 | goto err_unwind; |
1895 | } |
1896 | |
1897 | /* Allocate modem channels if necessary */ |
1898 | mask = gsi->modem_channel_bitmap; |
1899 | while (mask) { |
1900 | u32 modem_channel_id = __ffs(mask); |
1901 | |
1902 | ret = gsi_modem_channel_alloc(gsi, modem_channel_id); |
1903 | if (ret) |
1904 | goto err_unwind_modem; |
1905 | |
1906 | /* Clear bit from mask only after success (for unwind) */ |
1907 | mask ^= BIT(modem_channel_id); |
1908 | } |
1909 | |
1910 | mutex_unlock(&gsi->mutex); |
1911 | |
1912 | return 0; |
1913 | |
1914 | err_unwind_modem: |
1915 | /* Compute which modem channels need to be deallocated */ |
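/* A bit was cleared from mask for each channel successfully allocated,
 * so XOR with the full bitmap leaves only the already-allocated ones.
 */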
1916 | mask ^= gsi->modem_channel_bitmap; |
1917 | while (mask) { |
1918 | channel_id = __fls(mask); |
1919 | |
1920 | mask ^= BIT(channel_id); |
1921 | |
1922 | gsi_modem_channel_halt(gsi, channel_id); |
1923 | } |
1924 | |
1925 | err_unwind: |
1926 | while (channel_id--) |
1927 | gsi_channel_teardown_one(gsi, channel_id); |
1928 | |
1929 | mutex_unlock(&gsi->mutex); |
1930 | |
1931 | gsi_irq_disable(gsi); |
1932 | |
1933 | return ret; |
1934 | } |
1935 | |
1936 | /* Inverse of gsi_channel_setup() */ |
1937 | static void gsi_channel_teardown(struct gsi *gsi) |
1938 | { |
1939 | u32 mask = gsi->modem_channel_bitmap; |
1940 | u32 channel_id; |
1941 | |
1942 | mutex_lock(&gsi->mutex); |
1943 | |
1944 | while (mask) { |
1945 | channel_id = __fls(mask); |
1946 | |
1947 | mask ^= BIT(channel_id); |
1948 | |
1949 | gsi_modem_channel_halt(gsi, channel_id); |
1950 | } |
1951 | |
1952 | channel_id = gsi->channel_count - 1; |
1953 | do |
1954 | gsi_channel_teardown_one(gsi, channel_id); |
1955 | while (channel_id--); |
1956 | |
1957 | mutex_unlock(&gsi->mutex); |
1958 | |
1959 | gsi_irq_disable(gsi); |
1960 | } |
1961 | |
1962 | /* Turn off all GSI interrupts initially */ |
1963 | static int gsi_irq_setup(struct gsi *gsi) |
1964 | { |
1965 | const struct reg *reg; |
1966 | int ret; |
1967 | |
1968 | /* Writing 1 indicates IRQ interrupts; 0 would be MSI */ |
1969 | reg = gsi_reg(gsi, CNTXT_INTSET); |
1970 | iowrite32(reg_bit(reg, INTYPE), gsi->virt + reg_offset(reg)); |
1971 | |
1972 | /* Disable all interrupt types */ |
1973 | gsi_irq_type_update(gsi, 0); |
1974 | |
1975 | /* Clear all type-specific interrupt masks */ |
1976 | reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK); |
1977 | iowrite32(0, gsi->virt + reg_offset(reg)); |
1978 | |
1979 | reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK); |
1980 | iowrite32(0, gsi->virt + reg_offset(reg)); |
1981 | |
1982 | reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN); |
1983 | iowrite32(0, gsi->virt + reg_offset(reg)); |
1984 | |
1985 | reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK); |
1986 | iowrite32(0, gsi->virt + reg_offset(reg)); |
1987 | |
1988 | /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */ |
1989 | if (gsi->version > IPA_VERSION_3_1) { |
1990 | reg = gsi_reg(gsi, INTER_EE_SRC_CH_IRQ_MSK); |
1991 | iowrite32(0, gsi->virt + reg_offset(reg)); |
1992 | |
1993 | reg = gsi_reg(gsi, INTER_EE_SRC_EV_CH_IRQ_MSK); |
1994 | iowrite32(0, gsi->virt + reg_offset(reg)); |
1995 | } |
1996 | |
1997 | reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN); |
1998 | iowrite32(0, gsi->virt + reg_offset(reg)); |
1999 | |
2000 | ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi); |
2001 | if (ret) |
2002 | dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret); |
2003 | |
2004 | return ret; |
2005 | } |
2006 | |
2007 | static void gsi_irq_teardown(struct gsi *gsi) |
2008 | { |
2009 | free_irq(gsi->irq, gsi); |
2010 | } |
2011 | |
2012 | /* Get # supported channel and event rings; there is no gsi_ring_teardown() */ |
2013 | static int gsi_ring_setup(struct gsi *gsi) |
2014 | { |
2015 | struct device *dev = gsi->dev; |
2016 | const struct reg *reg; |
2017 | u32 count; |
2018 | u32 val; |
2019 | |
2020 | if (gsi->version < IPA_VERSION_3_5_1) { |
2021 | /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */ |
2022 | gsi->channel_count = GSI_CHANNEL_COUNT_MAX; |
2023 | gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX; |
2024 | |
2025 | return 0; |
2026 | } |
2027 | |
2028 | reg = gsi_reg(gsi, HW_PARAM_2); |
2029 | val = ioread32(gsi->virt + reg_offset(reg)); |
2030 | |
2031 | count = reg_decode(reg, NUM_CH_PER_EE, val); |
2032 | if (!count) { |
2033 | dev_err(dev, "GSI reports zero channels supported\n"); |
2034 | return -EINVAL; |
2035 | } |
2036 | if (count > GSI_CHANNEL_COUNT_MAX) { |
2037 | dev_warn(dev, "limiting to %u channels; hardware supports %u\n", |
2038 | GSI_CHANNEL_COUNT_MAX, count); |
2039 | count = GSI_CHANNEL_COUNT_MAX; |
2040 | } |
2041 | gsi->channel_count = count; |
2042 | |
2043 | if (gsi->version < IPA_VERSION_5_0) { |
2044 | count = reg_decode(reg, NUM_EV_PER_EE, val); |
2045 | } else { |
2046 | reg = gsi_reg(gsi, HW_PARAM_4); |
2047 | count = reg_decode(reg, EV_PER_EE, val); |
2048 | } |
2049 | if (!count) { |
2050 | dev_err(dev, "GSI reports zero event rings supported\n"); |
2051 | return -EINVAL; |
2052 | } |
2053 | if (count > GSI_EVT_RING_COUNT_MAX) { |
2054 | dev_warn(dev, |
2055 | "limiting to %u event rings; hardware supports %u\n", |
2056 | GSI_EVT_RING_COUNT_MAX, count); |
2057 | count = GSI_EVT_RING_COUNT_MAX; |
2058 | } |
2059 | gsi->evt_ring_count = count; |
2060 | |
2061 | return 0; |
2062 | } |
2063 | |
2064 | /* Setup function for GSI. GSI firmware must be loaded and initialized */ |
2065 | int gsi_setup(struct gsi *gsi) |
2066 | { |
2067 | const struct reg *reg; |
2068 | u32 val; |
2069 | int ret; |
2070 | |
2071 | /* Here is where we first touch the GSI hardware */ |
2072 | reg = gsi_reg(gsi, GSI_STATUS); |
2073 | val = ioread32(gsi->virt + reg_offset(reg)); |
2074 | if (!(val & reg_bit(reg, ENABLED))) { |
2075 | dev_err(gsi->dev, "GSI has not been enabled\n"); |
2076 | return -EIO; |
2077 | } |
2078 | |
2079 | ret = gsi_irq_setup(gsi); |
2080 | if (ret) |
2081 | return ret; |
2082 | |
2083 | ret = gsi_ring_setup(gsi); /* No matching teardown required */ |
2084 | if (ret) |
2085 | goto err_irq_teardown; |
2086 | |
2087 | /* Initialize the error log */ |
2088 | reg = gsi_reg(gsi, ERROR_LOG); |
2089 | iowrite32(0, gsi->virt + reg_offset(reg)); |
2090 | |
2091 | ret = gsi_channel_setup(gsi); |
2092 | if (ret) |
2093 | goto err_irq_teardown; |
2094 | |
2095 | return 0; |
2096 | |
2097 | err_irq_teardown: |
2098 | gsi_irq_teardown(gsi); |
2099 | |
2100 | return ret; |
2101 | } |
2102 | |
2103 | /* Inverse of gsi_setup() */ |
2104 | void gsi_teardown(struct gsi *gsi) |
2105 | { |
2106 | gsi_channel_teardown(gsi); |
2107 | gsi_irq_teardown(gsi); |
2108 | } |
2109 | |
2110 | /* Initialize a channel's event ring */ |
2111 | static int gsi_channel_evt_ring_init(struct gsi_channel *channel) |
2112 | { |
2113 | struct gsi *gsi = channel->gsi; |
2114 | struct gsi_evt_ring *evt_ring; |
2115 | int ret; |
2116 | |
2117 | ret = gsi_evt_ring_id_alloc(gsi); |
2118 | if (ret < 0) |
2119 | return ret; |
2120 | channel->evt_ring_id = ret; |
2121 | |
2122 | evt_ring = &gsi->evt_ring[channel->evt_ring_id]; |
2123 | evt_ring->channel = channel; |
2124 | |
2125 | ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count); |
2126 | if (!ret) |
2127 | return 0; /* Success! */ |
2128 | |
2129 | dev_err(gsi->dev, "error %d allocating channel %u event ring\n", |
2130 | ret, gsi_channel_id(channel)); |
2131 | |
2132 | gsi_evt_ring_id_free(gsi, channel->evt_ring_id); |
2133 | |
2134 | return ret; |
2135 | } |
2136 | |
2137 | /* Inverse of gsi_channel_evt_ring_init() */ |
2138 | static void gsi_channel_evt_ring_exit(struct gsi_channel *channel) |
2139 | { |
2140 | u32 evt_ring_id = channel->evt_ring_id; |
2141 | struct gsi *gsi = channel->gsi; |
2142 | struct gsi_evt_ring *evt_ring; |
2143 | |
2144 | evt_ring = &gsi->evt_ring[evt_ring_id]; |
2145 | gsi_ring_free(gsi, &evt_ring->ring); |
2146 | gsi_evt_ring_id_free(gsi, evt_ring_id); |
2147 | } |
2148 | |
2149 | static bool gsi_channel_data_valid(struct gsi *gsi, bool command, |
2150 | const struct ipa_gsi_endpoint_data *data) |
2151 | { |
2152 | const struct gsi_channel_data *channel_data; |
2153 | u32 channel_id = data->channel_id; |
2154 | struct device *dev = gsi->dev; |
2155 | |
2156 | /* Make sure channel ids are in the range driver supports */ |
2157 | if (channel_id >= GSI_CHANNEL_COUNT_MAX) { |
2158 | dev_err(dev, "bad channel id %u; must be less than %u\n", |
2159 | channel_id, GSI_CHANNEL_COUNT_MAX); |
2160 | return false; |
2161 | } |
2162 | |
2163 | if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) { |
2164 | dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id); |
2165 | return false; |
2166 | } |
2167 | |
2168 | if (command && !data->toward_ipa) { |
2169 | dev_err(dev, "command channel %u is not TX\n", channel_id); |
2170 | return false; |
2171 | } |
2172 | |
2173 | channel_data = &data->channel; |
2174 | |
2175 | if (!channel_data->tlv_count || |
2176 | channel_data->tlv_count > GSI_TLV_MAX) { |
2177 | dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n", |
2178 | channel_id, channel_data->tlv_count, GSI_TLV_MAX); |
2179 | return false; |
2180 | } |
2181 | |
2182 | if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) { |
2183 | dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n", |
2184 | channel_id, IPA_COMMAND_TRANS_TRE_MAX, |
2185 | channel_data->tlv_count); |
2186 | return false; |
2187 | } |
2188 | |
2189 | /* We have to allow at least one maximally-sized transaction to |
2190 | * be outstanding (which would use tlv_count TREs). Given how |
2191 | * gsi_channel_tre_max() is computed, tre_count has to be almost |
2192 | * twice the TLV FIFO size to satisfy this requirement. |
2193 | */ |
2194 | if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) { |
2195 | dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n", |
2196 | channel_id, channel_data->tlv_count, |
2197 | channel_data->tre_count); |
2198 | return false; |
2199 | } |
2200 | |
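/* Ring sizes must be powers of 2 (hardware requirement; see gsi_ring_alloc()) */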
2201 | if (!is_power_of_2(channel_data->tre_count)) { |
2202 | dev_err(dev, "channel %u bad tre_count %u; not power of 2\n", |
2203 | channel_id, channel_data->tre_count); |
2204 | return false; |
2205 | } |
2206 | |
2207 | if (!is_power_of_2(channel_data->event_count)) { |
2208 | dev_err(dev, "channel %u bad event_count %u; not power of 2\n", |
2209 | channel_id, channel_data->event_count); |
2210 | return false; |
2211 | } |
2212 | |
2213 | return true; |
2214 | } |
2215 | |
2216 | /* Init function for a single channel */ |
2217 | static int gsi_channel_init_one(struct gsi *gsi, |
2218 | const struct ipa_gsi_endpoint_data *data, |
2219 | bool command) |
2220 | { |
2221 | struct gsi_channel *channel; |
2222 | u32 tre_count; |
2223 | int ret; |
2224 | |
2225 | if (!gsi_channel_data_valid(gsi, command, data)) |
2226 | return -EINVAL; |
2227 | |
2228 | /* Worst case we need an event for every outstanding TRE */ |
2229 | if (data->channel.tre_count > data->channel.event_count) { |
2230 | tre_count = data->channel.event_count; |
2231 | dev_warn(gsi->dev, "channel %u limited to %u TREs\n", |
2232 | data->channel_id, tre_count); |
2233 | } else { |
2234 | tre_count = data->channel.tre_count; |
2235 | } |
2236 | |
2237 | channel = &gsi->channel[data->channel_id]; |
2238 | memset(channel, 0, sizeof(*channel)); |
2239 | |
2240 | channel->gsi = gsi; |
2241 | channel->toward_ipa = data->toward_ipa; |
2242 | channel->command = command; |
2243 | channel->trans_tre_max = data->channel.tlv_count; |
2244 | channel->tre_count = tre_count; |
2245 | channel->event_count = data->channel.event_count; |
2246 | |
2247 | ret = gsi_channel_evt_ring_init(channel); |
2248 | if (ret) |
2249 | goto err_clear_gsi; |
2250 | |
2251 | ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count); |
2252 | if (ret) { |
2253 | dev_err(gsi->dev, "error %d allocating channel %u ring\n", |
2254 | ret, data->channel_id); |
2255 | goto err_channel_evt_ring_exit; |
2256 | } |
2257 | |
2258 | ret = gsi_channel_trans_init(gsi, data->channel_id); |
2259 | if (ret) |
2260 | goto err_ring_free; |
2261 | |
2262 | if (command) { |
2263 | u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id); |
2264 | |
2265 | ret = ipa_cmd_pool_init(channel, tre_max); |
2266 | } |
2267 | if (!ret) |
2268 | return 0; /* Success! */ |
2269 | |
2270 | gsi_channel_trans_exit(channel); |
2271 | err_ring_free: |
2272 | gsi_ring_free(gsi, &channel->tre_ring); |
2273 | err_channel_evt_ring_exit: |
2274 | gsi_channel_evt_ring_exit(channel); |
2275 | err_clear_gsi: |
2276 | channel->gsi = NULL; /* Mark it not (fully) initialized */ |
2277 | |
2278 | return ret; |
2279 | } |
2280 | |
2281 | /* Inverse of gsi_channel_init_one() */ |
2282 | static void gsi_channel_exit_one(struct gsi_channel *channel) |
2283 | { |
2284 | if (!gsi_channel_initialized(channel)) |
2285 | return; |
2286 | |
2287 | if (channel->command) |
2288 | ipa_cmd_pool_exit(channel); |
2289 | gsi_channel_trans_exit(channel); |
2290 | gsi_ring_free(channel->gsi, &channel->tre_ring); |
2291 | gsi_channel_evt_ring_exit(channel); |
2292 | } |
2293 | |
2294 | /* Init function for channels */ |
2295 | static int gsi_channel_init(struct gsi *gsi, u32 count, |
2296 | const struct ipa_gsi_endpoint_data *data) |
2297 | { |
2298 | bool modem_alloc; |
2299 | int ret = 0; |
2300 | u32 i; |
2301 | |
2302 | /* IPA v4.2 requires the AP to allocate channels for the modem */ |
2303 | modem_alloc = gsi->version == IPA_VERSION_4_2; |
2304 | |
2305 | gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX); |
2306 | gsi->ieob_enabled_bitmap = 0; |
2307 | |
2308 | /* The endpoint data array is indexed by endpoint name */ |
2309 | for (i = 0; i < count; i++) { |
2310 | bool command = i == IPA_ENDPOINT_AP_COMMAND_TX; |
2311 | |
2312 | if (ipa_gsi_endpoint_data_empty(&data[i])) |
2313 | continue; /* Skip over empty slots */ |
2314 | |
2315 | /* Mark modem channels to be allocated (hardware workaround) */ |
2316 | if (data[i].ee_id == GSI_EE_MODEM) { |
2317 | if (modem_alloc) |
2318 | gsi->modem_channel_bitmap |= |
2319 | BIT(data[i].channel_id); |
2320 | continue; |
2321 | } |
2322 | |
2323 | ret = gsi_channel_init_one(gsi, &data[i], command); |
2324 | if (ret) |
2325 | goto err_unwind; |
2326 | } |
2327 | |
2328 | return ret; |
2329 | |
2330 | err_unwind: |
2331 | while (i--) { |
2332 | if (ipa_gsi_endpoint_data_empty(&data[i])) |
2333 | continue; |
2334 | if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) { |
2335 | gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id); |
2336 | continue; |
2337 | } |
2338 | gsi_channel_exit_one(&gsi->channel[data[i].channel_id]); |
2339 | } |
2340 | |
2341 | return ret; |
2342 | } |
2343 | |
2344 | /* Inverse of gsi_channel_init() */ |
2345 | static void gsi_channel_exit(struct gsi *gsi) |
2346 | { |
2347 | u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1; |
2348 | |
2349 | do |
2350 | gsi_channel_exit_one(&gsi->channel[channel_id]); |
2351 | while (channel_id--); |
2352 | gsi->modem_channel_bitmap = 0; |
2353 | } |
2354 | |
2355 | /* Init function for GSI. GSI hardware does not need to be "ready" */ |
2356 | int gsi_init(struct gsi *gsi, struct platform_device *pdev, |
2357 | enum ipa_version version, u32 count, |
2358 | const struct ipa_gsi_endpoint_data *data) |
2359 | { |
2360 | int ret; |
2361 | |
2362 | gsi_validate_build(); |
2363 | |
2364 | gsi->dev = &pdev->dev; |
2365 | gsi->version = version; |
2366 | |
2367 | /* GSI uses NAPI on all channels. Create a dummy network device |
2368 | * for the channel NAPI contexts to be associated with. |
2369 | */ |
2370 | gsi->dummy_dev = alloc_netdev_dummy(0); |
2371 | if (!gsi->dummy_dev) |
2372 | return -ENOMEM; |
2373 | init_completion(&gsi->completion); |
2374 | |
2375 | ret = gsi_reg_init(gsi, pdev); |
2376 | if (ret) |
2377 | goto err_reg_exit; |
2378 | |
2379 | ret = gsi_irq_init(gsi, pdev); /* No matching exit required */ |
2380 | if (ret) |
2381 | goto err_reg_exit; |
2382 | |
2383 | ret = gsi_channel_init(gsi, count, data); |
2384 | if (ret) |
2385 | goto err_reg_exit; |
2386 | |
2387 | mutex_init(&gsi->mutex); |
2388 | |
2389 | return 0; |
2390 | |
2391 | err_reg_exit: |
2392 | free_netdev(gsi->dummy_dev); |
2393 | gsi_reg_exit(gsi); |
2394 | |
2395 | return ret; |
2396 | } |
2397 | |
2398 | /* Inverse of gsi_init() */ |
2399 | void gsi_exit(struct gsi *gsi) |
2400 | { |
2401 | mutex_destroy(&gsi->mutex); |
2402 | gsi_channel_exit(gsi); |
2403 | free_netdev(gsi->dummy_dev); |
2404 | gsi_reg_exit(gsi); |
2405 | } |
2406 | |
2407 | /* The maximum number of outstanding TREs on a channel. This limits |
2408 | * a channel's maximum number of transactions outstanding (worst case |
2409 | * is one TRE per transaction). |
2410 | * |
2411 | * The absolute limit is the number of TREs in the channel's TRE ring, |
2412 | * and in theory we should be able to use all of them. But in practice, |
2413 | * doing that led to the hardware reporting exhaustion of event ring |
2414 | * slots for writing completion information. So the hardware limit |
2415 | * would be (tre_count - 1). |
2416 | * |
2417 | * We reduce it a bit further though. Transaction resource pools are |
2418 | * sized to be a little larger than this maximum, to allow resource |
2419 | * allocations to always be contiguous. The number of entries in a |
2420 | * TRE ring buffer is a power of 2, and the extra resources in a pool |
2421 | * tend to nearly double the memory allocated for it. Reducing the |
2422 | * maximum number of outstanding TREs allows the number of entries in |
2423 | * a pool to avoid crossing that power-of-2 boundary, and this can |
2424 | * substantially reduce pool memory requirements. The number we |
2425 | * reduce it by matches the number added in gsi_trans_pool_init(). |
2426 | */ |
2427 | u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id) |
2428 | { |
2429 | struct gsi_channel *channel = &gsi->channel[channel_id]; |
2430 | |
2431 | /* Hardware limit is channel->tre_count - 1 */ |
2432 | return channel->tre_count - (channel->trans_tre_max - 1); |
2433 | } |
2434 |