// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * KFD Interrupts.
 *
 * AMD GPUs deliver interrupts by pushing an interrupt description onto the
 * interrupt ring and then sending an interrupt. KGD receives the interrupt
 * in ISR and sends us a pointer to each new entry on the interrupt ring.
 *
 * We generally can't process interrupt-signaled events from ISR, so we call
 * out to each interrupt client module (currently only the scheduler) to ask
 * if each interrupt is interesting. If a client returns true, the entry
 * needs further processing, so we copy it to an internal interrupt ring and
 * call each interrupt client again from a work-queue.
 *
 * There's no acknowledgment for the interrupts we use. The hardware simply
 * queues a new interrupt each time without waiting.
 *
 * The fixed-size internal queue means that it's possible for us to lose
 * interrupts because we have no back-pressure to the hardware.
 */

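/*
 * Sketch of the producer side, for orientation. The actual entry point is
 * kgd2kfd_interrupt() (outside this file); treat this as an illustrative
 * outline rather than a verbatim copy:
 *
 *	spin_lock_irqsave(&kfd->interrupt_lock, flags);
 *	if (kfd->interrupts_active &&
 *	    interrupt_is_wanted(kfd, ih_ring_entry, patched_ihre, &patched) &&
 *	    enqueue_ih_ring_entry(kfd, patched ? patched_ihre : ih_ring_entry))
 *		queue_work(kfd->ih_wq, &kfd->interrupt_work);
 *	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
 */
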
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kfifo.h>
#include "kfd_priv.h"

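/*
 * Capacity of the internal software fifo: KFD_IH_NUM_ENTRIES entries of
 * ih_ring_entry_size bytes each. When the fifo is full, new entries are
 * silently dropped (see enqueue_ih_ring_entry()); there is no back-pressure
 * to the hardware.
 */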
#define KFD_IH_NUM_ENTRIES 8192

static void interrupt_wq(struct work_struct *);

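/*
 * Set up deferred interrupt handling for one device: a software fifo that
 * the ISR writes into and a high-priority workqueue that drains it. There
 * is a single work item, so only one instance of interrupt_wq runs at a
 * time, which is what makes the fifo's single-reader assumption hold.
 */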
int kfd_interrupt_init(struct kfd_dev *kfd)
{
	int r;

	r = kfifo_alloc(&kfd->ih_fifo,
		KFD_IH_NUM_ENTRIES * kfd->device_info.ih_ring_entry_size,
		GFP_KERNEL);
	if (r) {
		dev_err(kfd->adev->dev, "Failed to allocate IH fifo\n");
		return r;
	}

	kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
	if (unlikely(!kfd->ih_wq)) {
		kfifo_free(&kfd->ih_fifo);
		dev_err(kfd->adev->dev, "Failed to allocate KFD IH workqueue\n");
		return -ENOMEM;
	}
	spin_lock_init(&kfd->interrupt_lock);

	INIT_WORK(&kfd->interrupt_work, interrupt_wq);

	kfd->interrupts_active = true;

	/*
	 * After this function returns, interrupts will be enabled. This
	 * barrier ensures that an interrupt handler running on a different
	 * processor sees all the above writes.
	 */
	smp_wmb();

	return 0;
}

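/*
 * Tear down in reverse order: stop the ISR from enqueuing new entries
 * (under interrupt_lock), flush any work item already queued, then free
 * the fifo.
 */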
void kfd_interrupt_exit(struct kfd_dev *kfd)
{
	/*
	 * Stop the interrupt handler from writing to the ring and scheduling
	 * workqueue items. The spinlock ensures that any interrupt running
	 * after we have unlocked sees interrupts_active = false.
	 */
	unsigned long flags;

	spin_lock_irqsave(&kfd->interrupt_lock, flags);
	kfd->interrupts_active = false;
	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);

	/*
	 * flush_workqueue ensures that there are no outstanding work-queue
	 * items that will access ih_fifo. New work items can't be created
	 * because we stopped interrupt handling above.
	 */
	flush_workqueue(kfd->ih_wq);

	kfifo_free(&kfd->ih_fifo);
}

/*
 * Assumption: single reader/writer. This function is not re-entrant.
 * The single writer is the ISR, serialized by kfd->interrupt_lock.
 */
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	int count;

	count = kfifo_in(&kfd->ih_fifo, ih_ring_entry,
				kfd->device_info.ih_ring_entry_size);
	if (count != kfd->device_info.ih_ring_entry_size) {
		dev_dbg_ratelimited(kfd->adev->dev,
			"Interrupt ring overflow, dropping interrupt %d\n",
			count);
		return false;
	}

	return true;
}

/*
 * Assumption: single reader/writer. This function is not re-entrant.
 * The single reader is the interrupt_wq worker; only one instance of it
 * runs at a time.
 */
static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
{
	int count;

	count = kfifo_out(&kfd->ih_fifo, ih_ring_entry,
				kfd->device_info.ih_ring_entry_size);

	WARN_ON(count && count != kfd->device_info.ih_ring_entry_size);

	return count == kfd->device_info.ih_ring_entry_size;
}

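/*
 * Bottom half: drains the fifo one entry at a time and hands each entry to
 * the ASIC-specific event_interrupt_class handler. The loop requeues
 * itself and bails out after roughly a second so that a busy interrupt
 * stream doesn't trigger soft-lockup warnings.
 */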
static void interrupt_wq(struct work_struct *work)
{
	struct kfd_dev *dev = container_of(work, struct kfd_dev,
						interrupt_work);
	uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE];
	unsigned long start_jiffies = jiffies;

	if (dev->device_info.ih_ring_entry_size > sizeof(ih_ring_entry)) {
		dev_err_once(dev->adev->dev, "Ring entry too small\n");
		return;
	}

	while (dequeue_ih_ring_entry(dev, ih_ring_entry)) {
		dev->device_info.event_interrupt_class->interrupt_wq(dev,
								ih_ring_entry);
		if (time_is_before_jiffies(start_jiffies + HZ)) {
			/* If we spent more than a second processing signals,
			 * reschedule the worker to avoid soft-lockup warnings.
			 */
			queue_work(dev->ih_wq, &dev->interrupt_work);
			break;
		}
	}
}

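/*
 * Called from the ISR to ask the ASIC-specific handler whether an
 * interrupt ring entry needs bottom-half processing. The handler may write
 * a fixed-up copy of the entry to patched_ihre and report that through
 * *flag, in which case the caller should enqueue the patched copy instead.
 */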
bool interrupt_is_wanted(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry,
			uint32_t *patched_ihre, bool *flag)
{
	/* integer and bitwise OR so there is no boolean short-circuiting */
	unsigned int wanted = 0;

	wanted |= dev->device_info.event_interrupt_class->interrupt_isr(dev,
				ih_ring_entry, patched_ihre, flag);

	return wanted != 0;
}