// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV OPAL asynchronous completion interfaces
 *
 * Copyright 2013-2017 IBM Corp.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/opal.h>

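/*
 * A token moves through these states:
 *
 *   UNALLOCATED -> ALLOCATED	opal_async_get_token_interruptible()
 *   ALLOCATED -> DISPATCHED	first opal_async_wait_response_interruptible()
 *   DISPATCHED -> ABANDONED	opal_async_release_token() while OPAL still
 *				owes a response
 *   any state -> COMPLETED	OPAL delivers the async completion message
 *   COMPLETED/ALLOCATED -> UNALLOCATED   opal_async_release_token()
 */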
enum opal_async_token_state {
	ASYNC_TOKEN_UNALLOCATED = 0,
	ASYNC_TOKEN_ALLOCATED,
	ASYNC_TOKEN_DISPATCHED,
	ASYNC_TOKEN_ABANDONED,
	ASYNC_TOKEN_COMPLETED
};

struct opal_async_token {
	enum opal_async_token_state state;
	struct opal_msg response;
};

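/*
 * opal_async_sem counts the free tokens; allocators sleep on it until
 * one is available. opal_async_comp_lock serialises all token state
 * transitions, including those made from the completion notifier.
 */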
static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait);
static DEFINE_SPINLOCK(opal_async_comp_lock);
static struct semaphore opal_async_sem;
static unsigned int opal_max_async_tokens;
static struct opal_async_token *opal_async_tokens;

static int __opal_async_get_token(void)
{
	unsigned long flags;
	int i, token = -EBUSY;

	spin_lock_irqsave(&opal_async_comp_lock, flags);

	/*
	 * The semaphore held by our caller guarantees that a free token
	 * exists; -EBUSY is only a defensive fallback.
	 */
	for (i = 0; i < opal_max_async_tokens; i++) {
		if (opal_async_tokens[i].state == ASYNC_TOKEN_UNALLOCATED) {
			opal_async_tokens[i].state = ASYNC_TOKEN_ALLOCATED;
			token = i;
			break;
		}
	}

	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	return token;
}

/*
 * Note: If the returned token is used in an OPAL call and OPAL returns
 * OPAL_ASYNC_COMPLETION, you MUST call opal_async_wait_response() or
 * opal_async_wait_response_interruptible() at least once before calling
 * another opal_async_* function.
 */
int opal_async_get_token_interruptible(void)
{
	int token;

	/* Wait until a token is available */
	if (down_interruptible(&opal_async_sem))
		return -ERESTARTSYS;

	token = __opal_async_get_token();
	if (token < 0)
		up(&opal_async_sem);

	return token;
}
EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);
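
/*
 * Illustrative usage sketch (not part of this file's API; opal_foo()
 * is a hypothetical async OPAL call - see opal-sensor.c for a real
 * in-tree user of this pattern):
 *
 *	struct opal_msg msg;
 *	int token, rc;
 *
 *	token = opal_async_get_token_interruptible();
 *	if (token < 0)
 *		return token;
 *	rc = opal_foo(token);
 *	if (rc == OPAL_ASYNC_COMPLETION) {
 *		rc = opal_async_wait_response(token, &msg);
 *		if (!rc)
 *			rc = opal_get_async_rc(msg);
 *	}
 *	opal_async_release_token(token);
 */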

static int __opal_async_release_token(int token)
{
	unsigned long flags;
	int rc;

	if (token < 0 || token >= opal_max_async_tokens) {
		pr_err("%s: Passed token is out of range, token %d\n",
		       __func__, token);
		return -EINVAL;
	}

	spin_lock_irqsave(&opal_async_comp_lock, flags);
	switch (opal_async_tokens[token].state) {
	case ASYNC_TOKEN_COMPLETED:
	case ASYNC_TOKEN_ALLOCATED:
		opal_async_tokens[token].state = ASYNC_TOKEN_UNALLOCATED;
		rc = 0;
		break;
	/*
	 * DISPATCHED and ABANDONED tokens must wait for OPAL to respond.
	 * Mark a DISPATCHED token as ABANDONED so that the response handling
	 * code knows nobody cares about it any more and can free it once the
	 * response arrives.
	 */
	case ASYNC_TOKEN_DISPATCHED:
		opal_async_tokens[token].state = ASYNC_TOKEN_ABANDONED;
		fallthrough;
	default:
		rc = 1;
	}
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	return rc;
}

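/*
 * Release a token obtained with opal_async_get_token_interruptible().
 * Returns 0 if the token was freed (and the semaphore bumped), 1 if
 * freeing is deferred until OPAL delivers the outstanding response,
 * or -EINVAL for an out-of-range token.
 */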
int opal_async_release_token(int token)
{
	int ret;

	ret = __opal_async_release_token(token);
	if (!ret)
		up(&opal_async_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(opal_async_release_token);

int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
{
	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/*
	 * There is no need to mark the token as dispatched, wait_event()
	 * will block until the token completes.
	 *
	 * Wake up the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	wait_event(opal_async_wait, opal_async_tokens[token].state
		   == ASYNC_TOKEN_COMPLETED);
	memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));

	return 0;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response);

int opal_async_wait_response_interruptible(uint64_t token, struct opal_msg *msg)
{
	unsigned long flags;
	int ret;

	if (token >= opal_max_async_tokens) {
		pr_err("%s: Invalid token passed\n", __func__);
		return -EINVAL;
	}

	if (!msg) {
		pr_err("%s: Invalid message pointer passed\n", __func__);
		return -EINVAL;
	}

	/*
	 * The first time this gets called we mark the token as DISPATCHED
	 * so that if wait_event_interruptible() returns non-zero and the
	 * caller frees the token, we know not to actually free the token
	 * until the response comes.
	 *
	 * Only change if the token is ALLOCATED - it may have been
	 * completed even before the caller gets around to calling this
	 * the first time.
	 *
	 * There is also a dirty great comment at the token allocation
	 * function that if the OPAL call returns OPAL_ASYNC_COMPLETION to
	 * the caller then the caller *must* call this or the
	 * non-interruptible version before doing anything else with the
	 * token.
	 */
	if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED) {
		spin_lock_irqsave(&opal_async_comp_lock, flags);
		if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED)
			opal_async_tokens[token].state = ASYNC_TOKEN_DISPATCHED;
		spin_unlock_irqrestore(&opal_async_comp_lock, flags);
	}

	/*
	 * Wake up the poller before we wait for events to speed things
	 * up on platforms or simulators where the interrupts aren't
	 * functional.
	 */
	opal_wake_poller();
	ret = wait_event_interruptible(opal_async_wait,
			opal_async_tokens[token].state ==
			ASYNC_TOKEN_COMPLETED);
	if (!ret)
		memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg));

	return ret;
}
EXPORT_SYMBOL_GPL(opal_async_wait_response_interruptible);
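
/*
 * Illustrative sketch of signal-safe use: even if the interruptible
 * wait returns -ERESTARTSYS, the caller must still release the token.
 * Releasing a DISPATCHED token merely marks it ABANDONED, so the
 * actual freeing is deferred until the OPAL response arrives:
 *
 *	ret = opal_async_wait_response_interruptible(token, &msg);
 *	opal_async_release_token(token);
 *	if (ret)
 *		return ret;
 */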

/* Called from interrupt context */
static int opal_async_comp_event(struct notifier_block *nb,
				 unsigned long msg_type, void *msg)
{
	struct opal_msg *comp_msg = msg;
	enum opal_async_token_state state;
	unsigned long flags;
	uint64_t token;

	if (msg_type != OPAL_MSG_ASYNC_COMP)
		return 0;

	token = be64_to_cpu(comp_msg->params[0]);
	spin_lock_irqsave(&opal_async_comp_lock, flags);
	state = opal_async_tokens[token].state;
	opal_async_tokens[token].state = ASYNC_TOKEN_COMPLETED;
	spin_unlock_irqrestore(&opal_async_comp_lock, flags);

	if (state == ASYNC_TOKEN_ABANDONED) {
		/* Free the token, no one else will */
		opal_async_release_token(token);
		return 0;
	}
	memcpy(&opal_async_tokens[token].response, comp_msg, sizeof(*comp_msg));
	wake_up(&opal_async_wait);

	return 0;
}

static struct notifier_block opal_async_comp_nb = {
	.notifier_call	= opal_async_comp_event,
	.next		= NULL,
	.priority	= 0,
};

int __init opal_async_comp_init(void)
{
	struct device_node *opal_node;
	const __be32 *async;
	int err;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_err("%s: Opal node not found\n", __func__);
		err = -ENOENT;
		goto out;
	}

	async = of_get_property(opal_node, "opal-msg-async-num", NULL);
	if (!async) {
		pr_err("%s: %pOF has no opal-msg-async-num\n",
		       __func__, opal_node);
		err = -ENOENT;
		goto out_opal_node;
	}

	opal_max_async_tokens = be32_to_cpup(async);
	opal_async_tokens = kcalloc(opal_max_async_tokens,
				    sizeof(*opal_async_tokens), GFP_KERNEL);
	if (!opal_async_tokens) {
		err = -ENOMEM;
		goto out_opal_node;
	}

	err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
					     &opal_async_comp_nb);
	if (err) {
		pr_err("%s: Can't register OPAL event notifier (%d)\n",
		       __func__, err);
		kfree(opal_async_tokens);
		goto out_opal_node;
	}

	sema_init(&opal_async_sem, opal_max_async_tokens);

out_opal_node:
	of_node_put(opal_node);
out:
	return err;
}
