//===-- Shared memory RPC client / server interface -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a remote procedure call mechanism to communicate
// between heterogeneous devices that can share an address space atomically. We
// provide a client and a server to facilitate the remote call. The client
// makes requests to the server using a shared communication channel. We use
// separate atomic signals to indicate which side, the client or the server,
// owns the buffer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC___SUPPORT_RPC_RPC_H
#define LLVM_LIBC_SRC___SUPPORT_RPC_RPC_H

#include "rpc_util.h"
#include "src/__support/CPP/algorithm.h" // max
#include "src/__support/CPP/atomic.h"
#include "src/__support/CPP/functional.h"
#include "src/__support/CPP/optional.h"
#include "src/__support/GPU/utils.h"

#include <stdint.h>

namespace LIBC_NAMESPACE {
namespace rpc {

/// A fixed size channel used to communicate between the RPC client and server.
struct Buffer {
  uint64_t data[8];
};
static_assert(sizeof(Buffer) == 64, "Buffer size mismatch");

/// The information associated with a packet. This indicates which operations
/// to perform and which threads are active in the slots.
struct Header {
  uint64_t mask;
  uint16_t opcode;
};

/// The maximum number of parallel ports that the RPC interface can support.
constexpr uint64_t MAX_PORT_COUNT = 4096;

/// A common process used to synchronize communication between a client and a
/// server. The process contains a read-only inbox and a write-only outbox used
/// for signaling ownership of the shared buffer between both sides. We assign
/// ownership of the buffer to the client if the inbox and outbox bits match;
/// otherwise it is owned by the server.
///
/// This process is designed to allow the client and the server to exchange
/// data using a fixed size packet in a mostly arbitrary order using the 'send'
/// and 'recv' operations. The following restrictions to this scheme apply:
/// - The client will always start with a 'send' operation.
/// - The server will always start with a 'recv' operation.
/// - Every 'send' or 'recv' call is mirrored by the other process.
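///
/// For illustration, one round trip between the two processes looks like:
///
///   client: send(fill)  ------------>  server: recv(use)
///   client: recv(use)   <------------  server: send(fill)
///
/// Each arrow corresponds to the sender inverting its outbox bit and the
/// receiver waiting for the matching change to appear in its inbox.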
template <bool Invert> struct Process {
  LIBC_INLINE Process() = default;
  LIBC_INLINE Process(const Process &) = delete;
  LIBC_INLINE Process &operator=(const Process &) = delete;
  LIBC_INLINE Process(Process &&) = default;
  LIBC_INLINE Process &operator=(Process &&) = default;
  LIBC_INLINE ~Process() = default;

  uint32_t port_count = 0;
  cpp::Atomic<uint32_t> *inbox = nullptr;
  cpp::Atomic<uint32_t> *outbox = nullptr;
  Header *header = nullptr;
  Buffer *packet = nullptr;

  static constexpr uint64_t NUM_BITS_IN_WORD = sizeof(uint32_t) * 8;
  cpp::Atomic<uint32_t> lock[MAX_PORT_COUNT / NUM_BITS_IN_WORD] = {0};

  LIBC_INLINE Process(uint32_t port_count, void *buffer)
      : port_count(port_count),
        inbox(reinterpret_cast<cpp::Atomic<uint32_t> *>(
            advance(buffer, inbox_offset(port_count)))),
        outbox(reinterpret_cast<cpp::Atomic<uint32_t> *>(
            advance(buffer, outbox_offset(port_count)))),
        header(reinterpret_cast<Header *>(
            advance(buffer, header_offset(port_count)))),
        packet(reinterpret_cast<Buffer *>(
            advance(buffer, buffer_offset(port_count)))) {}

  /// Allocate a memory buffer sufficient to store the following equivalent
  /// representation in memory.
  ///
  /// struct Equivalent {
  ///   Atomic<uint32_t> primary[port_count];
  ///   Atomic<uint32_t> secondary[port_count];
  ///   Header header[port_count];
  ///   Buffer packet[port_count][lane_size];
  /// };
  LIBC_INLINE static constexpr uint64_t allocation_size(uint32_t port_count,
                                                        uint32_t lane_size) {
    return buffer_offset(port_count) + buffer_bytes(port_count, lane_size);
  }
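
  /// As a worked example (assuming sizeof(Header) == 16 and the offset
  /// helpers defined below), port_count = 64 and lane_size = 32 give
  /// 2 * 256 bytes of mailboxes, 1024 bytes of headers, and
  /// 64 * 32 * 64 = 131072 bytes of packet buffers, roughly 132608 bytes in
  /// total once aligned.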

  /// Retrieve the inbox state from memory shared between processes.
  LIBC_INLINE uint32_t load_inbox(uint64_t lane_mask, uint32_t index) const {
    return gpu::broadcast_value(
        lane_mask,
        inbox[index].load(cpp::MemoryOrder::RELAXED, cpp::MemoryScope::SYSTEM));
  }

  /// Retrieve the outbox state from memory shared between processes.
  LIBC_INLINE uint32_t load_outbox(uint64_t lane_mask, uint32_t index) const {
    return gpu::broadcast_value(lane_mask,
                                outbox[index].load(cpp::MemoryOrder::RELAXED,
                                                   cpp::MemoryScope::SYSTEM));
  }

  /// Signal to the other process that this one is finished with the buffer.
  /// Equivalent to loading the outbox followed by a store of the inverted
  /// value. The outbox is written only by this warp, and tracking the value
  /// locally is cheaper than calling load_outbox to get the value to store.
  LIBC_INLINE uint32_t invert_outbox(uint32_t index, uint32_t current_outbox) {
    uint32_t inverted_outbox = !current_outbox;
    atomic_thread_fence(cpp::MemoryOrder::RELEASE);
    outbox[index].store(inverted_outbox, cpp::MemoryOrder::RELAXED,
                        cpp::MemoryScope::SYSTEM);
    return inverted_outbox;
  }

  /// Given the current outbox and inbox values, wait until the inbox changes
  /// to indicate that this thread owns the buffer element.
  LIBC_INLINE void wait_for_ownership(uint64_t lane_mask, uint32_t index,
                                      uint32_t outbox, uint32_t in) {
    while (buffer_unavailable(in, outbox)) {
      sleep_briefly();
      in = load_inbox(lane_mask, index);
    }
    atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);
  }

  /// The packet is a linearly allocated array of buffers used to communicate
  /// with the other process. This function returns the appropriate slot in
  /// this array such that the process can operate on an entire warp or
  /// wavefront.
  LIBC_INLINE Buffer *get_packet(uint32_t index, uint32_t lane_size) {
    return &packet[index * lane_size];
  }

  /// Determines if this process needs to wait for ownership of the buffer. We
  /// invert the condition on one of the processes to indicate that if one
  /// process owns the buffer then the other does not.
  LIBC_INLINE static bool buffer_unavailable(uint32_t in, uint32_t out) {
    bool cond = in != out;
    return Invert ? !cond : cond;
  }
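
  /// To make the ownership rule concrete, the four inbox/outbox states decode
  /// as follows (the client is instantiated with Invert = false, the server
  /// with Invert = true):
  ///
  ///   in  out   client (Invert = false)   server (Invert = true)
  ///    0   0    owns the buffer           waiting
  ///    0   1    waiting                   owns the buffer
  ///    1   0    waiting                   owns the buffer
  ///    1   1    owns the buffer           waiting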

  /// Attempt to claim the lock at index. Return true on lock taken.
  /// lane_mask is a bitmap of the threads in the warp that would hold the
  /// single lock on success, e.g. the result of gpu::get_lane_mask().
  /// The lock is held when the n-th bit of the lock bitfield is set.
  [[clang::convergent]] LIBC_INLINE bool try_lock(uint64_t lane_mask,
                                                  uint32_t index) {
    // On amdgpu, a test and set on the nth lock bit and a sync_lane would
    // suffice. On volta, we need to handle differences between the threads
    // running now and the threads that were detected in the previous call to
    // get_lane_mask().
    //
    // All threads in lane_mask try to claim the lock. At most one can succeed.
    // There may be threads active which are not in lane_mask that must not
    // succeed in taking the lock, as otherwise it will leak. This is handled
    // by making the fetch_or below a no-op for threads not in lane_mask.
    uint32_t id = gpu::get_lane_id();
    bool id_in_lane_mask = lane_mask & (1ul << id);

    // All threads in the warp call fetch_or, possibly at the same time.
    bool before = set_nth(lock, index, id_in_lane_mask);
    uint64_t packed = gpu::ballot(lane_mask, before);

    // If every bit set in lane_mask is also set in packed, every single thread
    // in the warp failed to get the lock. Ballot returns unset for threads not
    // in the lane mask.
    //
    // Cases, per thread:
    //   mask == 0 -> unspecified before, discarded by ballot -> 0
    //   mask == 1 and before == 0 (success), set zero by ballot -> 0
    //   mask == 1 and before == 1 (failure), set one by ballot  -> 1
    //
    // mask != packed implies at least one of the threads got the lock, and the
    // atomic semantics of fetch_or guarantee that at most one of them did.

    // If holding the lock then the caller can load values knowing said loads
    // won't move past the lock. No such guarantee is needed if the lock
    // acquire failed. This conditional branch is expected to fold in the
    // caller after inlining the current function.
    bool holding_lock = lane_mask != packed;
    if (holding_lock)
      atomic_thread_fence(cpp::MemoryOrder::ACQUIRE);
    return holding_lock;
  }

  /// Unlock the lock at index. We need a lane sync to keep this function
  /// convergent, otherwise the compiler will sink the store and deadlock.
  [[clang::convergent]] LIBC_INLINE void unlock(uint64_t lane_mask,
                                                uint32_t index) {
    // Do not move any writes past the unlock.
    atomic_thread_fence(cpp::MemoryOrder::RELEASE);

    // Use exactly one thread to clear the nth bit in the lock array. We must
    // restrict this to a single thread to avoid one thread dropping the lock,
    // then an unrelated warp claiming the lock, then a second thread in this
    // warp dropping the lock again.
    clear_nth(lock, index, gpu::is_first_lane(lane_mask));
    gpu::sync_lane(lane_mask);
  }
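
  /// A minimal usage sketch for the lock pair above (illustrative only; the
  /// real call sites are Client::open and Server::try_open below):
  ///
  ///   uint64_t lane_mask = gpu::get_lane_mask();
  ///   if (process.try_lock(lane_mask, index)) {
  ///     // ... exclusive access to port 'index' ...
  ///     process.unlock(lane_mask, index);
  ///   }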

  /// Number of bytes to allocate for an inbox or outbox.
  LIBC_INLINE static constexpr uint64_t mailbox_bytes(uint32_t port_count) {
    return port_count * sizeof(cpp::Atomic<uint32_t>);
  }

  /// Number of bytes to allocate for the buffer containing the packets.
  LIBC_INLINE static constexpr uint64_t buffer_bytes(uint32_t port_count,
                                                     uint32_t lane_size) {
    return port_count * lane_size * sizeof(Buffer);
  }

  /// Offset of the inbox in memory. This is the same as the outbox if
  /// inverted.
  LIBC_INLINE static constexpr uint64_t inbox_offset(uint32_t port_count) {
    return Invert ? mailbox_bytes(port_count) : 0;
  }

  /// Offset of the outbox in memory. This is the same as the inbox if
  /// inverted.
  LIBC_INLINE static constexpr uint64_t outbox_offset(uint32_t port_count) {
    return Invert ? 0 : mailbox_bytes(port_count);
  }

  /// Offset of the header array in memory, following the inbox and outbox.
  LIBC_INLINE static constexpr uint64_t header_offset(uint32_t port_count) {
    return align_up(2 * mailbox_bytes(port_count), alignof(Header));
  }

  /// Offset of the buffer containing the packets, following the header array.
  LIBC_INLINE static constexpr uint64_t buffer_offset(uint32_t port_count) {
    return align_up(header_offset(port_count) + port_count * sizeof(Header),
                    alignof(Buffer));
  }
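
  /// Continuing the worked example above (port_count = 64, and assuming
  /// sizeof(Header) == 16 and alignof(Buffer) == 8 on the target):
  /// inbox_offset = 0 and outbox_offset = 256 (swapped when Invert is true),
  /// header_offset = align_up(512, 8) = 512, and
  /// buffer_offset = align_up(512 + 1024, 8) = 1536.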

  /// Conditionally set the n-th bit in the atomic bitfield.
  LIBC_INLINE static constexpr uint32_t set_nth(cpp::Atomic<uint32_t> *bits,
                                                uint32_t index, bool cond) {
    uint32_t slot = index / NUM_BITS_IN_WORD;
    uint32_t bit = index % NUM_BITS_IN_WORD;
    return bits[slot].fetch_or(static_cast<uint32_t>(cond) << bit,
                               cpp::MemoryOrder::RELAXED,
                               cpp::MemoryScope::DEVICE) &
           (1u << bit);
  }

  /// Conditionally clear the n-th bit in the atomic bitfield.
  LIBC_INLINE static constexpr uint32_t clear_nth(cpp::Atomic<uint32_t> *bits,
                                                  uint32_t index, bool cond) {
    uint32_t slot = index / NUM_BITS_IN_WORD;
    uint32_t bit = index % NUM_BITS_IN_WORD;
    return bits[slot].fetch_and(~0u ^ (static_cast<uint32_t>(cond) << bit),
                                cpp::MemoryOrder::RELAXED,
                                cpp::MemoryScope::DEVICE) &
           (1u << bit);
  }
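
  /// For example, index = 37 maps to slot = 37 / 32 = 1 and bit = 37 % 32 = 5,
  /// so set_nth ORs 1u << 5 into lock[1] and returns nonzero iff that bit was
  /// already set.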
};

/// Invokes a function on every active buffer across the total lane size.
static LIBC_INLINE void invoke_rpc(cpp::function<void(Buffer *)> fn,
                                   uint32_t lane_size, uint64_t lane_mask,
                                   Buffer *slot) {
  if constexpr (is_process_gpu()) {
    fn(&slot[gpu::get_lane_id()]);
  } else {
    for (uint32_t i = 0; i < lane_size; i += gpu::get_lane_size())
      if (lane_mask & (1ul << i))
        fn(&slot[i]);
  }
}

/// Alternate version that also provides the index of the current lane.
static LIBC_INLINE void invoke_rpc(cpp::function<void(Buffer *, uint32_t)> fn,
                                   uint32_t lane_size, uint64_t lane_mask,
                                   Buffer *slot) {
  if constexpr (is_process_gpu()) {
    fn(&slot[gpu::get_lane_id()], gpu::get_lane_id());
  } else {
    for (uint32_t i = 0; i < lane_size; i += gpu::get_lane_size())
      if (lane_mask & (1ul << i))
        fn(&slot[i], i);
  }
}

/// The port provides the interface to communicate between the multiple
/// processes. A port is conceptually an index into the memory provided by the
/// underlying process that is guarded by a lock bit.
template <bool T> struct Port {
  LIBC_INLINE Port(Process<T> &process, uint64_t lane_mask, uint32_t lane_size,
                   uint32_t index, uint32_t out)
      : process(process), lane_mask(lane_mask), lane_size(lane_size),
        index(index), out(out), receive(false), owns_buffer(true) {}
  LIBC_INLINE ~Port() = default;

private:
  LIBC_INLINE Port(const Port &) = delete;
  LIBC_INLINE Port &operator=(const Port &) = delete;
  LIBC_INLINE Port(Port &&) = default;
  LIBC_INLINE Port &operator=(Port &&) = default;

  friend struct Client;
  friend struct Server;
  friend class cpp::optional<Port<T>>;

public:
  template <typename U> LIBC_INLINE void recv(U use);
  template <typename F> LIBC_INLINE void send(F fill);
  template <typename F, typename U>
  LIBC_INLINE void send_and_recv(F fill, U use);
  template <typename W> LIBC_INLINE void recv_and_send(W work);
  LIBC_INLINE void send_n(const void *const *src, uint64_t *size);
  LIBC_INLINE void send_n(const void *src, uint64_t size);
  template <typename A>
  LIBC_INLINE void recv_n(void **dst, uint64_t *size, A &&alloc);

  LIBC_INLINE uint16_t get_opcode() const {
    return process.header[index].opcode;
  }

  LIBC_INLINE uint16_t get_index() const { return index; }

  LIBC_INLINE void close() {
    // Wait for all lanes to finish using the port.
    gpu::sync_lane(lane_mask);

    // The server is passive; if it owns the buffer when it closes, we need to
    // give ownership back to the client.
    if (owns_buffer && T)
      out = process.invert_outbox(index, out);
    process.unlock(lane_mask, index);
  }

private:
  Process<T> &process;
  uint64_t lane_mask;
  uint32_t lane_size;
  uint32_t index;
  uint32_t out;
  bool receive;
  bool owns_buffer;
};

/// The RPC client used to make requests to the server.
struct Client {
  LIBC_INLINE Client() = default;
  LIBC_INLINE Client(const Client &) = delete;
  LIBC_INLINE Client &operator=(const Client &) = delete;
  LIBC_INLINE ~Client() = default;

  LIBC_INLINE Client(uint32_t port_count, void *buffer)
      : process(port_count, buffer) {}

  using Port = rpc::Port<false>;
  template <uint16_t opcode> LIBC_INLINE Port open();

private:
  Process<false> process;
};
static_assert(cpp::is_trivially_copyable<Client>::value &&
                  sizeof(Process<true>) == sizeof(Process<false>),
              "The client is not trivially copyable from the server");

/// The RPC server used to respond to the client.
struct Server {
  LIBC_INLINE Server() = default;
  LIBC_INLINE Server(const Server &) = delete;
  LIBC_INLINE Server &operator=(const Server &) = delete;
  LIBC_INLINE ~Server() = default;

  LIBC_INLINE Server(uint32_t port_count, void *buffer)
      : process(port_count, buffer) {}

  using Port = rpc::Port<true>;
  LIBC_INLINE cpp::optional<Port> try_open(uint32_t lane_size,
                                           uint32_t start = 0);
  LIBC_INLINE Port open(uint32_t lane_size);

  LIBC_INLINE static uint64_t allocation_size(uint32_t lane_size,
                                              uint32_t port_count) {
    return Process<true>::allocation_size(port_count, lane_size);
  }

private:
  Process<true> process;
};

/// Applies \p fill to the shared buffer and initiates a send operation.
template <bool T> template <typename F> LIBC_INLINE void Port<T>::send(F fill) {
  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);

  // We need to wait until we own the buffer before sending.
  process.wait_for_ownership(lane_mask, index, out, in);

  // Apply the \p fill function to initialize the buffer and release the
  // memory.
  invoke_rpc(fill, lane_size, process.header[index].mask,
             process.get_packet(index, lane_size));
  out = process.invert_outbox(index, out);
  owns_buffer = false;
  receive = false;
}

/// Applies \p use to the shared buffer and acknowledges the send.
template <bool T> template <typename U> LIBC_INLINE void Port<T>::recv(U use) {
  // We only exchange ownership of the buffer during a receive if we are
  // waiting for a previous receive to finish.
  if (receive) {
    out = process.invert_outbox(index, out);
    owns_buffer = false;
  }

  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);

  // We need to wait until we own the buffer before receiving.
  process.wait_for_ownership(lane_mask, index, out, in);

  // Apply the \p use function to read the memory out of the buffer.
  invoke_rpc(use, lane_size, process.header[index].mask,
             process.get_packet(index, lane_size));
  receive = true;
  owns_buffer = true;
}

/// Combines a send and receive into a single function.
template <bool T>
template <typename F, typename U>
LIBC_INLINE void Port<T>::send_and_recv(F fill, U use) {
  send(fill);
  recv(use);
}

/// Combines a receive and send operation into a single function. The \p work
/// function modifies the buffer in-place and the send is only used to initiate
/// the copy back.
template <bool T>
template <typename W>
LIBC_INLINE void Port<T>::recv_and_send(W work) {
  recv(work);
  send([](Buffer *) { /* no-op */ });
}
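
/// For illustration, the lambda-based interface above is used like this (the
/// payload layout and handle() here are hypothetical):
///
///   port.send([](Buffer *buffer) { buffer->data[0] = 42; });
///   port.recv([](Buffer *buffer) { handle(buffer->data[0]); });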

/// Helper routine to simplify the interface when sending from the GPU using
/// thread private pointers to the underlying value.
template <bool T>
LIBC_INLINE void Port<T>::send_n(const void *src, uint64_t size) {
  const void **src_ptr = &src;
  uint64_t *size_ptr = &size;
  send_n(src_ptr, size_ptr);
}

/// Sends an arbitrarily sized data buffer \p src across the shared channel in
/// multiples of the packet length.
template <bool T>
LIBC_INLINE void Port<T>::send_n(const void *const *src, uint64_t *size) {
  uint64_t num_sends = 0;
  send([&](Buffer *buffer, uint32_t id) {
    reinterpret_cast<uint64_t *>(buffer->data)[0] = lane_value(size, id);
    num_sends = is_process_gpu() ? lane_value(size, id)
                                 : cpp::max(lane_value(size, id), num_sends);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    rpc_memcpy(&buffer->data[1], lane_value(src, id), len);
  });
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.header[index].mask;
  while (gpu::ballot(mask, idx < num_sends)) {
    send([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      if (idx < lane_value(size, id))
        rpc_memcpy(buffer->data, advance(lane_value(src, id), idx), len);
    });
    idx += sizeof(Buffer::data);
  }
}
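
/// To make the packetization above concrete: the first packet carries the
/// total size in data[0] plus up to 56 payload bytes, and every subsequent
/// packet carries up to 64 bytes. Sending 100 bytes therefore takes two sends
/// (56 + 44), and the loop keeps sending until the largest size in the warp
/// has been drained.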

/// Receives an arbitrarily sized data buffer across the shared channel in
/// multiples of the packet length. The \p alloc function is called with the
/// size of the data so that we can initialize the size of the \p dst buffer.
template <bool T>
template <typename A>
LIBC_INLINE void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {
  uint64_t num_recvs = 0;
  recv([&](Buffer *buffer, uint32_t id) {
    lane_value(size, id) = reinterpret_cast<uint64_t *>(buffer->data)[0];
    lane_value(dst, id) =
        reinterpret_cast<uint8_t *>(alloc(lane_value(size, id)));
    num_recvs = is_process_gpu() ? lane_value(size, id)
                                 : cpp::max(lane_value(size, id), num_recvs);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    rpc_memcpy(lane_value(dst, id), &buffer->data[1], len);
  });
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.header[index].mask;
  while (gpu::ballot(mask, idx < num_recvs)) {
    recv([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      if (idx < lane_value(size, id))
        rpc_memcpy(advance(lane_value(dst, id), idx), buffer->data, len);
    });
    idx += sizeof(Buffer::data);
  }
}

/// Continually attempts to open a port to use as the client. The client can
/// only open a port if we find an index that is in a valid sending state. That
/// is, there are send operations pending that haven't been serviced on this
/// port. Each port instance uses an associated \p opcode to tell the server
/// what to do. The Client interface provides the appropriate lane size to the
/// port using the platform's returned value.
template <uint16_t opcode>
[[clang::convergent]] LIBC_INLINE Client::Port Client::open() {
  // Repeatedly perform a naive linear scan for a port that can be opened to
  // send data.
  for (uint32_t index = gpu::get_cluster_id();; ++index) {
    // Start from the beginning if we run out of ports to check.
    if (index >= process.port_count)
      index = 0;

    // Attempt to acquire the lock on this index.
    uint64_t lane_mask = gpu::get_lane_mask();
    if (!process.try_lock(lane_mask, index))
      continue;

    uint32_t in = process.load_inbox(lane_mask, index);
    uint32_t out = process.load_outbox(lane_mask, index);

    // Once we acquire the index we need to check if we are in a valid sending
    // state.
    if (process.buffer_unavailable(in, out)) {
      process.unlock(lane_mask, index);
      continue;
    }

    if (gpu::is_first_lane(lane_mask)) {
      process.header[index].opcode = opcode;
      process.header[index].mask = lane_mask;
    }
    gpu::sync_lane(lane_mask);
    return Port(process, lane_mask, gpu::get_lane_size(), index, out);
  }
}
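
/// A minimal client-side sketch (OPCODE_EXAMPLE is a hypothetical opcode;
/// real opcodes are defined elsewhere in the library):
///
///   Client::Port port = client.open<OPCODE_EXAMPLE>();
///   port.send_n(data, size);
///   port.close();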

/// Attempts to open a port to use as the server. The server can only open a
/// port if it has a pending receive operation.
[[clang::convergent]] LIBC_INLINE cpp::optional<typename Server::Port>
Server::try_open(uint32_t lane_size, uint32_t start) {
  // Perform a naive linear scan for a port that has a pending request.
  for (uint32_t index = start; index < process.port_count; ++index) {
    uint64_t lane_mask = gpu::get_lane_mask();
    uint32_t in = process.load_inbox(lane_mask, index);
    uint32_t out = process.load_outbox(lane_mask, index);

    // The server is passive; if there is no work pending, don't bother
    // opening a port.
    if (process.buffer_unavailable(in, out))
      continue;

    // Attempt to acquire the lock on this index.
    if (!process.try_lock(lane_mask, index))
      continue;

    in = process.load_inbox(lane_mask, index);
    out = process.load_outbox(lane_mask, index);

    if (process.buffer_unavailable(in, out)) {
      process.unlock(lane_mask, index);
      continue;
    }

    return Port(process, lane_mask, lane_size, index, out);
  }
  return cpp::nullopt;
}

LIBC_INLINE Server::Port Server::open(uint32_t lane_size) {
  for (;;) {
    if (cpp::optional<Server::Port> p = try_open(lane_size))
      return cpp::move(p.value());
    sleep_briefly();
  }
}
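
/// A sketch of a typical host-side service loop (handle_port is a
/// hypothetical dispatcher, not part of this file):
///
///   for (;;) {
///     if (cpp::optional<Server::Port> port = server.try_open(lane_size)) {
///       handle_port(port->get_opcode(), *port);
///       port->close();
///     }
///   }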

} // namespace rpc
} // namespace LIBC_NAMESPACE

#endif