/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>

#include <uapi/linux/random.h>

struct notifier_block;

void add_device_randomness(const void *buf, size_t len);
void __init add_bootloader_randomness(const void *buf, size_t len);
void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value) __latent_entropy;
void add_interrupt_randomness(int irq) __latent_entropy;
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);

static inline void add_latent_entropy(void)
{
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
	add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
#else
	add_device_randomness(NULL, 0);
#endif
}

#if IS_ENABLED(CONFIG_VMGENID)
void add_vmfork_randomness(const void *unique_vm_id, size_t len);
int register_random_vmfork_notifier(struct notifier_block *nb);
int unregister_random_vmfork_notifier(struct notifier_block *nb);
#else
static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif

void get_random_bytes(void *buf, size_t len);
u8 get_random_u8(void);
u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
	return get_random_u64();
#else
	return get_random_u32();
#endif
}

u32 __get_random_u32_below(u32 ceil);

/*
 * Returns a random integer in the interval [0, ceil), with uniform
 * distribution, suitable for all uses. Fastest when ceil is a constant, but
 * still fast for variable ceil as well.
 */
static inline u32 get_random_u32_below(u32 ceil)
{
	if (!__builtin_constant_p(ceil))
		return __get_random_u32_below(ceil);

	/*
	 * For the fast path, below, all operations on ceil are precomputed by
	 * the compiler, so this incurs no overhead for checking pow2, doing
	 * divisions, or branching based on integer size. The resultant
	 * algorithm does traditional reciprocal multiplication (typically
	 * optimized by the compiler into shifts and adds), rejecting samples
	 * whose lower half would indicate a range indivisible by ceil.
	 */
	BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
	if (ceil <= 1)
		return 0;
	for (;;) {
		if (ceil <= 1U << 8) {
			u32 mult = ceil * get_random_u8();
			if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
				return mult >> 8;
		} else if (ceil <= 1U << 16) {
			u32 mult = ceil * get_random_u16();
			if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
				return mult >> 16;
		} else {
			u64 mult = (u64)ceil * get_random_u32();
			if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
				return mult >> 32;
		}
	}
}
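
/*
 * Illustrative sketch (not part of this header): a typical use of
 * get_random_u32_below() is picking a uniformly distributed index into an
 * array of "count" elements, where pick_random_index() is a hypothetical
 * helper:
 *
 *	static u32 pick_random_index(u32 count)
 *	{
 *		return get_random_u32_below(count);
 *	}
 *
 * Worked example of the rejection test above, assuming ceil == 10 and
 * get_random_u8() returns 200: mult == 2000, its low byte (u8)mult == 208,
 * and (1U << 8) % 10 == 6, so 208 >= 6 accepts the sample and the result is
 * mult >> 8 == 7, which lies in [0, 10).
 */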

/*
 * Returns a random integer in the interval (floor, U32_MAX], with uniform
 * distribution, suitable for all uses. Fastest when floor is a constant, but
 * still fast for variable floor as well.
 */
static inline u32 get_random_u32_above(u32 floor)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX,
			 "get_random_u32_above() must take floor < U32_MAX");
	return floor + 1 + get_random_u32_below(U32_MAX - floor);
}
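
/*
 * Illustrative sketch (not part of this header): drawing a guaranteed
 * non-zero 32-bit value, i.e. uniform over (0, U32_MAX]:
 *
 *	u32 nonzero = get_random_u32_above(0);
 */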

/*
 * Returns a random integer in the interval [floor, ceil], with uniform
 * distribution, suitable for all uses. Fastest when floor and ceil are
 * constant, but still fast for variable floor and ceil as well.
 */
static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) &&
			 (floor > ceil || ceil - floor == U32_MAX),
			 "get_random_u32_inclusive() must take floor <= ceil");
	return floor + get_random_u32_below(ceil - floor + 1);
}
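
/*
 * Illustrative sketch (not part of this header): picking a delay, in
 * milliseconds, uniformly from the closed interval [100, 200]:
 *
 *	u32 delay_ms = get_random_u32_inclusive(100, 200);
 */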

void __init random_init_early(const char *command_line);
void __init random_init(void);
bool rng_is_initialized(void);
int wait_for_random_bytes(void);
int execute_with_initialized_rng(struct notifier_block *nb);

/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
 * Returns the result of the call to wait_for_random_bytes. */
static inline int get_random_bytes_wait(void *buf, size_t nbytes)
{
	int ret = wait_for_random_bytes();
	get_random_bytes(buf, nbytes);
	return ret;
}
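
/*
 * Illustrative sketch (not part of this header): filling a key buffer only
 * once the RNG is initialized, and propagating the error if the wait was
 * interrupted (wait_for_random_bytes() is interruptible, so the error is
 * typically -ERESTARTSYS):
 *
 *	u8 key[32];
 *	int err = get_random_bytes_wait(key, sizeof(key));
 *	if (err)
 *		return err;
 */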

#define declare_get_random_var_wait(name, ret_type) \
	static inline int get_random_ ## name ## _wait(ret_type *out) { \
		int ret = wait_for_random_bytes(); \
		if (unlikely(ret)) \
			return ret; \
		*out = get_random_ ## name(); \
		return 0; \
	}
declare_get_random_var_wait(u8, u8)
declare_get_random_var_wait(u16, u16)
declare_get_random_var_wait(u32, u32)
declare_get_random_var_wait(u64, u64)
declare_get_random_var_wait(long, unsigned long)
#undef declare_get_random_var_wait
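
/*
 * The macro above expands to helpers such as get_random_u32_wait() and
 * get_random_long_wait(). Illustrative sketch (not part of this header),
 * where "id" is a hypothetical local variable:
 *
 *	u32 id;
 *	int err = get_random_u32_wait(&id);
 *	if (err)
 *		return err;	/. the wait for RNG init was interrupted ./
 */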

/*
 * This is designed to be standalone for just prandom
 * users, but for now we include it from <linux/random.h>
 * for legacy reasons.
 */
#include <linux/prandom.h>

#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
#endif

#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
#endif

#endif /* _LINUX_RANDOM_H */