#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>
#include <linux/io_uring_types.h>

struct io_wq;

enum {
	IO_WQ_WORK_CANCEL = 1,
	IO_WQ_WORK_HASHED = 2,
	IO_WQ_WORK_UNBOUND = 4,
	IO_WQ_WORK_CONCURRENT = 16,

	IO_WQ_HASH_SHIFT = 24,	/* upper 8 bits are used for hash key */
};
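
/*
 * Illustrative sketch, not part of the API: the hash key lives in the
 * bits above IO_WQ_HASH_SHIFT of work->flags, so packing and recovering
 * a bucket index looks roughly like this ("bucket" is a made-up local
 * name, not something defined here):
 *
 *	unsigned int flags = IO_WQ_WORK_HASHED | (bucket << IO_WQ_HASH_SHIFT);
 *	unsigned int key   = flags >> IO_WQ_HASH_SHIFT;
 *
 * io_wq_hash_work(), declared further down, does the packing from a
 * caller-supplied pointer.
 */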

enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, cancellation attempted */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}
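
/*
 * Hedged usage sketch: a second owner takes its own reference with
 * refcount_inc() before stashing the pointer, and drops it through
 * io_wq_put_hash() when done (assumes "hash" is a live pointer):
 *
 *	refcount_inc(&hash->refs);
 *	...
 *	io_wq_put_hash(hash);
 */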

struct io_wq_data {
	struct io_wq_hash *hash;
	struct task_struct *task;
};

struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);
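
/*
 * Typical lifecycle, sketched from the declarations above. Error
 * handling is elided and "shared_hash"/"owner_task" are illustrative
 * names, not fields defined in this header; the ERR_PTR convention for
 * the return value is an assumption about the implementation:
 *
 *	struct io_wq_data data = {
 *		.hash = shared_hash,
 *		.task = owner_task,
 *	};
 *	struct io_wq *wq = io_wq_create(bounded_concurrency, &data);
 *
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 *	...
 *	io_wq_exit_start(wq);
 *	io_wq_put_and_exit(wq);
 */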

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);
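
/*
 * Sketch of enqueueing work serialized against other work on the same
 * key (for example, the same inode), assuming "key" is any stable
 * pointer identifying the serialization domain:
 *
 *	io_wq_hash_work(work, key);
 *	io_wq_enqueue(wq, work);
 *
 * Unhashed work is enqueued directly, without the io_wq_hash_work() call.
 */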

int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
bool io_wq_worker_stopped(void);
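
/*
 * Illustrative only: io_wq_max_workers() takes an array of new counts,
 * one per accounting class (bound and unbound), and writes the previous
 * limits back into the same array. The "2" below is an assumption about
 * the number of classes, not a constant defined in this header:
 *
 *	int counts[2] = { 16, 32 };
 *
 *	io_wq_max_workers(wq, counts);
 */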

static inline bool __io_wq_is_hashed(unsigned int work_flags)
{
	return work_flags & IO_WQ_WORK_HASHED;
}

static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return __io_wq_is_hashed(atomic_read(&work->flags));
}

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);
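
/*
 * Hedged example of a matcher for io_wq_cancel_cb(): "data" is whatever
 * the caller passed in, here assumed to be the work item being hunted.
 * Returning true marks the work for cancellation; with cancel_all ==
 * false, matching stops at the first hit:
 *
 *	static bool match_one_item(struct io_wq_work *work, void *data)
 *	{
 *		return work == data;
 *	}
 *
 *	enum io_wq_cancel ret = io_wq_cancel_cb(wq, match_one_item,
 *						work, false);
 */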

#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->worker_private;
}
#endif