#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/refcount.h>
#include <linux/io_uring_types.h>

struct io_wq;

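/*
 * Work item flags. The low bits describe how an io_wq_work is processed;
 * for IO_WQ_WORK_HASHED items, the upper 8 bits of the same flags word
 * carry the hash key (see IO_WQ_HASH_SHIFT and io_wq_hash_work() below).
 */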
enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};

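/* Result of a cancellation attempt, as returned by io_wq_cancel_cb(). */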
enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, cancellation attempted */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};

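/*
 * Callbacks supplied by the io_wq user through io_wq_data: do_work runs a
 * work item, and free_work is invoked once it has completed. free_work may
 * return a follow-up work item to execute next, or NULL if there is none;
 * this is how chained (linked) work is driven through the queue.
 */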
typedef struct io_wq_work *(free_work_fn)(struct io_wq_work *);
typedef void (io_wq_work_fn)(struct io_wq_work *);

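/*
 * Shared state for hashed work: 'map' is a bitmap of hash buckets with
 * work currently in flight, and 'wait' lets workers sleep until a
 * contended bucket is released. Refcounted so the same hash state can be
 * shared by multiple users (see io_wq_put_hash()).
 */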
struct io_wq_hash {
	refcount_t refs;
	unsigned long map;
	struct wait_queue_head wait;
};

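/* Drop a reference to the shared hash state, freeing it on the last put. */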
static inline void io_wq_put_hash(struct io_wq_hash *hash)
{
	if (refcount_dec_and_test(&hash->refs))
		kfree(hash);
}

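/*
 * Creation parameters for io_wq_create(): the shared hash state, the task
 * the workers operate on behalf of, and the work/free callbacks above.
 */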
struct io_wq_data {
	struct io_wq_hash *hash;
	struct task_struct *task;
	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};

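/*
 * Typical lifecycle, as a minimal sketch. 'my_do_work' and 'my_free_work'
 * stand in for caller-provided callbacks and 'bounded' caps the size of
 * the bounded worker pool; io_wq_create() returns an ERR_PTR() on failure.
 *
 *	struct io_wq_data data = {
 *		.hash		= hash,
 *		.task		= current,
 *		.do_work	= my_do_work,
 *		.free_work	= my_free_work,
 *	};
 *	struct io_wq *wq = io_wq_create(bounded, &data);
 *
 *	io_wq_enqueue(wq, work);
 *
 *	io_wq_exit_start(wq);
 *	io_wq_put_and_exit(wq);
 */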
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
void io_wq_exit_start(struct io_wq *wq);
void io_wq_put_and_exit(struct io_wq *wq);

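/*
 * io_wq_enqueue() queues work for execution by a worker. Work items that
 * must not run in parallel (e.g. buffered writes to the same file) should
 * be hashed on a common key before enqueue; a sketch, assuming 'inode'
 * identifies the serialization domain:
 *
 *	io_wq_hash_work(work, inode);
 *	io_wq_enqueue(wq, work);
 *
 * Items hashing to the same bucket are then executed serially.
 */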
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);

int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
bool io_wq_worker_stopped(void);

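/* True if the work item carries a hash key set by io_wq_hash_work(). */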
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}

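/*
 * Cancel work matched by a caller-supplied predicate. A minimal sketch,
 * where struct my_req is a hypothetical request type that embeds its
 * io_wq_work plus an 'owner' cookie stashed by the submitter:
 *
 *	static bool match_owner(struct io_wq_work *work, void *data)
 *	{
 *		struct my_req *req = container_of(work, struct my_req, work);
 *
 *		return req->owner == data;
 *	}
 *
 *	enum io_wq_cancel ret = io_wq_cancel_cb(wq, match_owner, owner, true);
 *
 * With cancel_all == true every match is cancelled, otherwise only the
 * first; the return value reports whether a match was cancelled while
 * still pending, found running, or not found (see enum io_wq_cancel).
 */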
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

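/*
 * True if the current task is an io_wq worker thread: in task context,
 * flagged PF_IO_WORKER, and with io-wq private worker state attached.
 */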
static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER) &&
		current->worker_private;
}
#endif