/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IOCONTEXT_H
#define IOCONTEXT_H

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

enum {
	ICQ_EXITED		= 1 << 2,
	ICQ_DESTROYED		= 1 << 3,
};

/*
 * An io_cq (icq) is an association between an io_context (ioc) and a
 * request_queue (q).  This is used by elevators which need to track
 * information per ioc - q pair.
 *
 * An elevator can request use of icq's by setting elevator_type->icq_size
 * and ->icq_align.  Both size and align must be at least as large as those
 * of struct io_cq, and the elevator can use the tail area for private
 * information.  The recommended way to do this is defining a struct which
 * contains io_cq as the first member followed by private members and
 * using its size and align.  For example,
 *
 *	struct snail_io_cq {
 *		struct io_cq	icq;
 *		int		poke_snail;
 *		int		feed_snail;
 *	};
 *
 *	struct elevator_type snail_elv_type = {
 *		.ops		= { ... },
 *		.icq_size	= sizeof(struct snail_io_cq),
 *		.icq_align	= __alignof__(struct snail_io_cq),
 *		...
 *	};
 *
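 * The elevator can then recover its private data from an icq pointer
 * with container_of(), e.g. via a helper like the following
 * (icq_to_sic() is a hypothetical name, not something this header
 * provides):
 *
 *	static inline struct snail_io_cq *icq_to_sic(struct io_cq *icq)
 *	{
 *		return container_of(icq, struct snail_io_cq, icq);
 *	}
 *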
 * If icq_size is set, block core will manage icq's.  Each request will
 * have its ->elv.icq field set before elevator_ops->elevator_set_req_fn()
 * is called and will hold a reference to the associated io_context.
 *
 * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
 * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
 * are called with both the associated io_context and queue locks held.
 *
 * An elevator is allowed to look up an icq using ioc_lookup_icq() while
 * holding the queue lock, but the returned icq is valid only until the
 * queue lock is released.  Elevators cannot and should not try to create
 * or destroy icq's.
 *
 * As icq's are linked from both ioc and q, the locking rules are a bit
 * complex.
 *
 * - ioc lock nests inside q lock.
 *
 * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
 *   q->icq_list and icq->q_node by q lock.
 *
 * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
 *   itself is protected by q lock.  However, both the indexes and icq
 *   itself are also RCU managed and lookups can be performed while
 *   holding only the q lock.
 *
 * - icq's are not reference counted.  They are destroyed when either the
 *   ioc or q goes away.  Each request with icq set holds an extra
 *   reference to the ioc to ensure it stays alive until the request is
 *   completed.
 *
 * - Linking and unlinking icq's are performed while holding both ioc and
 *   q locks.  Due to the lock ordering, q exit is simple but ioc exit
 *   requires a reverse-order double lock dance, as sketched below.
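 *
 *   For illustration, the ioc-exit side looks roughly like this (a
 *   sketch only, not the actual implementation; ioc_destroy_icq() here
 *   stands for the unlink-and-free step that lives in block/blk-ioc.c):
 *
 *	spin_lock_irq(&ioc->lock);
 *	while (!hlist_empty(&ioc->icq_list)) {
 *		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
 *						struct io_cq, ioc_node);
 *		struct request_queue *q = icq->q;
 *
 *		if (spin_trylock(&q->queue_lock)) {
 *			ioc_destroy_icq(icq);
 *			spin_unlock(&q->queue_lock);
 *		} else {
 *			(q lock unavailable; drop ioc lock and retry so
 *			 the locks end up taken in q -> ioc order)
 *			spin_unlock_irq(&ioc->lock);
 *			cpu_relax();
 *			spin_lock_irq(&ioc->lock);
 *		}
 *	}
 *	spin_unlock_irq(&ioc->lock);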
 */
struct io_cq {
	struct request_queue	*q;
	struct io_context	*ioc;

	/*
	 * q_node and ioc_node link io_cq through icq_list of q and ioc
	 * respectively.  Both fields are unused once ioc_exit_icq() is
	 * called and are shared with __rcu_icq_cache and __rcu_head,
	 * which are used for RCU free of io_cq.
	 */
	union {
		struct list_head	q_node;
		struct kmem_cache	*__rcu_icq_cache;
	};
	union {
		struct hlist_node	ioc_node;
		struct rcu_head		__rcu_head;
	};

	unsigned int		flags;
};
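
/*
 * Free-side sketch illustrating why the unions above are safe: once
 * ioc_exit_icq() has run, q_node and ioc_node are dead, so the RCU free
 * path can reuse their storage (this mirrors the shape of the RCU
 * callback in block/blk-ioc.c):
 *
 *	static void icq_free_icq_rcu(struct rcu_head *head)
 *	{
 *		struct io_cq *icq = container_of(head, struct io_cq,
 *						 __rcu_head);
 *
 *		kmem_cache_free(icq->__rcu_icq_cache, icq);
 *	}
 */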

/*
 * I/O subsystem state of the associated processes.  It is refcounted
 * and kmalloc'ed, and it may be shared between processes (e.g. tasks
 * cloned with CLONE_IO).
 */
struct io_context {
	atomic_long_t refcount;
	atomic_t active_ref;

	unsigned short ioprio;

#ifdef CONFIG_BLK_ICQ
	/* all the fields below are protected by this lock */
	spinlock_t lock;

	struct radix_tree_root	icq_tree;
	struct io_cq __rcu	*icq_hint;
	struct hlist_head	icq_list;

	struct work_struct release_work;
#endif /* CONFIG_BLK_ICQ */
};
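
/*
 * Lifetime sketch (an illustration, not an API defined in this header):
 * a holder takes a reference by bumping ->refcount, which is what the
 * get_io_context() helper in block core boils down to, and drops it
 * again with put_io_context(), which frees the ioc once the last
 * reference is gone:
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	if (ioc) {
 *		atomic_long_inc(&ioc->refcount);
 *		... use ioc, e.g. read ioc->ioprio ...
 *		put_io_context(ioc);
 *	}
 */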

struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
int __copy_io(unsigned long clone_flags, struct task_struct *tsk);
static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
	if (!current->io_context)
		return 0;
	return __copy_io(clone_flags, tsk);
}
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
	return 0;
}
#endif /* CONFIG_BLOCK */
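
/*
 * Caller-side sketch for copy_io() (hypothetical, modelled on how the
 * fork path is expected to use it): __copy_io() either shares the
 * parent's ioc when CLONE_IO is set, or gives the child its own ioc
 * inheriting a valid ->ioprio:
 *
 *	retval = copy_io(clone_flags, child);
 *	if (retval)
 *		goto cleanup;
 */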

#endif /* IOCONTEXT_H */