// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static void netfs_pgpriv2_copy_folio(struct netfs_io_request *creq, struct folio *folio)
{
	struct netfs_io_stream *cache = &creq->io_streams[1];
	size_t fsize = folio_size(folio), flen = fsize;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(creq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_end_private_2(folio);
		return;
	}

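	/* If the folio extends past the file size recorded in the copy
	 * request, update the request with the i_size we just read.
	 */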
	if (fpos + fsize > creq->i_size)
		creq->i_size = i_size;

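	/* Trim the copy to the part of the folio that lies below EOF and note
	 * whether this folio takes the write up to EOF.
	 */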
	if (flen > i_size - fpos) {
		flen = i_size - fpos;
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);

	/* Attach the folio to the rolling buffer. */
	if (rolling_buffer_append(&creq->buffer, folio, 0) < 0) {
		clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &creq->flags);
		return;
	}

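	/* Tell the cache stream how much of this folio to write (flen bytes
	 * from offset 0) and how far the I/O could be extended (up to the
	 * full folio size).
	 */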
	cache->submit_extendable_to = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;

	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	do {
		ssize_t part;

		creq->buffer.iter.iov_offset = cache->submit_off;

		atomic64_set(&creq->issued_to, fpos + cache->submit_off);
		cache->submit_extendable_to = fsize - cache->submit_off;
		part = netfs_advance_write(creq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);

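	/* The whole folio has now been issued; reset the iterator and advance
	 * the rolling buffer and the issue point past it.
	 */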
	creq->buffer.iter.iov_offset = 0;
	rolling_buffer_advance(&creq->buffer, fsize);
	atomic64_set(&creq->issued_to, fpos + fsize);

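	/* If we trimmed the copy short of the folio size (ie. the folio
	 * crosses EOF), issue the pending cache write now.
	 */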
	if (flen < fsize)
		netfs_issue_write(creq, cache);
}

/*
 * [DEPRECATED] Set up copying to the cache.
 */
static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
	struct netfs_io_request *rreq, struct folio *folio)
{
	struct netfs_io_request *creq;

	if (!fscache_resources_valid(&rreq->cache_resources))
		goto cancel;

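	/* Create a write request that will push the folios into the cache
	 * through I/O stream 1 (the cache stream).
	 */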
	creq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
				      NETFS_PGPRIV2_COPY_TO_CACHE);
	if (IS_ERR(creq))
		goto cancel;

	if (!creq->io_streams[1].avail)
		goto cancel_put;

	trace_netfs_write(creq, netfs_write_trace_copy_to_cache);
	netfs_stat(&netfs_n_wh_copy_to_cache);
	rreq->copy_to_cache = creq;
	return creq;

cancel_put:
	netfs_put_request(creq, netfs_rreq_trace_put_return);
cancel:
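	/* Cache a -ENOBUFS error in the read request so that any further
	 * folios in this request skip the copy attempt.
	 */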
	rreq->copy_to_cache = ERR_PTR(-ENOBUFS);
	clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
	return ERR_PTR(-ENOBUFS);
}

/*
 * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2 and add
 * it to the copy write request.
 */
void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio)
{
	struct netfs_io_request *creq = rreq->copy_to_cache;

	if (!creq)
		creq = netfs_pgpriv2_begin_copy_to_cache(rreq, folio);
	if (IS_ERR(creq))
		return;

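	/* Set PG_private_2 on the folio before queuing it; the mark is
	 * cleared again once the copy to the cache has been collected.
	 */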
	trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
	folio_start_private_2(folio);
	netfs_pgpriv2_copy_folio(creq, folio);
}

/*
 * [DEPRECATED] End writing to the cache, flushing out any outstanding writes.
 */
void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq)
{
	struct netfs_io_request *creq = rreq->copy_to_cache;

	if (IS_ERR_OR_NULL(creq))
		return;

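	/* Push out anything still queued on the cache stream and let the
	 * collector know that no more subrequests will be added.
	 */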
	netfs_issue_write(creq, &creq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags);

	netfs_put_request(creq, netfs_rreq_trace_put_return);
	creq->copy_to_cache = NULL;
}

/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *creq)
{
	struct folio_queue *folioq = creq->buffer.tail;
	unsigned long long collected_to = creq->collected_to;
	unsigned int slot = creq->buffer.first_tail_slot;
	bool made_progress = false;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = rolling_buffer_delete_spent(&creq->buffer);
		slot = 0;
	}

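	/* Work through the folios from the collection point onwards, ending
	 * PG_private_2 on each folio that has been entirely written back to
	 * the cache (ie. collected_to has passed its end).
	 */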
	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_private_2(folio),
			      "R=%08x: folio %lx is not marked private_2\n",
			      creq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		flen = fsize;

		fend = min_t(unsigned long long, fpos + flen, creq->i_size);

		trace_netfs_collect_folio(creq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
		folio_end_private_2(folio);
		creq->cleaned_to = fpos + fsize;
		made_progress = true;

		/* Clean up the head folioq.  If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			folioq = rolling_buffer_delete_spent(&creq->buffer);
			if (!folioq)
				goto done;
			slot = 0;
		}

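		/* Stop once we've unlocked the folio that reaches the
		 * collection point.
		 */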
		if (fpos + fsize >= collected_to)
			break;
	}

	creq->buffer.tail = folioq;
done:
	creq->buffer.first_tail_slot = slot;
	return made_progress;
}