// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>
#include "internal.h"

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

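	/* Walk the remainder of the cached key list, stopping at the first
	 * key that still passes validation.
	 */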
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

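		/* This key is no good (expired, revoked, ...); remember the
		 * first failure and move on to the next key in the list.
		 */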
		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

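	/* Commit the returned status to the vnode.  If the RPC succeeded,
	 * update the write statistics and, unless this was a launder write,
	 * prune any writeback keys that are no longer needed.
	 */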
	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (!afs_op_error(op)) {
		if (!op->store.laundering)
			afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
			  bool laundering)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter);
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.pos = pos;
	op->store.size = size;
	op->store.laundering = laundering;
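	/* Mark the op uninterruptible so the wait for the server's reply
	 * can't be aborted by a signal partway through writeback.
	 */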
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

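	/* Issue the store RPC.  If the server refuses the key we're using, we
	 * come back here with the next cached key and try again.
	 */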
try_next_key:
	afs_begin_vnode_operation(op);

	op->store.write_iter = iter;
	op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
	op->mtime = inode_get_mtime(&vnode->netfs.inode);

	afs_wait_for_operation(op);

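	/* A permissions or key-validity error may just mean this particular
	 * key has gone bad; rotate to the next writeback key if there is one.
	 */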
	switch (afs_op_error(op)) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", afs_op_error(op));
	return afs_put_operation(op);
}

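/*
 * Upload the data from one write subrequest to the server and report the
 * outcome to the netfs library.
 */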
static void afs_upload_to_server(struct netfs_io_subrequest *subreq)
{
	struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
	ssize_t ret;

	_enter("%x[%x],%zx",
	       subreq->rreq->debug_id, subreq->debug_index, subreq->io_iter.count);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	ret = afs_store_data(vnode, &subreq->io_iter, subreq->start,
			     subreq->rreq->origin == NETFS_LAUNDER_WRITE);
	netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len,
					  false);
}

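/*
 * Workqueue shim that runs afs_upload_to_server() for a queued subrequest.
 */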
static void afs_upload_to_server_worker(struct work_struct *work)
{
	struct netfs_io_subrequest *subreq =
		container_of(work, struct netfs_io_subrequest, work);

	afs_upload_to_server(subreq);
}

/*
 * Set up write requests for a writeback slice.  We need to add a write request
 * for each write we want to make.
 */
void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len)
{
	struct netfs_io_subrequest *subreq;

	_enter("%x,%llx-%llx", wreq->debug_id, start, start + len);

	subreq = netfs_create_write_request(wreq, NETFS_UPLOAD_TO_SERVER,
					    start, len, afs_upload_to_server_worker);
	if (subreq)
		netfs_queue_write_request(subreq);
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	int ret;

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
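	/* A data-integrity sync must wait its turn on the lock; a background
	 * flush can simply back off and try this vnode again later.
	 */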
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	ret = netfs_writepages(mapping, wbc);
	up_read(&vnode->validate_lock);
	return ret;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;

	if (afs_validate(AFS_FS_I(file_inode(file)), afs_file_key(file)) < 0)
		return VM_FAULT_SIGBUS;
	return netfs_page_mkwrite(vmf, NULL);
}

/*
 * Prune the keys cached for writeback.  The wb_lock is taken here, so the
 * caller must not already hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

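	/* Now that the lock has been dropped, dispose of the reaped keys. */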
	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}