1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* AFS filesystem file handling |
3 | * |
4 | * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. |
5 | * Written by David Howells (dhowells@redhat.com) |
6 | */ |
7 | |
8 | #include <linux/kernel.h> |
9 | #include <linux/module.h> |
10 | #include <linux/init.h> |
11 | #include <linux/fs.h> |
12 | #include <linux/pagemap.h> |
13 | #include <linux/writeback.h> |
14 | #include <linux/gfp.h> |
15 | #include <linux/task_io_accounting_ops.h> |
16 | #include <linux/mm.h> |
17 | #include <linux/swap.h> |
18 | #include <linux/netfs.h> |
19 | #include "internal.h" |
20 | |
/* Forward declarations for the static handlers wired into the operation
 * tables below.
 */
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
static int afs_symlink_read_folio(struct file *file, struct folio *folio);

static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags);
static void afs_vm_open(struct vm_area_struct *area);
static void afs_vm_close(struct vm_area_struct *area);
static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);
31 | |
/* File operations for regular AFS files.  Reads and writes go through the
 * netfs library; locking uses AFS's own lock/flock handlers.
 */
const struct file_operations afs_file_operations = {
	.open		= afs_open,
	.release	= afs_release,
	.llseek		= generic_file_llseek,
	.read_iter	= afs_file_read_iter,
	.write_iter	= netfs_file_write_iter,
	.mmap		= afs_file_mmap,
	.splice_read	= afs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fsync		= afs_fsync,
	.lock		= afs_lock,
	.flock		= afs_flock,
};
45 | |
/* Inode operations for regular AFS files. */
const struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_getattr,
	.setattr	= afs_setattr,
	.permission	= afs_permission,
};
51 | |
/* Address space operations for regular files: page cache I/O is delegated
 * to the netfs library except for writepages, which AFS handles itself.
 */
const struct address_space_operations afs_file_aops = {
	.direct_IO	= noop_direct_IO,
	.read_folio	= netfs_read_folio,
	.readahead	= netfs_readahead,
	.dirty_folio	= netfs_dirty_folio,
	.launder_folio	= netfs_launder_folio,
	.release_folio	= netfs_release_folio,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
	.writepages	= afs_writepages,
};
63 | |
/* Address space operations for symlinks: read-only, with a dedicated
 * read_folio that fetches the link body synchronously.
 */
const struct address_space_operations afs_symlink_aops = {
	.read_folio	= afs_symlink_read_folio,
	.release_folio	= netfs_release_folio,
	.invalidate_folio = netfs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
};
70 | |
/* VMA operations for mmapped AFS files; open/close track the number of
 * live mappings so callback breaks can find mapped vnodes.
 */
static const struct vm_operations_struct afs_vm_ops = {
	.open		= afs_vm_open,
	.close		= afs_vm_close,
	.fault		= filemap_fault,
	.map_pages	= afs_vm_map_pages,
	.page_mkwrite	= afs_page_mkwrite,
};
78 | |
79 | /* |
80 | * Discard a pin on a writeback key. |
81 | */ |
82 | void afs_put_wb_key(struct afs_wb_key *wbk) |
83 | { |
84 | if (wbk && refcount_dec_and_test(r: &wbk->usage)) { |
85 | key_put(key: wbk->key); |
86 | kfree(objp: wbk); |
87 | } |
88 | } |
89 | |
90 | /* |
91 | * Cache key for writeback. |
92 | */ |
93 | int afs_cache_wb_key(struct afs_vnode *vnode, struct afs_file *af) |
94 | { |
95 | struct afs_wb_key *wbk, *p; |
96 | |
97 | wbk = kzalloc(size: sizeof(struct afs_wb_key), GFP_KERNEL); |
98 | if (!wbk) |
99 | return -ENOMEM; |
100 | refcount_set(r: &wbk->usage, n: 2); |
101 | wbk->key = af->key; |
102 | |
103 | spin_lock(lock: &vnode->wb_lock); |
104 | list_for_each_entry(p, &vnode->wb_keys, vnode_link) { |
105 | if (p->key == wbk->key) |
106 | goto found; |
107 | } |
108 | |
109 | key_get(key: wbk->key); |
110 | list_add_tail(new: &wbk->vnode_link, head: &vnode->wb_keys); |
111 | spin_unlock(lock: &vnode->wb_lock); |
112 | af->wb = wbk; |
113 | return 0; |
114 | |
115 | found: |
116 | refcount_inc(r: &p->usage); |
117 | spin_unlock(lock: &vnode->wb_lock); |
118 | af->wb = p; |
119 | kfree(objp: wbk); |
120 | return 0; |
121 | } |
122 | |
123 | /* |
124 | * open an AFS file or directory and attach a key to it |
125 | */ |
126 | int afs_open(struct inode *inode, struct file *file) |
127 | { |
128 | struct afs_vnode *vnode = AFS_FS_I(inode); |
129 | struct afs_file *af; |
130 | struct key *key; |
131 | int ret; |
132 | |
133 | _enter("{%llx:%llu}," , vnode->fid.vid, vnode->fid.vnode); |
134 | |
135 | key = afs_request_key(vnode->volume->cell); |
136 | if (IS_ERR(ptr: key)) { |
137 | ret = PTR_ERR(ptr: key); |
138 | goto error; |
139 | } |
140 | |
141 | af = kzalloc(size: sizeof(*af), GFP_KERNEL); |
142 | if (!af) { |
143 | ret = -ENOMEM; |
144 | goto error_key; |
145 | } |
146 | af->key = key; |
147 | |
148 | ret = afs_validate(vnode, key); |
149 | if (ret < 0) |
150 | goto error_af; |
151 | |
152 | if (file->f_mode & FMODE_WRITE) { |
153 | ret = afs_cache_wb_key(vnode, af); |
154 | if (ret < 0) |
155 | goto error_af; |
156 | } |
157 | |
158 | if (file->f_flags & O_TRUNC) |
159 | set_bit(AFS_VNODE_NEW_CONTENT, addr: &vnode->flags); |
160 | |
161 | fscache_use_cookie(cookie: afs_vnode_cache(vnode), will_modify: file->f_mode & FMODE_WRITE); |
162 | |
163 | file->private_data = af; |
164 | _leave(" = 0" ); |
165 | return 0; |
166 | |
167 | error_af: |
168 | kfree(objp: af); |
169 | error_key: |
170 | key_put(key); |
171 | error: |
172 | _leave(" = %d" , ret); |
173 | return ret; |
174 | } |
175 | |
176 | /* |
177 | * release an AFS file or directory and discard its key |
178 | */ |
179 | int afs_release(struct inode *inode, struct file *file) |
180 | { |
181 | struct afs_vnode_cache_aux aux; |
182 | struct afs_vnode *vnode = AFS_FS_I(inode); |
183 | struct afs_file *af = file->private_data; |
184 | loff_t i_size; |
185 | int ret = 0; |
186 | |
187 | _enter("{%llx:%llu}," , vnode->fid.vid, vnode->fid.vnode); |
188 | |
189 | if ((file->f_mode & FMODE_WRITE)) |
190 | ret = vfs_fsync(file, datasync: 0); |
191 | |
192 | file->private_data = NULL; |
193 | if (af->wb) |
194 | afs_put_wb_key(wbk: af->wb); |
195 | |
196 | if ((file->f_mode & FMODE_WRITE)) { |
197 | i_size = i_size_read(inode: &vnode->netfs.inode); |
198 | afs_set_cache_aux(vnode, aux: &aux); |
199 | fscache_unuse_cookie(cookie: afs_vnode_cache(vnode), aux_data: &aux, object_size: &i_size); |
200 | } else { |
201 | fscache_unuse_cookie(cookie: afs_vnode_cache(vnode), NULL, NULL); |
202 | } |
203 | |
204 | key_put(key: af->key); |
205 | kfree(objp: af); |
206 | afs_prune_wb_keys(vnode); |
207 | _leave(" = %d" , ret); |
208 | return ret; |
209 | } |
210 | |
211 | /* |
212 | * Allocate a new read record. |
213 | */ |
214 | struct afs_read *afs_alloc_read(gfp_t gfp) |
215 | { |
216 | struct afs_read *req; |
217 | |
218 | req = kzalloc(size: sizeof(struct afs_read), flags: gfp); |
219 | if (req) |
220 | refcount_set(r: &req->usage, n: 1); |
221 | |
222 | return req; |
223 | } |
224 | |
225 | /* |
226 | * Dispose of a ref to a read record. |
227 | */ |
228 | void afs_put_read(struct afs_read *req) |
229 | { |
230 | if (refcount_dec_and_test(r: &req->usage)) { |
231 | if (req->cleanup) |
232 | req->cleanup(req); |
233 | key_put(key: req->key); |
234 | kfree(objp: req); |
235 | } |
236 | } |
237 | |
238 | static void afs_fetch_data_notify(struct afs_operation *op) |
239 | { |
240 | struct afs_read *req = op->fetch.req; |
241 | struct netfs_io_subrequest *subreq = req->subreq; |
242 | int error = afs_op_error(op); |
243 | |
244 | req->error = error; |
245 | if (subreq) { |
246 | __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); |
247 | netfs_subreq_terminated(subreq, error ?: req->actual_len, false); |
248 | req->subreq = NULL; |
249 | } else if (req->done) { |
250 | req->done(req); |
251 | } |
252 | } |
253 | |
254 | static void afs_fetch_data_success(struct afs_operation *op) |
255 | { |
256 | struct afs_vnode *vnode = op->file[0].vnode; |
257 | |
258 | _enter("op=%08x" , op->debug_id); |
259 | afs_vnode_commit_status(op, &op->file[0]); |
260 | afs_stat_v(vnode, n_fetches); |
261 | atomic_long_add(i: op->fetch.req->actual_len, v: &op->net->n_fetch_bytes); |
262 | afs_fetch_data_notify(op); |
263 | } |
264 | |
265 | static void afs_fetch_data_put(struct afs_operation *op) |
266 | { |
267 | op->fetch.req->error = afs_op_error(op); |
268 | afs_put_read(req: op->fetch.req); |
269 | } |
270 | |
/* Operation table for FetchData: dispatches to the AFS or YFS RPC and
 * routes success/abort/failure back through the handlers above.
 */
static const struct afs_operation_ops afs_fetch_data_operation = {
	.issue_afs_rpc	= afs_fs_fetch_data,
	.issue_yfs_rpc	= yfs_fs_fetch_data,
	.success	= afs_fetch_data_success,
	.aborted	= afs_check_for_remote_deletion,
	.failed		= afs_fetch_data_notify,
	.put		= afs_fetch_data_put,
};
279 | |
280 | /* |
281 | * Fetch file data from the volume. |
282 | */ |
283 | int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req) |
284 | { |
285 | struct afs_operation *op; |
286 | |
287 | _enter("%s{%llx:%llu.%u},%x,,," , |
288 | vnode->volume->name, |
289 | vnode->fid.vid, |
290 | vnode->fid.vnode, |
291 | vnode->fid.unique, |
292 | key_serial(req->key)); |
293 | |
294 | op = afs_alloc_operation(req->key, vnode->volume); |
295 | if (IS_ERR(ptr: op)) { |
296 | if (req->subreq) |
297 | netfs_subreq_terminated(req->subreq, PTR_ERR(ptr: op), false); |
298 | return PTR_ERR(ptr: op); |
299 | } |
300 | |
301 | afs_op_set_vnode(op, n: 0, vnode); |
302 | |
303 | op->fetch.req = afs_get_read(req); |
304 | op->ops = &afs_fetch_data_operation; |
305 | return afs_do_sync_operation(op); |
306 | } |
307 | |
308 | static void afs_issue_read(struct netfs_io_subrequest *subreq) |
309 | { |
310 | struct afs_vnode *vnode = AFS_FS_I(inode: subreq->rreq->inode); |
311 | struct afs_read *fsreq; |
312 | |
313 | fsreq = afs_alloc_read(GFP_NOFS); |
314 | if (!fsreq) |
315 | return netfs_subreq_terminated(subreq, -ENOMEM, false); |
316 | |
317 | fsreq->subreq = subreq; |
318 | fsreq->pos = subreq->start + subreq->transferred; |
319 | fsreq->len = subreq->len - subreq->transferred; |
320 | fsreq->key = key_get(key: subreq->rreq->netfs_priv); |
321 | fsreq->vnode = vnode; |
322 | fsreq->iter = &subreq->io_iter; |
323 | |
324 | afs_fetch_data(vnode: fsreq->vnode, req: fsreq); |
325 | afs_put_read(req: fsreq); |
326 | } |
327 | |
328 | static int afs_symlink_read_folio(struct file *file, struct folio *folio) |
329 | { |
330 | struct afs_vnode *vnode = AFS_FS_I(inode: folio->mapping->host); |
331 | struct afs_read *fsreq; |
332 | int ret; |
333 | |
334 | fsreq = afs_alloc_read(GFP_NOFS); |
335 | if (!fsreq) |
336 | return -ENOMEM; |
337 | |
338 | fsreq->pos = folio_pos(folio); |
339 | fsreq->len = folio_size(folio); |
340 | fsreq->vnode = vnode; |
341 | fsreq->iter = &fsreq->def_iter; |
342 | iov_iter_xarray(i: &fsreq->def_iter, ITER_DEST, xarray: &folio->mapping->i_pages, |
343 | start: fsreq->pos, count: fsreq->len); |
344 | |
345 | ret = afs_fetch_data(vnode: fsreq->vnode, req: fsreq); |
346 | if (ret == 0) |
347 | folio_mark_uptodate(folio); |
348 | folio_unlock(folio); |
349 | return ret; |
350 | } |
351 | |
352 | static int afs_init_request(struct netfs_io_request *rreq, struct file *file) |
353 | { |
354 | if (file) |
355 | rreq->netfs_priv = key_get(key: afs_file_key(file)); |
356 | rreq->rsize = 256 * 1024; |
357 | rreq->wsize = 256 * 1024; |
358 | return 0; |
359 | } |
360 | |
361 | static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len, |
362 | struct folio **foliop, void **_fsdata) |
363 | { |
364 | struct afs_vnode *vnode = AFS_FS_I(inode: file_inode(f: file)); |
365 | |
366 | return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0; |
367 | } |
368 | |
369 | static void afs_free_request(struct netfs_io_request *rreq) |
370 | { |
371 | key_put(key: rreq->netfs_priv); |
372 | } |
373 | |
374 | static void afs_update_i_size(struct inode *inode, loff_t new_i_size) |
375 | { |
376 | struct afs_vnode *vnode = AFS_FS_I(inode); |
377 | loff_t i_size; |
378 | |
379 | write_seqlock(sl: &vnode->cb_lock); |
380 | i_size = i_size_read(inode: &vnode->netfs.inode); |
381 | if (new_i_size > i_size) { |
382 | i_size_write(inode: &vnode->netfs.inode, i_size: new_i_size); |
383 | inode_set_bytes(inode: &vnode->netfs.inode, bytes: new_i_size); |
384 | } |
385 | write_sequnlock(sl: &vnode->cb_lock); |
386 | fscache_update_cookie(cookie: afs_vnode_cache(vnode), NULL, object_size: &new_i_size); |
387 | } |
388 | |
389 | static void afs_netfs_invalidate_cache(struct netfs_io_request *wreq) |
390 | { |
391 | struct afs_vnode *vnode = AFS_FS_I(inode: wreq->inode); |
392 | |
393 | afs_invalidate_cache(vnode, flags: 0); |
394 | } |
395 | |
/* Hooks AFS provides to the netfs library for request setup/teardown and
 * read/write issuing.
 */
const struct netfs_request_ops afs_req_ops = {
	.init_request		= afs_init_request,
	.free_request		= afs_free_request,
	.check_write_begin	= afs_check_write_begin,
	.issue_read		= afs_issue_read,
	.update_i_size		= afs_update_i_size,
	.invalidate_cache	= afs_netfs_invalidate_cache,
	.create_write_requests	= afs_create_write_requests,
};
405 | |
406 | static void afs_add_open_mmap(struct afs_vnode *vnode) |
407 | { |
408 | if (atomic_inc_return(v: &vnode->cb_nr_mmap) == 1) { |
409 | down_write(sem: &vnode->volume->open_mmaps_lock); |
410 | |
411 | if (list_empty(head: &vnode->cb_mmap_link)) |
412 | list_add_tail(new: &vnode->cb_mmap_link, head: &vnode->volume->open_mmaps); |
413 | |
414 | up_write(sem: &vnode->volume->open_mmaps_lock); |
415 | } |
416 | } |
417 | |
418 | static void afs_drop_open_mmap(struct afs_vnode *vnode) |
419 | { |
420 | if (atomic_add_unless(v: &vnode->cb_nr_mmap, a: -1, u: 1)) |
421 | return; |
422 | |
423 | down_write(sem: &vnode->volume->open_mmaps_lock); |
424 | |
425 | read_seqlock_excl(sl: &vnode->cb_lock); |
426 | // the only place where ->cb_nr_mmap may hit 0 |
427 | // see __afs_break_callback() for the other side... |
428 | if (atomic_dec_and_test(v: &vnode->cb_nr_mmap)) |
429 | list_del_init(entry: &vnode->cb_mmap_link); |
430 | read_sequnlock_excl(sl: &vnode->cb_lock); |
431 | |
432 | up_write(sem: &vnode->volume->open_mmaps_lock); |
433 | flush_work(work: &vnode->cb_work); |
434 | } |
435 | |
436 | /* |
437 | * Handle setting up a memory mapping on an AFS file. |
438 | */ |
439 | static int afs_file_mmap(struct file *file, struct vm_area_struct *vma) |
440 | { |
441 | struct afs_vnode *vnode = AFS_FS_I(inode: file_inode(f: file)); |
442 | int ret; |
443 | |
444 | afs_add_open_mmap(vnode); |
445 | |
446 | ret = generic_file_mmap(file, vma); |
447 | if (ret == 0) |
448 | vma->vm_ops = &afs_vm_ops; |
449 | else |
450 | afs_drop_open_mmap(vnode); |
451 | return ret; |
452 | } |
453 | |
454 | static void afs_vm_open(struct vm_area_struct *vma) |
455 | { |
456 | afs_add_open_mmap(vnode: AFS_FS_I(inode: file_inode(f: vma->vm_file))); |
457 | } |
458 | |
459 | static void afs_vm_close(struct vm_area_struct *vma) |
460 | { |
461 | afs_drop_open_mmap(vnode: AFS_FS_I(inode: file_inode(f: vma->vm_file))); |
462 | } |
463 | |
464 | static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff) |
465 | { |
466 | struct afs_vnode *vnode = AFS_FS_I(inode: file_inode(f: vmf->vma->vm_file)); |
467 | |
468 | if (afs_check_validity(vnode)) |
469 | return filemap_map_pages(vmf, start_pgoff, end_pgoff); |
470 | return 0; |
471 | } |
472 | |
473 | static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) |
474 | { |
475 | struct inode *inode = file_inode(f: iocb->ki_filp); |
476 | struct afs_vnode *vnode = AFS_FS_I(inode); |
477 | struct afs_file *af = iocb->ki_filp->private_data; |
478 | ssize_t ret; |
479 | |
480 | if (iocb->ki_flags & IOCB_DIRECT) |
481 | return netfs_unbuffered_read_iter(iocb, iter); |
482 | |
483 | ret = netfs_start_io_read(inode); |
484 | if (ret < 0) |
485 | return ret; |
486 | ret = afs_validate(vnode, key: af->key); |
487 | if (ret == 0) |
488 | ret = filemap_read(iocb, to: iter, already_read: 0); |
489 | netfs_end_io_read(inode); |
490 | return ret; |
491 | } |
492 | |
493 | static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos, |
494 | struct pipe_inode_info *pipe, |
495 | size_t len, unsigned int flags) |
496 | { |
497 | struct inode *inode = file_inode(f: in); |
498 | struct afs_vnode *vnode = AFS_FS_I(inode); |
499 | struct afs_file *af = in->private_data; |
500 | ssize_t ret; |
501 | |
502 | ret = netfs_start_io_read(inode); |
503 | if (ret < 0) |
504 | return ret; |
505 | ret = afs_validate(vnode, key: af->key); |
506 | if (ret == 0) |
507 | ret = filemap_splice_read(in, ppos, pipe, len, flags); |
508 | netfs_end_io_read(inode); |
509 | return ret; |
510 | } |
511 | |