// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * file.c
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "locks.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"
#include "xattr.h"
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

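/*
 * Allocate the per-file private data and initialize the lock resource
 * used for cluster-aware flock on this file.
 */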
static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp;

	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->fp_file = file;
	mutex_init(&fp->fp_mutex);
	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
	file->private_data = fp;

	return 0;
}

static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (fp) {
		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
		ocfs2_lock_res_free(&fp->fp_flock);
		kfree(fp);
		file->private_data = NULL;
	}
}

static int ocfs2_file_open(struct inode *inode, struct file *file)
{
	int status;
	int mode = file->f_flags;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
			      (unsigned long long)oi->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name, mode);

	if (file->f_mode & FMODE_WRITE) {
		status = dquot_initialize(inode);
		if (status)
			goto leave;
	}

	spin_lock(&oi->ip_lock);

	/* Check that the inode hasn't been wiped from disk by another
	 * node. If it hasn't then we're safe as long as we hold the
	 * spin lock until our increment of open count. */
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&oi->ip_lock);

		status = -ENOENT;
		goto leave;
	}

	if (mode & O_DIRECT)
		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

	oi->ip_open_count++;
	spin_unlock(&oi->ip_lock);

	status = ocfs2_init_file_private(inode, file);
	if (status) {
		/*
		 * We want to set open count back if we're failing the
		 * open.
		 */
		spin_lock(&oi->ip_lock);
		oi->ip_open_count--;
		spin_unlock(&oi->ip_lock);
	}

	file->f_mode |= FMODE_NOWAIT;

leave:
	return status;
}

static int ocfs2_file_release(struct inode *inode, struct file *file)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	spin_lock(&oi->ip_lock);
	if (!--oi->ip_open_count)
		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;

	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
				 oi->ip_blkno,
				 file->f_path.dentry->d_name.len,
				 file->f_path.dentry->d_name.name,
				 oi->ip_open_count);
	spin_unlock(&oi->ip_lock);

	ocfs2_free_file_private(inode, file);

	return 0;
}

static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
	return ocfs2_init_file_private(inode, file);
}

static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
	ocfs2_free_file_private(inode, file);
	return 0;
}

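/*
 * Flush the dirty data in the given range, then ask jbd2 to commit the
 * transaction that covers this inode, issuing a block device flush
 * ourselves if the journal will not send one.
 */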
static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	int err = 0;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	journal_t *journal = osb->journal->j_journal;
	int ret;
	tid_t commit_tid;
	bool needs_barrier = false;

	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
			      oi->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name,
			      (unsigned long long)datasync);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
		needs_barrier = true;
	err = jbd2_complete_transaction(journal, commit_tid);
	if (needs_barrier) {
		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
		if (!err)
			err = ret;
	}

	if (err)
		mlog_errno(err);

	return (err < 0) ? -EIO : 0;
}

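/*
 * Decide whether an atime update is warranted, honoring the noatime,
 * nodiratime and relatime mount flags as well as ocfs2's own
 * atime_quantum mount option.
 */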
int ocfs2_should_update_atime(struct inode *inode,
			      struct vfsmount *vfsmnt)
{
	struct timespec64 now;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return 0;

	if ((inode->i_flags & S_NOATIME) ||
	    ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	/*
	 * We can be called with no vfsmnt structure - NFSD will
	 * sometimes do this.
	 *
	 * Note that our action here is different from touch_atime() -
	 * if we can't tell whether this is a noatime mount, then we
	 * don't know whether to trust the value of s_atime_quantum.
	 */
	if (vfsmnt == NULL)
		return 0;

	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	if (vfsmnt->mnt_flags & MNT_RELATIME) {
		if ((timespec64_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
		    (timespec64_compare(&inode->i_atime, &inode->i_ctime) <= 0))
			return 1;

		return 0;
	}

	now = current_time(inode);
	if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
		return 0;
	else
		return 1;
}

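/* Write the in-memory atime into the dinode under a journal transaction. */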
int ocfs2_update_inode_atime(struct inode *inode,
			     struct buffer_head *bh)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
	 * have i_mutex to guard against concurrent changes to other
	 * inode fields.
	 */
	inode->i_atime = current_time(inode);
	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);
	ocfs2_journal_dirty(handle, bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

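/*
 * Set the new i_size, recompute i_blocks, stamp c/mtime and mark the
 * inode dirty in the caller's transaction.
 */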
int ocfs2_set_inode_size(handle_t *handle,
			 struct inode *inode,
			 struct buffer_head *fe_bh,
			 u64 new_i_size)
{
	int status;

	i_size_write(inode, new_i_size);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	inode->i_ctime = inode->i_mtime = current_time(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	return status;
}

int ocfs2_simple_size_update(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_set_inode_size(handle, inode, di_bh,
				   new_i_size);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_update_inode_fsync_trans(handle, inode, 0);
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

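/*
 * CoW the cluster containing @offset if that cluster is refcounted, so
 * that the later partial-cluster zeroing writes to a private copy.
 */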
static int ocfs2_cow_file_pos(struct inode *inode,
			      struct buffer_head *fe_bh,
			      u64 offset)
{
	int status;
	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	/*
	 * If the new offset is cluster-aligned, there is nothing for
	 * ocfs2_zero_range_for_truncate() to fill, so there is no need
	 * to CoW either.
	 */
	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
		return 0;

	status = ocfs2_get_clusters(inode, cpos, &phys,
				    &num_clusters, &ext_flags);
	if (status) {
		mlog_errno(status);
		goto out;
	}

	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
		goto out;

	return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);

out:
	return status;
}

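/*
 * Prepare an inode for a size-reducing truncate: CoW and zero the tail
 * of the last remaining cluster and write the new i_size into the
 * dinode. (Actually orphaning the inode is still a TODO, see below.)
 */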
static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
				     struct inode *inode,
				     struct buffer_head *fe_bh,
				     u64 new_i_size)
{
	int status;
	handle_t *handle;
	struct ocfs2_dinode *di;
	u64 cluster_bytes;

	/*
	 * We need to CoW the cluster that contains the offset if it is
	 * reflinked, since we will call ocfs2_zero_range_for_truncate
	 * later, which will write "0" from the offset to the end of the
	 * cluster.
	 */
	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
	if (status) {
		mlog_errno(status);
		return status;
	}

	/* TODO: This needs to actually orphan the inode in this
	 * transaction. */

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_commit;
	}

	/*
	 * Do this before setting i_size.
	 */
	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
					       cluster_bytes);
	if (status) {
		mlog_errno(status);
		goto out_commit;
	}

	i_size_write(inode, new_i_size);
	inode->i_ctime = inode->i_mtime = current_time(inode);

	di = (struct ocfs2_dinode *) fe_bh->b_data;
	di->i_size = cpu_to_le64(new_i_size);
	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, fe_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	return status;
}

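/*
 * Truncate the file down to @new_i_size. Inline-data inodes are
 * handled directly; otherwise the tail is zeroed, i_size is updated on
 * disk and the now-unused extents are released.
 */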
int ocfs2_truncate_file(struct inode *inode,
			struct buffer_head *di_bh,
			u64 new_i_size)
{
	int status = 0;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
	 * already validated it */
	fe = (struct ocfs2_dinode *) di_bh->b_data;

	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)le64_to_cpu(fe->i_size),
				  (unsigned long long)new_i_size);

	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
			"Inode %llu, inode i_size = %lld != di "
			"i_size = %llu, i_flags = 0x%x\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			i_size_read(inode),
			(unsigned long long)le64_to_cpu(fe->i_size),
			le32_to_cpu(fe->i_flags));

	if (new_i_size > le64_to_cpu(fe->i_size)) {
		trace_ocfs2_truncate_file_error(
			(unsigned long long)le64_to_cpu(fe->i_size),
			(unsigned long long)new_i_size);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_resv_discard(&osb->osb_la_resmap,
			   &OCFS2_I(inode)->ip_la_data_resv);

	/*
	 * The inode lock forced other nodes to sync and drop their
	 * pages, which (correctly) happens even if we have a truncate
	 * without allocation change - ocfs2 cluster sizes can be much
	 * greater than page size, so we have to truncate them
	 * anyway.
	 */
	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(inode->i_mapping, new_i_size);

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
					       i_size_read(inode), 1);
		if (status)
			mlog_errno(status);

		goto bail_unlock_sem;
	}

	/* alright, we're going to need to do a full blown alloc size
	 * change. Orphan the inode so that recovery can complete the
	 * truncate if necessary. This does the task of marking
	 * i_size. */
	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_sem;
	}

	status = ocfs2_commit_truncate(osb, inode, di_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_sem;
	}

	/* TODO: orphan dir cleanup here. */
bail_unlock_sem:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

bail:
	if (!status && OCFS2_I(inode)->ip_clusters == 0)
		status = ocfs2_try_remove_refcount_tree(inode, di_bh);

	return status;
}

/*
 * Extend the file allocation only here.
 * We'll update all the disk stuff, and oip->alloc_size.
 *
 * Expects things to be locked, a transaction to be started and enough
 * data / metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason, if a restart is needed.
 * If passed in, *reason_ret will always be set, even on error.
 */
int ocfs2_add_inode_data(struct ocfs2_super *osb,
			 struct inode *inode,
			 u32 *logical_offset,
			 u32 clusters_to_add,
			 int mark_unwritten,
			 struct buffer_head *fe_bh,
			 handle_t *handle,
			 struct ocfs2_alloc_context *data_ac,
			 struct ocfs2_alloc_context *meta_ac,
			 enum ocfs2_alloc_restarted *reason_ret)
{
	int ret;
	struct ocfs2_extent_tree et;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
	ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
					  clusters_to_add, mark_unwritten,
					  data_ac, meta_ac, reason_ret);

	return ret;
}

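/*
 * Allocate @clusters_to_add clusters for the region starting at
 * @logical_start, restarting the transaction or the whole function as
 * the allocators require. Quota is reserved up front and any unused
 * reservation is returned.
 */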
static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
				   u32 clusters_to_add, int mark_unwritten)
{
	int status = 0;
	int restart_func = 0;
	int credits;
	u32 prev_clusters;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe = NULL;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	enum ocfs2_alloc_restarted why = RESTART_NONE;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_tree et;
	int did_quota = 0;

	/*
	 * Unwritten extents only exist on file systems which
	 * support holes.
	 */
	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

restart_all:
	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
				       &data_ac, &meta_ac);
	if (status) {
		mlog_errno(status);
		goto leave;
	}

	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto leave;
	}

restarted_transaction:
	trace_ocfs2_extend_allocation(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)i_size_read(inode),
		le32_to_cpu(fe->i_clusters), clusters_to_add,
		why, restart_func);

	status = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	if (status)
		goto leave;
	did_quota = 1;

	/* reserve a write to the file entry early on - that way, if we
	 * run out of credits in the allocation path, we can still
	 * update i_size. */
	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	prev_clusters = OCFS2_I(inode)->ip_clusters;

	status = ocfs2_add_inode_data(osb,
				      inode,
				      &logical_start,
				      clusters_to_add,
				      mark_unwritten,
				      bh,
				      handle,
				      data_ac,
				      meta_ac,
				      &why);
	if ((status < 0) && (status != -EAGAIN)) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto leave;
	}
	ocfs2_update_inode_fsync_trans(handle, inode, 1);
	ocfs2_journal_dirty(handle, bh);

	spin_lock(&OCFS2_I(inode)->ip_lock);
	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
	spin_unlock(&OCFS2_I(inode)->ip_lock);
	/* Release unused quota reservation */
	dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	did_quota = 0;

	if (why != RESTART_NONE && clusters_to_add) {
		if (why == RESTART_META) {
			restart_func = 1;
			status = 0;
		} else {
			BUG_ON(why != RESTART_TRANS);

			status = ocfs2_allocate_extend_trans(handle, 1);
			if (status < 0) {
				/* handle still has to be committed at
				 * this point. */
				status = -ENOMEM;
				mlog_errno(status);
				goto leave;
			}
			goto restarted_transaction;
		}
	}

	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
			le32_to_cpu(fe->i_clusters),
			(unsigned long long)le64_to_cpu(fe->i_size),
			OCFS2_I(inode)->ip_clusters,
			(unsigned long long)i_size_read(inode));

leave:
	if (status < 0 && did_quota)
		dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	if (handle) {
		ocfs2_commit_trans(osb, handle);
		handle = NULL;
	}
	if (data_ac) {
		ocfs2_free_alloc_context(data_ac);
		data_ac = NULL;
	}
	if (meta_ac) {
		ocfs2_free_alloc_context(meta_ac);
		meta_ac = NULL;
	}
	if ((!status) && restart_func) {
		restart_func = 0;
		goto restart_all;
	}
	brelse(bh);
	bh = NULL;

	return status;
}


/*
 * While a write will already be ordering the data, a truncate will not.
 * Thus, we need to explicitly order the zeroed pages.
 */
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
						      struct buffer_head *di_bh,
						      loff_t start_byte,
						      loff_t length)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	int ret = 0;

	if (!ocfs2_should_order_data(inode))
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret)
		mlog_errno(ret);
	ocfs2_update_inode_fsync_trans(handle, inode, 1);

out:
	if (ret) {
		if (!IS_ERR(handle))
			ocfs2_commit_trans(osb, handle);
		handle = ERR_PTR(ret);
	}
	return handle;
}

/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
				 u64 abs_to, struct buffer_head *di_bh)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long index = abs_from >> PAGE_SHIFT;
	handle_t *handle;
	int ret = 0;
	unsigned zero_from, zero_to, block_start, block_end;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	BUG_ON(abs_from >= abs_to);
	BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
	BUG_ON(abs_from & (inode->i_blkbits - 1));

	handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
						      abs_from,
						      abs_to - abs_from);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out_commit_trans;
	}

	/* Get the offsets within the page that we want to zero */
	zero_from = abs_from & (PAGE_SIZE - 1);
	zero_to = abs_to & (PAGE_SIZE - 1);
	if (!zero_to)
		zero_to = PAGE_SIZE;

	trace_ocfs2_write_zero_page(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)abs_from,
			(unsigned long long)abs_to,
			index, zero_from, zero_to);

	/* We know that zero_from is block aligned */
	for (block_start = zero_from; block_start < zero_to;
	     block_start = block_end) {
		block_end = block_start + i_blocksize(inode);

		/*
		 * block_start is block-aligned. Bump it by one to force
		 * __block_write_begin and block_commit_write to zero the
		 * whole block.
		 */
		ret = __block_write_begin(page, block_start + 1, 0,
					  ocfs2_get_block);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock;
		}


		/* must not update i_size! */
		ret = block_commit_write(page, block_start + 1,
					 block_start + 1);
		if (ret < 0)
			mlog_errno(ret);
		else
			ret = 0;
	}

	/*
	 * fs-writeback will release dirty pages whose offsets are
	 * beyond the inode size without taking the page lock; the
	 * release happens in block_write_full_page().
	 */
	i_size_write(inode, abs_to);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	di->i_size = cpu_to_le64((u64)i_size_read(inode));
	inode->i_mtime = inode->i_ctime = current_time(inode);
	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
	di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	di->i_mtime_nsec = di->i_ctime_nsec;
	if (handle) {
		ocfs2_journal_dirty(handle, di_bh);
		ocfs2_update_inode_fsync_trans(handle, inode, 1);
	}

out_unlock:
	unlock_page(page);
	put_page(page);
out_commit_trans:
	if (handle)
		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	return ret;
}

/*
 * Find the next range to zero. We do this in terms of bytes because
 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 * pagecache. We may return multiple extents.
 *
 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 * needs to be zeroed. range_start and range_end return the next zeroing
 * range. A subsequent call should pass the previous range_end as its
 * zero_start. If range_end is 0, there's nothing to do.
 *
 * Unwritten extents are skipped over. Refcounted extents are CoW'd.
 */
static int ocfs2_zero_extend_get_range(struct inode *inode,
				       struct buffer_head *di_bh,
				       u64 zero_start, u64 zero_end,
				       u64 *range_start, u64 *range_end)
{
	int rc = 0, needs_cow = 0;
	u32 p_cpos, zero_clusters = 0;
	u32 zero_cpos =
		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	while (zero_cpos < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
					&num_clusters, &ext_flags);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}

		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
			zero_clusters = num_clusters;
			if (ext_flags & OCFS2_EXT_REFCOUNTED)
				needs_cow = 1;
			break;
		}

		zero_cpos += num_clusters;
	}
	if (!zero_clusters) {
		*range_end = 0;
		goto out;
	}

	while ((zero_cpos + zero_clusters) < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
					&p_cpos, &num_clusters,
					&ext_flags);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}

		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
			break;
		if (ext_flags & OCFS2_EXT_REFCOUNTED)
			needs_cow = 1;
		zero_clusters += num_clusters;
	}
	if ((zero_cpos + zero_clusters) > last_cpos)
		zero_clusters = last_cpos - zero_cpos;

	if (needs_cow) {
		rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
					zero_clusters, UINT_MAX);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}
	}

	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
					     zero_cpos + zero_clusters);

out:
	return rc;
}

/*
 * Zero one range returned from ocfs2_zero_extend_get_range(). The caller
 * has made sure that the entire range needs zeroing.
 */
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
				   u64 range_end, struct buffer_head *di_bh)
{
	int rc = 0;
	u64 next_pos;
	u64 zero_pos = range_start;

	trace_ocfs2_zero_extend_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)range_start,
			(unsigned long long)range_end);
	BUG_ON(range_start >= range_end);

	while (zero_pos < range_end) {
		next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
		if (next_pos > range_end)
			next_pos = range_end;
		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
		if (rc < 0) {
			mlog_errno(rc);
			break;
		}
		zero_pos = next_pos;

		/*
		 * Very large extends have the potential to lock up
		 * the cpu for extended periods of time.
		 */
		cond_resched();
	}

	return rc;
}

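/*
 * Zero the already-allocated region between the current i_size and
 * @zero_to_size, one range at a time as reported by
 * ocfs2_zero_extend_get_range().
 */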
int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
		      loff_t zero_to_size)
{
	int ret = 0;
	u64 zero_start, range_start = 0, range_end = 0;
	struct super_block *sb = inode->i_sb;

	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
				(unsigned long long)zero_start,
				(unsigned long long)i_size_read(inode));
	while (zero_start < zero_to_size) {
		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
						  zero_to_size,
						  &range_start,
						  &range_end);
		if (ret) {
			mlog_errno(ret);
			break;
		}
		if (!range_end)
			break;
		/* Trim the ends */
		if (range_start < zero_start)
			range_start = zero_start;
		if (range_end > zero_to_size)
			range_end = zero_to_size;

		ret = ocfs2_zero_extend_range(inode, range_start,
					      range_end, di_bh);
		if (ret) {
			mlog_errno(ret);
			break;
		}
		zero_start = range_end;
	}

	return ret;
}

int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
			  u64 new_i_size, u64 zero_to)
{
	int ret;
	u32 clusters_to_add;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	/*
	 * Only quota files call this without a bh, and they can't be
	 * refcounted.
	 */
	BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));

	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
	if (clusters_to_add < oi->ip_clusters)
		clusters_to_add = 0;
	else
		clusters_to_add -= oi->ip_clusters;

	if (clusters_to_add) {
		ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
					      clusters_to_add, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Call this even if we don't add any clusters to the tree. We
	 * still need to zero the area between the old i_size and the
	 * new i_size.
	 */
	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}

static int ocfs2_extend_file(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	BUG_ON(!di_bh);

	/* setattr sometimes calls us like this. */
	if (new_i_size == 0)
		goto out;

	if (i_size_read(inode) == new_i_size)
		goto out;
	BUG_ON(new_i_size < i_size_read(inode));

	/*
	 * The alloc sem blocks people in read/write from reading our
	 * allocation until we're done changing it. We depend on
	 * i_mutex to block other extend/truncate calls while we're
	 * here. We even have to hold it for sparse files because there
	 * might be some tail zeroing.
	 */
	down_write(&oi->ip_alloc_sem);

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		/*
		 * We can optimize small extends by keeping the inode's
		 * inline data.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
			up_write(&oi->ip_alloc_sem);
			goto out_update_size;
		}

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			up_write(&oi->ip_alloc_sem);
			mlog_errno(ret);
			goto out;
		}
	}

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
	else
		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
					    new_i_size);

	up_write(&oi->ip_alloc_sem);

	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

out_update_size:
	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}

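/*
 * Change inode attributes. Size changes are done under the rw lock via
 * truncate/extend; uid/gid changes additionally transfer quota inside
 * the transaction.
 */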
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
	int status = 0, size_change;
	int inode_locked = 0;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct buffer_head *bh = NULL;
	handle_t *handle = NULL;
	struct dquot *transfer_to[MAXQUOTAS] = { };
	int qtype;
	int had_lock;
	struct ocfs2_lock_holder oh;

	trace_ocfs2_setattr(inode, dentry,
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    dentry->d_name.len, dentry->d_name.name,
			    attr->ia_valid, attr->ia_mode,
			    from_kuid(&init_user_ns, attr->ia_uid),
			    from_kgid(&init_user_ns, attr->ia_gid));

	/* ensuring we don't even attempt to truncate a symlink */
	if (S_ISLNK(inode->i_mode))
		attr->ia_valid &= ~ATTR_SIZE;

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
			   | ATTR_GID | ATTR_UID | ATTR_MODE)
	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
		return 0;

	status = setattr_prepare(dentry, attr);
	if (status)
		return status;

	if (is_quota_modification(inode, attr)) {
		status = dquot_initialize(inode);
		if (status)
			return status;
	}
	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
	if (size_change) {
		/*
		 * Here we should wait for dio to finish before taking
		 * the inode lock, to avoid a deadlock between
		 * ocfs2_setattr() and ocfs2_dio_end_io_write()
		 */
		inode_dio_wait(inode);

		status = ocfs2_rw_lock(inode, 1);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
	if (had_lock < 0) {
		status = had_lock;
		goto bail_unlock_rw;
	} else if (had_lock) {
		/*
		 * As far as we know, ocfs2_setattr() could only be the first
		 * VFS entry point in the call chain of a recursive cluster
		 * locking issue.
		 *
		 * For instance:
		 * chmod_common()
		 *  notify_change()
		 *   ocfs2_setattr()
		 *    posix_acl_chmod()
		 *     ocfs2_iop_get_acl()
		 *
		 * But, we're not 100% sure if it's always true, because the
		 * ordering of the VFS entry points in the call chain is out
		 * of our control. So, we'd better dump the stack here to
		 * catch the other cases of recursive locking.
		 */
		mlog(ML_ERROR, "Another case of recursive locking:\n");
		dump_stack();
	}
	inode_locked = 1;

	if (size_change) {
		status = inode_newsize_ok(inode, attr->ia_size);
		if (status)
			goto bail_unlock;

		if (i_size_read(inode) >= attr->ia_size) {
			if (ocfs2_should_order_data(inode)) {
				status = ocfs2_begin_ordered_truncate(inode,
								      attr->ia_size);
				if (status)
					goto bail_unlock;
			}
			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
		} else
			status = ocfs2_extend_file(inode, bh, attr->ia_size);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			status = -ENOSPC;
			goto bail_unlock;
		}
	}

	if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		/*
		 * Gather pointers to quota structures so that allocation /
		 * freeing of quota structures happens here and not inside
		 * dquot_transfer() where we have problems with lock ordering
		 */
		if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
			if (IS_ERR(transfer_to[USRQUOTA])) {
				status = PTR_ERR(transfer_to[USRQUOTA]);
				transfer_to[USRQUOTA] = NULL;
				goto bail_unlock;
			}
		}
		if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
			if (IS_ERR(transfer_to[GRPQUOTA])) {
				status = PTR_ERR(transfer_to[GRPQUOTA]);
				transfer_to[GRPQUOTA] = NULL;
				goto bail_unlock;
			}
		}
		down_write(&OCFS2_I(inode)->ip_alloc_sem);
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
					   2 * ocfs2_quota_trans_credits(sb));
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_unlock_alloc;
		}
		status = __dquot_transfer(inode, transfer_to);
		if (status < 0)
			goto bail_commit;
	} else {
		down_write(&OCFS2_I(inode)->ip_alloc_sem);
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_unlock_alloc;
		}
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (status < 0)
		mlog_errno(status);

bail_commit:
	ocfs2_commit_trans(osb, handle);
bail_unlock_alloc:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
bail_unlock:
	if (status && inode_locked) {
		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
		inode_locked = 0;
	}
bail_unlock_rw:
	if (size_change)
		ocfs2_rw_unlock(inode, 1);
bail:

	/* Release quota pointers in case we acquired them */
	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
		dqput(transfer_to[qtype]);

	if (!status && attr->ia_valid & ATTR_MODE) {
		status = ocfs2_acl_chmod(inode, bh);
		if (status < 0)
			mlog_errno(status);
	}
	if (inode_locked)
		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);

	brelse(bh);
	return status;
}

int ocfs2_getattr(const struct path *path, struct kstat *stat,
		  u32 request_mask, unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct super_block *sb = path->dentry->d_sb;
	struct ocfs2_super *osb = sb->s_fs_info;
	int err;

	err = ocfs2_inode_revalidate(path->dentry);
	if (err) {
		if (err != -ENOENT)
			mlog_errno(err);
		goto bail;
	}

	generic_fillattr(inode, stat);
	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar, rsync,
	 * others don't incorrectly think the file is completely sparse.
	 */
	if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		stat->blocks += (stat->size + 511)>>9;

	/* We set the blksize from the cluster size for performance */
	stat->blksize = osb->s_clustersize;

bail:
	return err;
}

int ocfs2_permission(struct inode *inode, int mask)
{
	int ret, had_lock;
	struct ocfs2_lock_holder oh;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
	if (had_lock < 0) {
		ret = had_lock;
		goto out;
	} else if (had_lock) {
		/* See comments in ocfs2_setattr() for details.
		 * The call chain of this case could be:
		 * do_sys_open()
		 *  may_open()
		 *   inode_permission()
		 *    ocfs2_permission()
		 *     ocfs2_iop_get_acl()
		 */
		mlog(ML_ERROR, "Another case of recursive locking:\n");
		dump_stack();
	}

	ret = generic_permission(inode, mask);

	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
out:
	return ret;
}

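/* Clear the setuid/setgid bits on write, journalling the dinode update. */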
static int __ocfs2_write_remove_suid(struct inode *inode,
				     struct buffer_head *bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;

	trace_ocfs2_write_remove_suid(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			inode->i_mode);

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_trans;
	}

	inode->i_mode &= ~S_ISUID;
	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
		inode->i_mode &= ~S_ISGID;

	di = (struct ocfs2_dinode *) bh->b_data;
	di->i_mode = cpu_to_le16(inode->i_mode);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, bh);

out_trans:
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_write_remove_suid(struct inode *inode)
{
	int ret;
	struct buffer_head *bh = NULL;

	ret = ocfs2_read_inode_block(inode, &bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = __ocfs2_write_remove_suid(inode, bh);
out:
	brelse(bh);
	return ret;
}

/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes. Existing extents are skipped, any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
					    u64 start, u64 len)
{
	int ret;
	u32 cpos, phys_cpos, clusters, alloc_size;
	u64 end = start + len;
	struct buffer_head *di_bh = NULL;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_read_inode_block(inode, &di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Nothing to do if the requested reservation range
		 * fits within the inode.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, end))
			goto out;

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * We consider both start and len to be inclusive.
	 */
	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
	clusters -= cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
					 &alloc_size, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Hole or existing extent len can be arbitrary, so
		 * cap it to our own allocation request.
		 */
		if (alloc_size > clusters)
			alloc_size = clusters;

		if (phys_cpos) {
			/*
			 * We already have an allocation at this
			 * region so we can safely skip it.
			 */
			goto next;
		}

		ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
		if (ret) {
			if (ret != -ENOSPC)
				mlog_errno(ret);
			goto out;
		}

next:
		cpos += alloc_size;
		clusters -= alloc_size;
	}

	ret = 0;
out:

	brelse(di_bh);
	return ret;
}

/*
 * Truncate a byte range, avoiding pages within partial clusters. This
 * preserves those pages for the zeroing code to write to.
 */
static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
					 u64 byte_len)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	loff_t start, end;
	struct address_space *mapping = inode->i_mapping;

	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
	end = byte_start + byte_len;
	end = end & ~(osb->s_clustersize - 1);

	if (start < end) {
		unmap_mapping_range(mapping, start, end - start, 0);
		truncate_inode_pages_range(mapping, start, end - 1);
	}
}

/*
 * Zero out partial blocks of one cluster.
 *
 * start: file offset where the zeroing starts; it will be rounded up to
 *        block alignment.
 * len: it will be trimmed to the end of the current cluster if
 *      "start + len" extends past it.
 */
static int ocfs2_zeroout_partial_cluster(struct inode *inode,
					 u64 start, u64 len)
{
	int ret;
	u64 start_block, end_block, nr_blocks;
	u64 p_block, offset;
	u32 cluster, p_cluster, nr_clusters;
	struct super_block *sb = inode->i_sb;
	u64 end = ocfs2_align_bytes_to_clusters(sb, start);

	if (start + len < end)
		end = start + len;

	start_block = ocfs2_blocks_for_bytes(sb, start);
	end_block = ocfs2_blocks_for_bytes(sb, end);
	nr_blocks = end_block - start_block;
	if (!nr_blocks)
		return 0;

	cluster = ocfs2_bytes_to_clusters(sb, start);
	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
				 &nr_clusters, NULL);
	if (ret)
		return ret;
	if (!p_cluster)
		return 0;

	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
}

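/*
 * Zero the partial clusters at the edges of a punched range; whole
 * clusters in between are dealt with by the extent removal itself.
 */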
static int ocfs2_zero_partial_clusters(struct inode *inode,
				       u64 start, u64 len)
{
	int ret = 0;
	u64 tmpend = 0;
	u64 end = start + len;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	unsigned int csize = osb->s_clustersize;
	handle_t *handle;
	loff_t isize = i_size_read(inode);

	/*
	 * The "start" and "end" values are NOT necessarily part of
	 * the range whose allocation is being deleted. Rather, this
	 * is what the user passed in with the request. We must zero
	 * partial clusters here. There's no need to worry about
	 * physical allocation - the zeroing code knows to skip holes.
	 */
	trace_ocfs2_zero_partial_clusters(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)start, (unsigned long long)end);

	/*
	 * If both edges are on a cluster boundary then there's no
	 * zeroing required as the region is part of the allocation to
	 * be truncated.
	 */
	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
		goto out;

	/* No page cache for EOF blocks, issue zero out to disk. */
	if (end > isize) {
		/*
		 * Zero out the eof blocks in the last cluster starting
		 * from "isize", even when "start" > "isize", because it
		 * is complicated to zero out just at "start": "start"
		 * may not be aligned to the block size, which would
		 * require a buffer write, and buffer writes beyond eof
		 * are not supported.
		 */
		ret = ocfs2_zeroout_partial_cluster(inode, isize,
						    end - isize);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		if (start >= isize)
			goto out;
		end = isize;
	}
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/*
	 * If start is on a cluster boundary and end is somewhere in another
	 * cluster, we have not COWed the cluster starting at start, unless
	 * end is also within the same cluster. So, in this case, we skip this
	 * first call to ocfs2_zero_range_for_truncate() and move on
	 * to the next one.
	 */
	if ((start & (csize - 1)) != 0) {
		/*
		 * We want to get the byte offset of the end of the 1st
		 * cluster.
		 */
		tmpend = (u64)osb->s_clustersize +
			(start & ~(osb->s_clustersize - 1));
		if (tmpend > end)
			tmpend = end;

		trace_ocfs2_zero_partial_clusters_range1(
			(unsigned long long)start,
			(unsigned long long)tmpend);

		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
						    tmpend);
		if (ret)
			mlog_errno(ret);
	}

	if (tmpend < end) {
		/*
		 * This may make start and end equal, but the zeroing
		 * code will skip any work in that case so there's no
		 * need to catch it up here.
		 */
		start = end & ~(osb->s_clustersize - 1);

		trace_ocfs2_zero_partial_clusters_range2(
			(unsigned long long)start, (unsigned long long)end);

		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
		if (ret)
			mlog_errno(ret);
	}
	ocfs2_update_inode_fsync_trans(handle, inode, 1);

	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
{
	int i;
	struct ocfs2_extent_rec *rec = NULL;

	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {

		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) < pos)
			break;
	}

	return i;
}

/*
 * Helper to calculate the punching pos and length in one run. We handle the
 * following three cases in order:
 *
 * - remove the entire record
 * - remove a partial record
 * - no record needs to be removed (hole-punching completed)
 */
static void ocfs2_calc_trunc_pos(struct inode *inode,
				 struct ocfs2_extent_list *el,
				 struct ocfs2_extent_rec *rec,
				 u32 trunc_start, u32 *trunc_cpos,
				 u32 *trunc_len, u32 *trunc_end,
				 u64 *blkno, int *done)
{
	int ret = 0;
	u32 coff, range;

	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);

	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
		/*
		 * remove an entire extent record.
		 */
		*trunc_cpos = le32_to_cpu(rec->e_cpos);
		/*
		 * Skip holes if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno);
		*trunc_end = le32_to_cpu(rec->e_cpos);
	} else if (range > trunc_start) {
		/*
		 * remove a partial extent record, which means we're
		 * removing the last extent record.
		 */
		*trunc_cpos = trunc_start;
		/*
		 * skip hole if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - trunc_start;
		coff = trunc_start - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno) +
				ocfs2_clusters_to_blocks(inode->i_sb, coff);
		*trunc_end = trunc_start;
	} else {
		/*
		 * There are two possibilities here:
		 *
		 * - the last record has been removed
		 * - trunc_start was within a hole
		 *
		 * both cases mean that hole punching is complete.
		 */
		ret = 1;
	}

	*done = ret;
}

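/*
 * Punch a hole into the given byte range: zero the partial clusters at
 * the edges, then walk the extent tree right-to-left removing the
 * covered extents.
 */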
int ocfs2_remove_inode_range(struct inode *inode,
			     struct buffer_head *di_bh, u64 byte_start,
			     u64 byte_len)
{
	int ret = 0, flags = 0, done = 0, i;
	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
	u32 cluster_in_el;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct address_space *mapping = inode->i_mapping;
	struct ocfs2_extent_tree et;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el = NULL;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&dealloc);

	trace_ocfs2_remove_inode_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)byte_start,
			(unsigned long long)byte_len);

	if (byte_len == 0)
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
					    byte_start + byte_len, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		/*
		 * There's no need to get fancy with the page cache
		 * truncate of an inline-data inode. We're talking
		 * about less than a page here, which will be cached
		 * in the dinode buffer anyway.
		 */
		unmap_mapping_range(mapping, 0, 0, 0);
		truncate_inode_pages(mapping, 0);
		goto out;
	}

	/*
	 * For reflinks, we may need to CoW 2 clusters which might be
	 * partially zeroed later, if the hole's start and end offsets
	 * fall within one cluster (i.e. are not exactly aligned to the
	 * cluster size).
	 */

	if (ocfs2_is_refcount_inode(inode)) {
		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
	cluster_in_el = trunc_end;

	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	path = ocfs2_new_path_from_et(&et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	while (trunc_end > trunc_start) {

		ret = ocfs2_find_path(INODE_CACHE(inode), path,
				      cluster_in_el);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		el = path_leaf_el(path);

		i = ocfs2_find_rec(el, trunc_end);
		/*
		 * Need to go to previous extent block.
		 */
		if (i < 0) {
			if (path->p_tree_depth == 0)
				break;

			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
							    path,
							    &cluster_in_el);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/*
			 * We've reached the leftmost extent block,
			 * it's safe to leave.
			 */
			if (cluster_in_el == 0)
				break;

			/*
			 * The 'pos' searched for previous extent block is
			 * always one cluster less than actual trunc_end.
			 */
			trunc_end = cluster_in_el + 1;

			ocfs2_reinit_path(path, 1);

			continue;

		} else
			rec = &el->l_recs[i];

		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
				     &trunc_len, &trunc_end, &blkno, &done);
		if (done)
			break;

		flags = rec->e_flags;
		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
					       phys_cpos, trunc_len, flags,
					       &dealloc, refcount_loc, false);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		cluster_in_el = trunc_end;

		ocfs2_reinit_path(path, 1);
	}

	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);

out:
	ocfs2_free_path(path);
	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &dealloc);

	return ret;
}

/*
 * Parts of this function taken from xfs_change_file_space()
 */
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
				     loff_t f_pos, unsigned int cmd,
				     struct ocfs2_space_resv *sr,
				     int change_size)
{
	int ret;
	s64 llen;
	loff_t size, orig_isize;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *di_bh = NULL;
	handle_t *handle;
	unsigned long long max_off = inode->i_sb->s_maxbytes;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	inode_lock(inode);

	/*
	 * This prevents concurrent writes on other nodes
	 */
	ret = ocfs2_rw_lock(inode, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_rw_unlock;
	}

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		ret = -EPERM;
		goto out_inode_unlock;
	}

	switch (sr->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		sr->l_start += f_pos;
		break;
	case 2: /*SEEK_END*/
		sr->l_start += i_size_read(inode);
		break;
	default:
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	sr->l_whence = 0;

	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;

	if (sr->l_start < 0
	    || sr->l_start > max_off
	    || (sr->l_start + llen) < 0
	    || (sr->l_start + llen) > max_off) {
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	size = sr->l_start + sr->l_len;

	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
	    cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
		if (sr->l_len <= 0) {
			ret = -EINVAL;
			goto out_inode_unlock;
		}
	}

	if (file && should_remove_suid(file->f_path.dentry)) {
		ret = __ocfs2_write_remove_suid(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out_inode_unlock;
		}
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	switch (cmd) {
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
		/*
		 * This takes unsigned offsets, but the signed ones we
		 * pass have been checked against overflow above.
		 */
		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
						       sr->l_len);
		break;
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
					       sr->l_len);
		break;
	default:
		ret = -EINVAL;
	}

	orig_isize = i_size_read(inode);
	/* zeroout eof blocks in the cluster. */
	if (!ret && change_size && orig_isize < size) {
		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
					size - orig_isize);
		if (!ret)
			i_size_write(inode, size);
	}
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (ret) {
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	/*
	 * We update c/mtime for these changes
	 */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	inode->i_ctime = inode->i_mtime = current_time(inode);
	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
	if (ret < 0)
		mlog_errno(ret);

	if (file && (file->f_flags & O_SYNC))
		handle->h_sync = 1;

	ocfs2_commit_trans(osb, handle);

out_inode_unlock:
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
	ocfs2_rw_unlock(inode, 1);

out:
	inode_unlock(inode);
	return ret;
}

2066int ocfs2_change_file_space(struct file *file, unsigned int cmd,
2067 struct ocfs2_space_resv *sr)
2068{
2069 struct inode *inode = file_inode(file);
2070 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2071 int ret;
2072
2073 if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
2074 !ocfs2_writes_unwritten_extents(osb))
2075 return -ENOTTY;
2076 else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
2077 !ocfs2_sparse_alloc(osb))
2078 return -ENOTTY;
2079
2080 if (!S_ISREG(inode->i_mode))
2081 return -EINVAL;
2082
2083 if (!(file->f_mode & FMODE_WRITE))
2084 return -EBADF;
2085
2086 ret = mnt_want_write_file(file);
2087 if (ret)
2088 return ret;
2089 ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
2090 mnt_drop_write_file(file);
2091 return ret;
2092}
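/*
 * Illustrative sketch, not part of the original source: userspace
 * reaches ocfs2_change_file_space() through the ocfs2-specific space
 * reservation ioctls.  Assuming the OCFS2_IOC_* definitions from
 * ocfs2_fs.h are available to the program, reserving 1MB at the start
 * of a file would look roughly like:
 *
 *	struct ocfs2_space_resv sr = {
 *		.l_whence = 0,		(SEEK_SET-style interpretation)
 *		.l_start  = 0,
 *		.l_len    = 1048576,
 *	};
 *
 *	if (ioctl(fd, OCFS2_IOC_RESVSP64, &sr) < 0)
 *		perror("OCFS2_IOC_RESVSP64");
 *
 * OCFS2_IOC_UNRESVSP64 takes the same structure and unmaps the range
 * instead.  Of the fields, only l_whence, l_start and l_len are
 * consulted above.
 */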

static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
			    loff_t len)
{
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_space_resv sr;
	int change_size = 1;
	int cmd = OCFS2_IOC_RESVSP64;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;
	if (!ocfs2_writes_unwritten_extents(osb))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_KEEP_SIZE)
		change_size = 0;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = OCFS2_IOC_UNRESVSP64;

	sr.l_whence = 0;
	sr.l_start = (s64)offset;
	sr.l_len = (s64)len;

	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
					 change_size);
}
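/*
 * Illustrative sketch, not part of the original source: the same paths
 * are reachable through the generic fallocate(2) syscall, which lands
 * in ocfs2_fallocate() above.  Punching a hole, for example:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, len);
 *
 * maps to the OCFS2_IOC_UNRESVSP64 case; the VFS insists on KEEP_SIZE
 * accompanying PUNCH_HOLE, which is why change_size ends up cleared
 * for hole punches.
 */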

int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
				   size_t count)
{
	int ret = 0;
	unsigned int extent_flags;
	u32 cpos, clusters, extent_len, phys_cpos;
	struct super_block *sb = inode->i_sb;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
	    !ocfs2_is_refcount_inode(inode) ||
	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
					 &extent_flags);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
			ret = 1;
			break;
		}

		if (extent_len > clusters)
			extent_len = clusters;

		clusters -= extent_len;
		cpos += extent_len;
	}
out:
	return ret;
}
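/*
 * Note: ocfs2_check_range_for_refcount() returns 1 as soon as any
 * allocated cluster in [pos, pos + count) carries OCFS2_EXT_REFCOUNTED,
 * 0 if the range is entirely unshared (or the inode cannot be
 * refcounted at all), and a negative errno on lookup failure.  Writers
 * use this to decide whether a copy-on-write pass is needed first.
 */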

static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
{
	int blockmask = inode->i_sb->s_blocksize - 1;
	loff_t final_size = pos + count;

	if ((pos & blockmask) || (final_size & blockmask))
		return 1;
	return 0;
}
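/*
 * A quick worked example for ocfs2_is_io_unaligned(): with a 4K block
 * size, blockmask is 0xfff, so pos = 8192 / count = 4096 is aligned
 * (both 8192 and 12288 have the low 12 bits clear), while pos = 8192 /
 * count = 100 is not (8292 & 0xfff != 0).  Unaligned direct AIO gets
 * downgraded to synchronous I/O in ocfs2_file_write_iter() below.
 */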

static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
					    struct buffer_head **di_bh,
					    int meta_level,
					    int write_sem,
					    int wait)
{
	int ret = 0;

	if (wait)
		ret = ocfs2_inode_lock(inode, di_bh, meta_level);
	else
		ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
	if (ret < 0)
		goto out;

	if (wait) {
		if (write_sem)
			down_write(&OCFS2_I(inode)->ip_alloc_sem);
		else
			down_read(&OCFS2_I(inode)->ip_alloc_sem);
	} else {
		if (write_sem)
			ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
		else
			ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);

		if (!ret) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	return ret;

out_unlock:
	brelse(*di_bh);
	*di_bh = NULL;
	ocfs2_inode_unlock(inode, meta_level);
out:
	return ret;
}

static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
					       struct buffer_head **di_bh,
					       int meta_level,
					       int write_sem)
{
	if (write_sem)
		up_write(&OCFS2_I(inode)->ip_alloc_sem);
	else
		up_read(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(*di_bh);
	*di_bh = NULL;

	if (meta_level >= 0)
		ocfs2_inode_unlock(inode, meta_level);
}
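/*
 * The two helpers above pair up: the cluster-wide inode lock is always
 * taken before ip_alloc_sem and released after it.  When 'wait' is
 * clear (IOCB_NOWAIT callers), the trylock variants are used and
 * -EAGAIN is returned instead of blocking; note that the unlock helper
 * also drops the di_bh reference and, when meta_level >= 0, the inode
 * lock itself.
 */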

static int ocfs2_prepare_inode_for_write(struct file *file,
					 loff_t pos, size_t count, int wait)
{
	int ret = 0, meta_level = 0, overwrite_io = 0;
	int write_sem = 0;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = d_inode(dentry);
	struct buffer_head *di_bh = NULL;
	u32 cpos;
	u32 clusters;

	/*
	 * We start with a read level meta lock and only jump to an EX
	 * (exclusive) lock if we need to make modifications here.
	 */
	for(;;) {
		ret = ocfs2_inode_lock_for_extent_tree(inode,
						       &di_bh,
						       meta_level,
						       write_sem,
						       wait);
		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto out;
		}

		/*
		 * Check if IO will overwrite allocated blocks in case
		 * IOCB_NOWAIT flag is set.
		 */
		if (!wait && !overwrite_io) {
			overwrite_io = 1;

			ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
			if (ret < 0) {
				if (ret != -EAGAIN)
					mlog_errno(ret);
				goto out_unlock;
			}
		}

		/* Clear suid / sgid if necessary. We do this here
		 * instead of later in the write path because
		 * remove_suid() calls ->setattr without any hint that
		 * we may have already done our cluster locking. Since
		 * ocfs2_setattr() *must* take cluster locks to
		 * proceed, this will lead us to recursively lock the
		 * inode. There's also the dinode i_size state which
		 * can be lost via setattr during extending writes (we
		 * set inode->i_size at the end of a write). */
		if (should_remove_suid(dentry)) {
			if (meta_level == 0) {
				ocfs2_inode_unlock_for_extent_tree(inode,
								   &di_bh,
								   meta_level,
								   write_sem);
				meta_level = 1;
				continue;
			}

			ret = ocfs2_write_remove_suid(inode);
			if (ret < 0) {
				mlog_errno(ret);
				goto out_unlock;
			}
		}

		ret = ocfs2_check_range_for_refcount(inode, pos, count);
		if (ret == 1) {
			ocfs2_inode_unlock_for_extent_tree(inode,
							   &di_bh,
							   meta_level,
							   write_sem);
			meta_level = 1;
			write_sem = 1;
			ret = ocfs2_inode_lock_for_extent_tree(inode,
							       &di_bh,
							       meta_level,
							       write_sem,
							       wait);
			if (ret < 0) {
				if (ret != -EAGAIN)
					mlog_errno(ret);
				goto out;
			}

			cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
			clusters =
				ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
			ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
		}

		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto out_unlock;
		}

		break;
	}

out_unlock:
	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
					    pos, count, wait);

	ocfs2_inode_unlock_for_extent_tree(inode,
					   &di_bh,
					   meta_level,
					   write_sem);

out:
	return ret;
}
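/*
 * Note on the loop in ocfs2_prepare_inode_for_write(): it starts with a
 * shared (PR) meta lock and restarts with meta_level = 1 (EX) only when
 * it actually has to modify something: clearing suid/sgid bits, or
 * CoWing a refcounted range, in which case ip_alloc_sem is also retaken
 * in write mode before calling ocfs2_refcount_cow().
 */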

static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
				     struct iov_iter *from)
{
	int rw_level;
	ssize_t written = 0;
	ssize_t ret;
	size_t count = iov_iter_count(from);
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int full_coherency = !(osb->s_mount_opt &
			       OCFS2_MOUNT_COHERENCY_BUFFERED);
	void *saved_ki_complete = NULL;
	int append_write = ((iocb->ki_pos + count) >=
			    i_size_read(inode) ? 1 : 0);
	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;

	trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		file->f_path.dentry->d_name.len,
		file->f_path.dentry->d_name.name,
		(unsigned int)from->nr_segs);	/* GRRRRR */

	if (!direct_io && nowait)
		return -EOPNOTSUPP;

	if (count == 0)
		return 0;

	if (nowait) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else
		inode_lock(inode);

	/*
	 * Concurrent O_DIRECT writes are allowed with the
	 * "coherency=buffered" mount option.  For an append write, we
	 * must take the rw lock EX.
	 */
	rw_level = (!direct_io || full_coherency || append_write);

	if (nowait)
		ret = ocfs2_try_rw_lock(inode, rw_level);
	else
		ret = ocfs2_rw_lock(inode, rw_level);
	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		goto out_mutex;
	}

	/*
	 * O_DIRECT writes with "coherency=full" need to take the EX
	 * cluster inode lock to guarantee coherency.
	 */
	if (direct_io && full_coherency) {
		/*
		 * We need to take and drop the inode lock to force
		 * other nodes to drop their caches.  Buffered I/O
		 * already does this in write_begin().
		 */
		if (nowait)
			ret = ocfs2_try_inode_lock(inode, NULL, 1);
		else
			ret = ocfs2_inode_lock(inode, NULL, 1);
		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto out;
		}

		ocfs2_inode_unlock(inode, 1);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0) {
		if (ret)
			mlog_errno(ret);
		goto out;
	}
	count = ret;

	ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		goto out;
	}

	if (direct_io && !is_sync_kiocb(iocb) &&
	    ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
		/*
		 * Make it a sync I/O if it's an unaligned AIO.
		 */
		saved_ki_complete = xchg(&iocb->ki_complete, NULL);
	}

	/* communicate with ocfs2_dio_end_io */
	ocfs2_iocb_set_rw_locked(iocb, rw_level);

	written = __generic_file_write_iter(iocb, from);
	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(written == -EIOCBQUEUED && !direct_io);

	/*
	 * Deep in __generic_file_write_iter()->ocfs2_direct_IO() we pass
	 * in an ocfs2_dio_end_io function pointer which is called when
	 * O_DIRECT I/O completes so that it can unlock our rw lock.
	 * Unfortunately there are error cases which call end_io and
	 * others that don't, so we don't have to unlock the rw_lock if
	 * either an async dio is going to do it in the future or an
	 * end_io after an error has already done it.
	 */
	if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
		rw_level = -1;
	}

	if (unlikely(written <= 0))
		goto out;

	if (((file->f_flags & O_DSYNC) && !direct_io) ||
	    IS_SYNC(inode)) {
		ret = filemap_fdatawrite_range(file->f_mapping,
					       iocb->ki_pos - written,
					       iocb->ki_pos - 1);
		if (ret < 0)
			written = ret;

		if (!ret) {
			ret = jbd2_journal_force_commit(osb->journal->j_journal);
			if (ret < 0)
				written = ret;
		}

		if (!ret)
			ret = filemap_fdatawait_range(file->f_mapping,
						      iocb->ki_pos - written,
						      iocb->ki_pos - 1);
	}

out:
	if (saved_ki_complete)
		xchg(&iocb->ki_complete, saved_ki_complete);

	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

out_mutex:
	inode_unlock(inode);

	if (written)
		ret = written;
	return ret;
}
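/*
 * Note on the O_DSYNC tail of ocfs2_file_write_iter() above: for
 * buffered O_DSYNC (and IS_SYNC) writes, only the byte range just
 * written is flushed, and the journal is then forced to commit so that
 * the metadata backing any new allocation is durable as well, before
 * waiting on the data writeback.
 */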

static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
				    struct iov_iter *to)
{
	int ret = 0, rw_level = -1, lock_level = 0;
	struct file *filp = iocb->ki_filp;
	struct inode *inode = file_inode(filp);
	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;

	trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			filp->f_path.dentry->d_name.len,
			filp->f_path.dentry->d_name.name,
			to->nr_segs);	/* GRRRRR */

	if (!inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto bail;
	}

	if (!direct_io && nowait)
		return -EOPNOTSUPP;

	/*
	 * Buffered reads protect themselves in ->readpage().  O_DIRECT
	 * reads need locks to protect pending reads from racing with
	 * truncate.
	 */
	if (direct_io) {
		if (nowait)
			ret = ocfs2_try_rw_lock(inode, 0);
		else
			ret = ocfs2_rw_lock(inode, 0);

		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto bail;
		}
		rw_level = 0;
		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_rw_locked(iocb, rw_level);
	}

	/*
	 * We're fine letting folks race truncates and extending
	 * writes with read across the cluster, just like they can
	 * locally.  Hence no rw_lock during read.
	 *
	 * Take and drop the meta data lock to update inode fields
	 * like i_size.  This allows the checks further down in
	 * generic_file_read_iter() a chance of actually working.
	 */
	ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
				     !nowait);
	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		goto bail;
	}
	ocfs2_inode_unlock(inode, lock_level);

	ret = generic_file_read_iter(iocb, to);
	trace_generic_file_read_iter_ret(ret);

	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !direct_io);

	/* see ocfs2_file_write_iter */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		rw_level = -1;
	}

bail:
	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

	return ret;
}
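/*
 * Note: in ocfs2_file_read_iter() only O_DIRECT readers take the rw
 * cluster lock (shared, level 0); buffered readers are covered by the
 * page locking in ->readpage().  The meta lock is taken and immediately
 * dropped purely to pull a fresh i_size (and update atime) from the
 * cluster before generic_file_read_iter() runs its checks.
 */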

/* Refer to generic_file_llseek_unlocked() */
static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret = 0;

	inode_lock(inode);

	switch (whence) {
	case SEEK_SET:
		break;
	case SEEK_END:
		/* SEEK_END requires the OCFS2 inode lock for the file
		 * because it references the file's size.
		 */
		ret = ocfs2_inode_lock(inode, NULL, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		offset += i_size_read(inode);
		ocfs2_inode_unlock(inode, 0);
		break;
	case SEEK_CUR:
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
	case SEEK_HOLE:
		ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
		if (ret)
			goto out;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	if (ret)
		return ret;
	return offset;
}
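/*
 * Note: SEEK_END is the only whence that needs the cluster inode lock
 * here, because it is the only one that reads i_size; SEEK_DATA and
 * SEEK_HOLE delegate to ocfs2_seek_data_hole_offset(), and a SEEK_CUR
 * with offset 0 short-circuits to returning f_pos unchanged.
 */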

static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
				     struct file *file_out, loff_t pos_out,
				     loff_t len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
	struct buffer_head *in_bh = NULL, *out_bh = NULL;
	bool same_inode = (inode_in == inode_out);
	loff_t remapped = 0;
	ssize_t ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;
	if (!ocfs2_refcount_tree(osb))
		return -EOPNOTSUPP;
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	/* Lock both files against IO */
	ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
	if (ret)
		return ret;

	/* Check file eligibility and prepare for block sharing. */
	ret = -EINVAL;
	if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
	    (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
		goto out_unlock;

	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	/* Lock out changes to the allocation maps and remap. */
	down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
	if (!same_inode)
		down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
				  SINGLE_DEPTH_NESTING);

	/* Zap any page cache for the destination file's range. */
	truncate_inode_pages_range(&inode_out->i_data,
				   round_down(pos_out, PAGE_SIZE),
				   round_up(pos_out + len, PAGE_SIZE) - 1);

	remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
			inode_out, out_bh, pos_out, len);
	up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
	if (!same_inode)
		up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
	if (remapped < 0) {
		ret = remapped;
		mlog_errno(ret);
		goto out_unlock;
	}

	/*
	 * Empty the extent map so that we may get the right extent
	 * record from the disk.
	 */
	ocfs2_extent_map_trunc(inode_in, 0);
	ocfs2_extent_map_trunc(inode_out, 0);

	ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock;
	}

out_unlock:
	ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
	return remapped > 0 ? remapped : ret;
}
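/*
 * Note on the locking in ocfs2_remap_file_range(): both inodes are
 * locked against I/O first via ocfs2_reflink_inodes_lock(), then both
 * ip_alloc_sem semaphores are taken in write mode (with
 * SINGLE_DEPTH_NESTING for the destination when the inodes differ),
 * the destination's page cache for the range is zapped, and only then
 * are the extents shared.
 */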

const struct inode_operations ocfs2_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.listxattr	= ocfs2_listxattr,
	.fiemap		= ocfs2_fiemap,
	.get_acl	= ocfs2_iop_get_acl,
	.set_acl	= ocfs2_iop_set_acl,
};

const struct inode_operations ocfs2_special_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.get_acl	= ocfs2_iop_get_acl,
	.set_acl	= ocfs2_iop_set_acl,
};

/*
 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
 */
const struct file_operations ocfs2_fops = {
	.llseek		= ocfs2_file_llseek,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.read_iter	= ocfs2_file_read_iter,
	.write_iter	= ocfs2_file_write_iter,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ocfs2_fallocate,
	.remap_file_range = ocfs2_remap_file_range,
};

const struct file_operations ocfs2_dops = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
};

/*
 * POSIX-lockless variants of our file_operations.
 *
 * These will be used if the underlying cluster stack does not support
 * posix file locking, if the user passes the "localflocks" mount
 * option, or if we have a local-only fs.
 *
 * ocfs2_flock is in here because all stacks handle UNIX file locks,
 * so we still want it in the case of no stack support for
 * plocks. Internally, it will do the right thing when asked to ignore
 * the cluster.
 */
const struct file_operations ocfs2_fops_no_plocks = {
	.llseek		= ocfs2_file_llseek,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.read_iter	= ocfs2_file_read_iter,
	.write_iter	= ocfs2_file_write_iter,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ocfs2_fallocate,
	.remap_file_range = ocfs2_remap_file_range,
};

const struct file_operations ocfs2_dops_no_plocks = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
};