blob: 42d246a9422835993c57017efda293cd02e24f0d [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_FS_H
3#define _LINUX_FS_H
4
5#include <linux/linkage.h>
6#include <linux/wait_bit.h>
7#include <linux/kdev_t.h>
8#include <linux/dcache.h>
9#include <linux/path.h>
10#include <linux/stat.h>
11#include <linux/cache.h>
12#include <linux/list.h>
13#include <linux/list_lru.h>
14#include <linux/llist.h>
15#include <linux/radix-tree.h>
16#include <linux/xarray.h>
17#include <linux/rbtree.h>
18#include <linux/init.h>
19#include <linux/pid.h>
20#include <linux/bug.h>
21#include <linux/mutex.h>
22#include <linux/rwsem.h>
23#include <linux/mm_types.h>
24#include <linux/capability.h>
25#include <linux/semaphore.h>
26#include <linux/fcntl.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000027#include <linux/rculist_bl.h>
28#include <linux/atomic.h>
29#include <linux/shrinker.h>
30#include <linux/migrate_mode.h>
31#include <linux/uidgid.h>
32#include <linux/lockdep.h>
33#include <linux/percpu-rwsem.h>
34#include <linux/workqueue.h>
35#include <linux/delayed_call.h>
36#include <linux/uuid.h>
37#include <linux/errseq.h>
38#include <linux/ioprio.h>
David Brazdil0f672f62019-12-10 10:32:29 +000039#include <linux/fs_types.h>
40#include <linux/build_bug.h>
41#include <linux/stddef.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000042
43#include <asm/byteorder.h>
44#include <uapi/linux/fs.h>
45
46struct backing_dev_info;
47struct bdi_writeback;
48struct bio;
49struct export_operations;
Olivier Deprez157378f2022-04-04 15:47:50 +020050struct fiemap_extent_info;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000051struct hd_geometry;
52struct iovec;
53struct kiocb;
54struct kobject;
55struct pipe_inode_info;
56struct poll_table_struct;
57struct kstatfs;
58struct vm_area_struct;
59struct vfsmount;
60struct cred;
61struct swap_info_struct;
62struct seq_file;
63struct workqueue_struct;
64struct iov_iter;
65struct fscrypt_info;
66struct fscrypt_operations;
David Brazdil0f672f62019-12-10 10:32:29 +000067struct fsverity_info;
68struct fsverity_operations;
69struct fs_context;
Olivier Deprez157378f2022-04-04 15:47:50 +020070struct fs_parameter_spec;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000071
72extern void __init inode_init(void);
73extern void __init inode_init_early(void);
74extern void __init files_init(void);
75extern void __init files_maxfiles_init(void);
76
77extern struct files_stat_struct files_stat;
78extern unsigned long get_max_files(void);
79extern unsigned int sysctl_nr_open;
80extern struct inodes_stat_t inodes_stat;
81extern int leases_enable, lease_break_time;
82extern int sysctl_protected_symlinks;
83extern int sysctl_protected_hardlinks;
84extern int sysctl_protected_fifos;
85extern int sysctl_protected_regular;
86
87typedef __kernel_rwf_t rwf_t;
88
89struct buffer_head;
90typedef int (get_block_t)(struct inode *inode, sector_t iblock,
91 struct buffer_head *bh_result, int create);
92typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
93 ssize_t bytes, void *private);
94
95#define MAY_EXEC 0x00000001
96#define MAY_WRITE 0x00000002
97#define MAY_READ 0x00000004
98#define MAY_APPEND 0x00000008
99#define MAY_ACCESS 0x00000010
100#define MAY_OPEN 0x00000020
101#define MAY_CHDIR 0x00000040
102/* called from RCU mode, don't block */
103#define MAY_NOT_BLOCK 0x00000080
104
105/*
106 * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
107 * to O_WRONLY and O_RDWR via the strange trick in do_dentry_open()
108 */
109
110/* file is open for reading */
111#define FMODE_READ ((__force fmode_t)0x1)
112/* file is open for writing */
113#define FMODE_WRITE ((__force fmode_t)0x2)
114/* file is seekable */
115#define FMODE_LSEEK ((__force fmode_t)0x4)
116/* file can be accessed using pread */
117#define FMODE_PREAD ((__force fmode_t)0x8)
118/* file can be accessed using pwrite */
119#define FMODE_PWRITE ((__force fmode_t)0x10)
120/* File is opened for execution with sys_execve / sys_uselib */
121#define FMODE_EXEC ((__force fmode_t)0x20)
122/* File is opened with O_NDELAY (only set for block devices) */
123#define FMODE_NDELAY ((__force fmode_t)0x40)
124/* File is opened with O_EXCL (only set for block devices) */
125#define FMODE_EXCL ((__force fmode_t)0x80)
126/* File is opened using open(.., 3, ..) and is writeable only for ioctls
127 (specialy hack for floppy.c) */
128#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
129/* 32bit hashes as llseek() offset (for directories) */
130#define FMODE_32BITHASH ((__force fmode_t)0x200)
131/* 64bit hashes as llseek() offset (for directories) */
132#define FMODE_64BITHASH ((__force fmode_t)0x400)
133
134/*
135 * Don't update ctime and mtime.
136 *
137 * Currently a special hack for the XFS open_by_handle ioctl, but we'll
138 * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
139 */
140#define FMODE_NOCMTIME ((__force fmode_t)0x800)
141
142/* Expect random access pattern */
143#define FMODE_RANDOM ((__force fmode_t)0x1000)
144
145/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
146#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
147
148/* File is opened with O_PATH; almost nothing can be done with it */
149#define FMODE_PATH ((__force fmode_t)0x4000)
150
151/* File needs atomic accesses to f_pos */
152#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
153/* Write access to underlying fs */
154#define FMODE_WRITER ((__force fmode_t)0x10000)
155/* Has read method(s) */
156#define FMODE_CAN_READ ((__force fmode_t)0x20000)
157/* Has write method(s) */
158#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
159
160#define FMODE_OPENED ((__force fmode_t)0x80000)
161#define FMODE_CREATED ((__force fmode_t)0x100000)
162
David Brazdil0f672f62019-12-10 10:32:29 +0000163/* File is stream-like */
164#define FMODE_STREAM ((__force fmode_t)0x200000)
165
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000166/* File was opened by fanotify and shouldn't generate fanotify events */
167#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
168
169/* File is capable of returning -EAGAIN if I/O will block */
David Brazdil0f672f62019-12-10 10:32:29 +0000170#define FMODE_NOWAIT ((__force fmode_t)0x8000000)
171
172/* File represents mount that needs unmounting */
173#define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000174
175/* File does not contribute to nr_files count */
David Brazdil0f672f62019-12-10 10:32:29 +0000176#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000177
Olivier Deprez157378f2022-04-04 15:47:50 +0200178/* File supports async buffered reads */
179#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000180
181/*
182 * Attribute flags. These should be or-ed together to figure out what
183 * has been changed!
184 */
185#define ATTR_MODE (1 << 0)
186#define ATTR_UID (1 << 1)
187#define ATTR_GID (1 << 2)
188#define ATTR_SIZE (1 << 3)
189#define ATTR_ATIME (1 << 4)
190#define ATTR_MTIME (1 << 5)
191#define ATTR_CTIME (1 << 6)
192#define ATTR_ATIME_SET (1 << 7)
193#define ATTR_MTIME_SET (1 << 8)
194#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */
195#define ATTR_KILL_SUID (1 << 11)
196#define ATTR_KILL_SGID (1 << 12)
197#define ATTR_FILE (1 << 13)
198#define ATTR_KILL_PRIV (1 << 14)
199#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
200#define ATTR_TIMES_SET (1 << 16)
201#define ATTR_TOUCH (1 << 17)
202
203/*
204 * Whiteout is represented by a char device. The following constants define the
205 * mode and device number to use.
206 */
207#define WHITEOUT_MODE 0
208#define WHITEOUT_DEV 0
209
210/*
211 * This is the Inode Attributes structure, used for notify_change(). It
212 * uses the above definitions as flags, to know which values have changed.
213 * Also, in this manner, a Filesystem can look at only the values it cares
214 * about. Basically, these are the attributes that the VFS layer can
215 * request to change from the FS layer.
216 *
217 * Derek Atkins <warlord@MIT.EDU> 94-10-20
218 */
219struct iattr {
220 unsigned int ia_valid;
221 umode_t ia_mode;
222 kuid_t ia_uid;
223 kgid_t ia_gid;
224 loff_t ia_size;
225 struct timespec64 ia_atime;
226 struct timespec64 ia_mtime;
227 struct timespec64 ia_ctime;
228
229 /*
230 * Not an attribute, but an auxiliary info for filesystems wanting to
231 * implement an ftruncate() like method. NOTE: filesystem should
232 * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
233 */
234 struct file *ia_file;
235};
236
237/*
238 * Includes for diskquotas.
239 */
240#include <linux/quota.h>
241
242/*
243 * Maximum number of layers of fs stack. Needs to be limited to
244 * prevent kernel stack overflow
245 */
246#define FILESYSTEM_MAX_STACK_DEPTH 2
247
248/**
249 * enum positive_aop_returns - aop return codes with specific semantics
250 *
251 * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
252 * completed, that the page is still locked, and
253 * should be considered active. The VM uses this hint
254 * to return the page to the active list -- it won't
255 * be a candidate for writeback again in the near
256 * future. Other callers must be careful to unlock
257 * the page if they get this return. Returned by
258 * writepage();
259 *
260 * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
261 * unlocked it and the page might have been truncated.
262 * The caller should back up to acquiring a new page and
263 * trying again. The aop will be taking reasonable
264 * precautions not to livelock. If the caller held a page
265 * reference, it should drop it before retrying. Returned
266 * by readpage().
267 *
268 * address_space_operation functions return these large constants to indicate
269 * special semantics to the caller. These are much larger than the bytes in a
270 * page to allow for functions that return the number of bytes operated on in a
271 * given page.
272 */
273
274enum positive_aop_returns {
275 AOP_WRITEPAGE_ACTIVATE = 0x80000,
276 AOP_TRUNCATED_PAGE = 0x80001,
277};
278
279#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */
280#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct
281 * helper code (eg buffer layer)
282 * to clear GFP_FS from alloc */
283
284/*
285 * oh the beauties of C type declarations.
286 */
287struct page;
288struct address_space;
289struct writeback_control;
Olivier Deprez157378f2022-04-04 15:47:50 +0200290struct readahead_control;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000291
292/*
293 * Write life time hint values.
294 * Stored in struct inode as u8.
295 */
296enum rw_hint {
297 WRITE_LIFE_NOT_SET = 0,
298 WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE,
299 WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT,
300 WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM,
301 WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG,
302 WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME,
303};
304
Olivier Deprez157378f2022-04-04 15:47:50 +0200305/* Match RWF_* bits to IOCB bits */
306#define IOCB_HIPRI (__force int) RWF_HIPRI
307#define IOCB_DSYNC (__force int) RWF_DSYNC
308#define IOCB_SYNC (__force int) RWF_SYNC
309#define IOCB_NOWAIT (__force int) RWF_NOWAIT
310#define IOCB_APPEND (__force int) RWF_APPEND
311
312/* non-RWF related bits - start at 16 */
313#define IOCB_EVENTFD (1 << 16)
314#define IOCB_DIRECT (1 << 17)
315#define IOCB_WRITE (1 << 18)
316/* iocb->ki_waitq is valid */
317#define IOCB_WAITQ (1 << 19)
318#define IOCB_NOIO (1 << 20)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000319
320struct kiocb {
321 struct file *ki_filp;
David Brazdil0f672f62019-12-10 10:32:29 +0000322
323 /* The 'ki_filp' pointer is shared in a union for aio */
324 randomized_struct_fields_start
325
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000326 loff_t ki_pos;
327 void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
328 void *private;
329 int ki_flags;
330 u16 ki_hint;
331 u16 ki_ioprio; /* See linux/ioprio.h */
Olivier Deprez157378f2022-04-04 15:47:50 +0200332 union {
333 unsigned int ki_cookie; /* for ->iopoll */
334 struct wait_page_queue *ki_waitq; /* for async buffered IO */
335 };
David Brazdil0f672f62019-12-10 10:32:29 +0000336
337 randomized_struct_fields_end
338};
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000339
340static inline bool is_sync_kiocb(struct kiocb *kiocb)
341{
342 return kiocb->ki_complete == NULL;
343}
344
345/*
346 * "descriptor" for what we're up to with a read.
347 * This allows us to use the same read code yet
348 * have multiple different users of the data that
349 * we read from a file.
350 *
351 * The simplest case just copies the data to user
352 * mode.
353 */
354typedef struct {
355 size_t written;
356 size_t count;
357 union {
358 char __user *buf;
359 void *data;
360 } arg;
361 int error;
362} read_descriptor_t;
363
364typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
365 unsigned long, unsigned long);
366
367struct address_space_operations {
368 int (*writepage)(struct page *page, struct writeback_control *wbc);
369 int (*readpage)(struct file *, struct page *);
370
371 /* Write back some dirty pages from this mapping. */
372 int (*writepages)(struct address_space *, struct writeback_control *);
373
374 /* Set a page dirty. Return true if this dirtied it */
375 int (*set_page_dirty)(struct page *page);
376
377 /*
378 * Reads in the requested pages. Unlike ->readpage(), this is
379 * PURELY used for read-ahead!.
380 */
381 int (*readpages)(struct file *filp, struct address_space *mapping,
382 struct list_head *pages, unsigned nr_pages);
Olivier Deprez157378f2022-04-04 15:47:50 +0200383 void (*readahead)(struct readahead_control *);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000384
385 int (*write_begin)(struct file *, struct address_space *mapping,
386 loff_t pos, unsigned len, unsigned flags,
387 struct page **pagep, void **fsdata);
388 int (*write_end)(struct file *, struct address_space *mapping,
389 loff_t pos, unsigned len, unsigned copied,
390 struct page *page, void *fsdata);
391
392 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
393 sector_t (*bmap)(struct address_space *, sector_t);
394 void (*invalidatepage) (struct page *, unsigned int, unsigned int);
395 int (*releasepage) (struct page *, gfp_t);
396 void (*freepage)(struct page *);
397 ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
398 /*
399 * migrate the contents of a page to the specified target. If
400 * migrate_mode is MIGRATE_ASYNC, it must not block.
401 */
402 int (*migratepage) (struct address_space *,
403 struct page *, struct page *, enum migrate_mode);
404 bool (*isolate_page)(struct page *, isolate_mode_t);
405 void (*putback_page)(struct page *);
406 int (*launder_page) (struct page *);
407 int (*is_partially_uptodate) (struct page *, unsigned long,
408 unsigned long);
409 void (*is_dirty_writeback) (struct page *, bool *, bool *);
410 int (*error_remove_page)(struct address_space *, struct page *);
411
412 /* swapfile support */
413 int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
414 sector_t *span);
415 void (*swap_deactivate)(struct file *file);
416};
417
418extern const struct address_space_operations empty_aops;
419
420/*
421 * pagecache_write_begin/pagecache_write_end must be used by general code
422 * to write into the pagecache.
423 */
424int pagecache_write_begin(struct file *, struct address_space *mapping,
425 loff_t pos, unsigned len, unsigned flags,
426 struct page **pagep, void **fsdata);
427
428int pagecache_write_end(struct file *, struct address_space *mapping,
429 loff_t pos, unsigned len, unsigned copied,
430 struct page *page, void *fsdata);
431
David Brazdil0f672f62019-12-10 10:32:29 +0000432/**
433 * struct address_space - Contents of a cacheable, mappable object.
434 * @host: Owner, either the inode or the block_device.
435 * @i_pages: Cached pages.
436 * @gfp_mask: Memory allocation flags to use for allocating pages.
437 * @i_mmap_writable: Number of VM_SHARED mappings.
438 * @nr_thps: Number of THPs in the pagecache (non-shmem only).
439 * @i_mmap: Tree of private and shared mappings.
440 * @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
441 * @nrpages: Number of page entries, protected by the i_pages lock.
442 * @nrexceptional: Shadow or DAX entries, protected by the i_pages lock.
443 * @writeback_index: Writeback starts here.
444 * @a_ops: Methods.
445 * @flags: Error bits and flags (AS_*).
446 * @wb_err: The most recent error which has occurred.
447 * @private_lock: For use by the owner of the address_space.
448 * @private_list: For use by the owner of the address_space.
449 * @private_data: For use by the owner of the address_space.
450 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000451struct address_space {
David Brazdil0f672f62019-12-10 10:32:29 +0000452 struct inode *host;
453 struct xarray i_pages;
454 gfp_t gfp_mask;
455 atomic_t i_mmap_writable;
456#ifdef CONFIG_READ_ONLY_THP_FOR_FS
457 /* number of thp, only for non-shmem files */
458 atomic_t nr_thps;
459#endif
460 struct rb_root_cached i_mmap;
461 struct rw_semaphore i_mmap_rwsem;
462 unsigned long nrpages;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000463 unsigned long nrexceptional;
David Brazdil0f672f62019-12-10 10:32:29 +0000464 pgoff_t writeback_index;
465 const struct address_space_operations *a_ops;
466 unsigned long flags;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000467 errseq_t wb_err;
David Brazdil0f672f62019-12-10 10:32:29 +0000468 spinlock_t private_lock;
469 struct list_head private_list;
470 void *private_data;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000471} __attribute__((aligned(sizeof(long)))) __randomize_layout;
472 /*
473 * On most architectures that alignment is already the case; but
474 * must be enforced here for CRIS, to let the least significant bit
475 * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
476 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000477
David Brazdil0f672f62019-12-10 10:32:29 +0000478/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
479#define PAGECACHE_TAG_DIRTY XA_MARK_0
480#define PAGECACHE_TAG_WRITEBACK XA_MARK_1
481#define PAGECACHE_TAG_TOWRITE XA_MARK_2
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000482
David Brazdil0f672f62019-12-10 10:32:29 +0000483/*
484 * Returns true if any of the pages in the mapping are marked with the tag.
485 */
486static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
487{
488 return xa_marked(&mapping->i_pages, tag);
489}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000490
491static inline void i_mmap_lock_write(struct address_space *mapping)
492{
493 down_write(&mapping->i_mmap_rwsem);
494}
495
Olivier Deprez157378f2022-04-04 15:47:50 +0200496static inline int i_mmap_trylock_write(struct address_space *mapping)
497{
498 return down_write_trylock(&mapping->i_mmap_rwsem);
499}
500
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000501static inline void i_mmap_unlock_write(struct address_space *mapping)
502{
503 up_write(&mapping->i_mmap_rwsem);
504}
505
506static inline void i_mmap_lock_read(struct address_space *mapping)
507{
508 down_read(&mapping->i_mmap_rwsem);
509}
510
511static inline void i_mmap_unlock_read(struct address_space *mapping)
512{
513 up_read(&mapping->i_mmap_rwsem);
514}
515
Olivier Deprez157378f2022-04-04 15:47:50 +0200516static inline void i_mmap_assert_locked(struct address_space *mapping)
517{
518 lockdep_assert_held(&mapping->i_mmap_rwsem);
519}
520
521static inline void i_mmap_assert_write_locked(struct address_space *mapping)
522{
523 lockdep_assert_held_write(&mapping->i_mmap_rwsem);
524}
525
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000526/*
527 * Might pages of this file be mapped into userspace?
528 */
529static inline int mapping_mapped(struct address_space *mapping)
530{
531 return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
532}
533
534/*
535 * Might pages of this file have been modified in userspace?
Olivier Deprez157378f2022-04-04 15:47:50 +0200536 * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000537 * marks vma as VM_SHARED if it is shared, and the file was opened for
538 * writing i.e. vma may be mprotected writable even if now readonly.
539 *
540 * If i_mmap_writable is negative, no new writable mappings are allowed. You
541 * can only deny writable mappings, if none exists right now.
542 */
543static inline int mapping_writably_mapped(struct address_space *mapping)
544{
545 return atomic_read(&mapping->i_mmap_writable) > 0;
546}
547
548static inline int mapping_map_writable(struct address_space *mapping)
549{
550 return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
551 0 : -EPERM;
552}
553
554static inline void mapping_unmap_writable(struct address_space *mapping)
555{
556 atomic_dec(&mapping->i_mmap_writable);
557}
558
559static inline int mapping_deny_writable(struct address_space *mapping)
560{
561 return atomic_dec_unless_positive(&mapping->i_mmap_writable) ?
562 0 : -EBUSY;
563}
564
565static inline void mapping_allow_writable(struct address_space *mapping)
566{
567 atomic_inc(&mapping->i_mmap_writable);
568}
569
570/*
571 * Use sequence counter to get consistent i_size on 32-bit processors.
572 */
573#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
574#include <linux/seqlock.h>
575#define __NEED_I_SIZE_ORDERED
576#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
577#else
578#define i_size_ordered_init(inode) do { } while (0)
579#endif
580
581struct posix_acl;
582#define ACL_NOT_CACHED ((void *)(-1))
583#define ACL_DONT_CACHE ((void *)(-3))
584
585static inline struct posix_acl *
586uncached_acl_sentinel(struct task_struct *task)
587{
588 return (void *)task + 1;
589}
590
591static inline bool
592is_uncached_acl(struct posix_acl *acl)
593{
594 return (long)acl & 1;
595}
596
597#define IOP_FASTPERM 0x0001
598#define IOP_LOOKUP 0x0002
599#define IOP_NOFOLLOW 0x0004
600#define IOP_XATTR 0x0008
601#define IOP_DEFAULT_READLINK 0x0010
602
603struct fsnotify_mark_connector;
604
605/*
606 * Keep mostly read-only and often accessed (especially for
607 * the RCU path lookup and 'stat' data) fields at the beginning
608 * of the 'struct inode'
609 */
610struct inode {
611 umode_t i_mode;
612 unsigned short i_opflags;
613 kuid_t i_uid;
614 kgid_t i_gid;
615 unsigned int i_flags;
616
617#ifdef CONFIG_FS_POSIX_ACL
618 struct posix_acl *i_acl;
619 struct posix_acl *i_default_acl;
620#endif
621
622 const struct inode_operations *i_op;
623 struct super_block *i_sb;
624 struct address_space *i_mapping;
625
626#ifdef CONFIG_SECURITY
627 void *i_security;
628#endif
629
630 /* Stat data, not accessed from path walking */
631 unsigned long i_ino;
632 /*
633 * Filesystems may only read i_nlink directly. They shall use the
634 * following functions for modification:
635 *
636 * (set|clear|inc|drop)_nlink
637 * inode_(inc|dec)_link_count
638 */
639 union {
640 const unsigned int i_nlink;
641 unsigned int __i_nlink;
642 };
643 dev_t i_rdev;
644 loff_t i_size;
645 struct timespec64 i_atime;
646 struct timespec64 i_mtime;
647 struct timespec64 i_ctime;
648 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
649 unsigned short i_bytes;
650 u8 i_blkbits;
651 u8 i_write_hint;
652 blkcnt_t i_blocks;
653
654#ifdef __NEED_I_SIZE_ORDERED
655 seqcount_t i_size_seqcount;
656#endif
657
658 /* Misc */
659 unsigned long i_state;
660 struct rw_semaphore i_rwsem;
661
662 unsigned long dirtied_when; /* jiffies of first dirtying */
663 unsigned long dirtied_time_when;
664
665 struct hlist_node i_hash;
666 struct list_head i_io_list; /* backing dev IO list */
667#ifdef CONFIG_CGROUP_WRITEBACK
668 struct bdi_writeback *i_wb; /* the associated cgroup wb */
669
670 /* foreign inode detection, see wbc_detach_inode() */
671 int i_wb_frn_winner;
672 u16 i_wb_frn_avg_time;
673 u16 i_wb_frn_history;
674#endif
675 struct list_head i_lru; /* inode LRU list */
676 struct list_head i_sb_list;
677 struct list_head i_wb_list; /* backing dev writeback list */
678 union {
679 struct hlist_head i_dentry;
680 struct rcu_head i_rcu;
681 };
682 atomic64_t i_version;
Olivier Deprez0e641232021-09-23 10:07:05 +0200683 atomic64_t i_sequence; /* see futex */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000684 atomic_t i_count;
685 atomic_t i_dio_count;
686 atomic_t i_writecount;
David Brazdil0f672f62019-12-10 10:32:29 +0000687#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000688 atomic_t i_readcount; /* struct files open RO */
689#endif
David Brazdil0f672f62019-12-10 10:32:29 +0000690 union {
691 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
692 void (*free_inode)(struct inode *);
693 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000694 struct file_lock_context *i_flctx;
695 struct address_space i_data;
696 struct list_head i_devices;
697 union {
698 struct pipe_inode_info *i_pipe;
699 struct block_device *i_bdev;
700 struct cdev *i_cdev;
701 char *i_link;
702 unsigned i_dir_seq;
703 };
704
705 __u32 i_generation;
706
707#ifdef CONFIG_FSNOTIFY
708 __u32 i_fsnotify_mask; /* all events this inode cares about */
709 struct fsnotify_mark_connector __rcu *i_fsnotify_marks;
710#endif
711
David Brazdil0f672f62019-12-10 10:32:29 +0000712#ifdef CONFIG_FS_ENCRYPTION
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000713 struct fscrypt_info *i_crypt_info;
714#endif
715
David Brazdil0f672f62019-12-10 10:32:29 +0000716#ifdef CONFIG_FS_VERITY
717 struct fsverity_info *i_verity_info;
718#endif
719
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000720 void *i_private; /* fs or device private pointer */
721} __randomize_layout;
722
David Brazdil0f672f62019-12-10 10:32:29 +0000723struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);
724
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000725static inline unsigned int i_blocksize(const struct inode *node)
726{
727 return (1 << node->i_blkbits);
728}
729
730static inline int inode_unhashed(struct inode *inode)
731{
732 return hlist_unhashed(&inode->i_hash);
733}
734
735/*
736 * __mark_inode_dirty expects inodes to be hashed. Since we don't
737 * want special inodes in the fileset inode space, we make them
738 * appear hashed, but do not put on any lists. hlist_del()
739 * will work fine and require no locking.
740 */
741static inline void inode_fake_hash(struct inode *inode)
742{
743 hlist_add_fake(&inode->i_hash);
744}
745
746/*
747 * inode->i_mutex nesting subclasses for the lock validator:
748 *
749 * 0: the object of the current VFS operation
750 * 1: parent
751 * 2: child/target
752 * 3: xattr
753 * 4: second non-directory
754 * 5: second parent (when locking independent directories in rename)
755 *
756 * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two
757 * non-directories at once.
758 *
759 * The locking order between these classes is
760 * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory
761 */
762enum inode_i_mutex_lock_class
763{
764 I_MUTEX_NORMAL,
765 I_MUTEX_PARENT,
766 I_MUTEX_CHILD,
767 I_MUTEX_XATTR,
768 I_MUTEX_NONDIR2,
769 I_MUTEX_PARENT2,
770};
771
772static inline void inode_lock(struct inode *inode)
773{
774 down_write(&inode->i_rwsem);
775}
776
777static inline void inode_unlock(struct inode *inode)
778{
779 up_write(&inode->i_rwsem);
780}
781
782static inline void inode_lock_shared(struct inode *inode)
783{
784 down_read(&inode->i_rwsem);
785}
786
787static inline void inode_unlock_shared(struct inode *inode)
788{
789 up_read(&inode->i_rwsem);
790}
791
792static inline int inode_trylock(struct inode *inode)
793{
794 return down_write_trylock(&inode->i_rwsem);
795}
796
797static inline int inode_trylock_shared(struct inode *inode)
798{
799 return down_read_trylock(&inode->i_rwsem);
800}
801
802static inline int inode_is_locked(struct inode *inode)
803{
804 return rwsem_is_locked(&inode->i_rwsem);
805}
806
807static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
808{
809 down_write_nested(&inode->i_rwsem, subclass);
810}
811
812static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass)
813{
814 down_read_nested(&inode->i_rwsem, subclass);
815}
816
817void lock_two_nondirectories(struct inode *, struct inode*);
818void unlock_two_nondirectories(struct inode *, struct inode*);
819
820/*
821 * NOTE: in a 32bit arch with a preemptable kernel and
822 * an UP compile the i_size_read/write must be atomic
823 * with respect to the local cpu (unlike with preempt disabled),
824 * but they don't need to be atomic with respect to other cpus like in
825 * true SMP (so they need either to either locally disable irq around
826 * the read or for example on x86 they can be still implemented as a
827 * cmpxchg8b without the need of the lock prefix). For SMP compiles
828 * and 64bit archs it makes no difference if preempt is enabled or not.
829 */
830static inline loff_t i_size_read(const struct inode *inode)
831{
832#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
833 loff_t i_size;
834 unsigned int seq;
835
836 do {
837 seq = read_seqcount_begin(&inode->i_size_seqcount);
838 i_size = inode->i_size;
839 } while (read_seqcount_retry(&inode->i_size_seqcount, seq));
840 return i_size;
Olivier Deprez157378f2022-04-04 15:47:50 +0200841#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000842 loff_t i_size;
843
844 preempt_disable();
845 i_size = inode->i_size;
846 preempt_enable();
847 return i_size;
848#else
849 return inode->i_size;
850#endif
851}
852
853/*
854 * NOTE: unlike i_size_read(), i_size_write() does need locking around it
855 * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
856 * can be lost, resulting in subsequent i_size_read() calls spinning forever.
857 */
858static inline void i_size_write(struct inode *inode, loff_t i_size)
859{
860#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
861 preempt_disable();
862 write_seqcount_begin(&inode->i_size_seqcount);
863 inode->i_size = i_size;
864 write_seqcount_end(&inode->i_size_seqcount);
865 preempt_enable();
Olivier Deprez157378f2022-04-04 15:47:50 +0200866#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000867 preempt_disable();
868 inode->i_size = i_size;
869 preempt_enable();
870#else
871 inode->i_size = i_size;
872#endif
873}
874
875static inline unsigned iminor(const struct inode *inode)
876{
877 return MINOR(inode->i_rdev);
878}
879
880static inline unsigned imajor(const struct inode *inode)
881{
882 return MAJOR(inode->i_rdev);
883}
884
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000885struct fown_struct {
886 rwlock_t lock; /* protects pid, uid, euid fields */
887 struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
888 enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */
889 kuid_t uid, euid; /* uid/euid of process setting the owner */
890 int signum; /* posix.1b rt signal to be delivered on IO */
891};
892
893/*
894 * Track a single file's readahead state
895 */
896struct file_ra_state {
897 pgoff_t start; /* where readahead started */
898 unsigned int size; /* # of readahead pages */
899 unsigned int async_size; /* do asynchronous readahead when
900 there are only # of pages ahead */
901
902 unsigned int ra_pages; /* Maximum readahead window */
903 unsigned int mmap_miss; /* Cache miss stat for mmap accesses */
904 loff_t prev_pos; /* Cache last read() position */
905};
906
907/*
908 * Check if @index falls in the readahead windows.
909 */
910static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
911{
912 return (index >= ra->start &&
913 index < ra->start + ra->size);
914}
915
916struct file {
917 union {
918 struct llist_node fu_llist;
919 struct rcu_head fu_rcuhead;
920 } f_u;
921 struct path f_path;
922 struct inode *f_inode; /* cached value */
923 const struct file_operations *f_op;
924
925 /*
926 * Protects f_ep_links, f_flags.
927 * Must not be taken from IRQ context.
928 */
929 spinlock_t f_lock;
930 enum rw_hint f_write_hint;
931 atomic_long_t f_count;
932 unsigned int f_flags;
933 fmode_t f_mode;
934 struct mutex f_pos_lock;
935 loff_t f_pos;
936 struct fown_struct f_owner;
937 const struct cred *f_cred;
938 struct file_ra_state f_ra;
939
940 u64 f_version;
941#ifdef CONFIG_SECURITY
942 void *f_security;
943#endif
944 /* needed for tty driver, and maybe others */
945 void *private_data;
946
947#ifdef CONFIG_EPOLL
948 /* Used by fs/eventpoll.c to link all the hooks to this file */
949 struct list_head f_ep_links;
950 struct list_head f_tfile_llink;
951#endif /* #ifdef CONFIG_EPOLL */
952 struct address_space *f_mapping;
953 errseq_t f_wb_err;
Olivier Deprez157378f2022-04-04 15:47:50 +0200954 errseq_t f_sb_err; /* for syncfs */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000955} __randomize_layout
956 __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
957
958struct file_handle {
959 __u32 handle_bytes;
960 int handle_type;
961 /* file identifier */
Olivier Deprez0e641232021-09-23 10:07:05 +0200962 unsigned char f_handle[];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000963};
964
965static inline struct file *get_file(struct file *f)
966{
967 atomic_long_inc(&f->f_count);
968 return f;
969}
David Brazdil0f672f62019-12-10 10:32:29 +0000970#define get_file_rcu_many(x, cnt) \
971 atomic_long_add_unless(&(x)->f_count, (cnt), 0)
972#define get_file_rcu(x) get_file_rcu_many((x), 1)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000973#define file_count(x) atomic_long_read(&(x)->f_count)
974
975#define MAX_NON_LFS ((1UL<<31) - 1)
976
977/* Page cache limit. The filesystems should put that into their s_maxbytes
978 limits, otherwise bad things can happen in VM. */
979#if BITS_PER_LONG==32
980#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
981#elif BITS_PER_LONG==64
982#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
983#endif
984
985#define FL_POSIX 1
986#define FL_FLOCK 2
987#define FL_DELEG 4 /* NFSv4 delegation */
988#define FL_ACCESS 8 /* not trying to lock, just looking */
989#define FL_EXISTS 16 /* when unlocking, test for existence */
990#define FL_LEASE 32 /* lease held on this file */
991#define FL_CLOSE 64 /* unlock on close */
992#define FL_SLEEP 128 /* A blocking lock */
993#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
994#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
995#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
996#define FL_LAYOUT 2048 /* outstanding pNFS layout */
997
998#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
999
1000/*
1001 * Special return value from posix_lock_file() and vfs_lock_file() for
1002 * asynchronous locking.
1003 */
1004#define FILE_LOCK_DEFERRED 1
1005
1006/* legacy typedef, should eventually be removed */
1007typedef void *fl_owner_t;
1008
1009struct file_lock;
1010
1011struct file_lock_operations {
1012 void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
1013 void (*fl_release_private)(struct file_lock *);
1014};
1015
1016struct lock_manager_operations {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001017 fl_owner_t (*lm_get_owner)(fl_owner_t);
1018 void (*lm_put_owner)(fl_owner_t);
1019 void (*lm_notify)(struct file_lock *); /* unblock callback */
1020 int (*lm_grant)(struct file_lock *, int);
1021 bool (*lm_break)(struct file_lock *);
1022 int (*lm_change)(struct file_lock *, int, struct list_head *);
1023 void (*lm_setup)(struct file_lock *, void **);
Olivier Deprez157378f2022-04-04 15:47:50 +02001024 bool (*lm_breaker_owns_lease)(struct file_lock *);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001025};
1026
1027struct lock_manager {
1028 struct list_head list;
1029 /*
1030 * NFSv4 and up also want opens blocked during the grace period;
1031 * NLM doesn't care:
1032 */
1033 bool block_opens;
1034};
1035
1036struct net;
1037void locks_start_grace(struct net *, struct lock_manager *);
1038void locks_end_grace(struct lock_manager *);
1039bool locks_in_grace(struct net *);
1040bool opens_in_grace(struct net *);
1041
1042/* that will die - we need it for nfs_lock_info */
1043#include <linux/nfs_fs_i.h>
1044
1045/*
1046 * struct file_lock represents a generic "file lock". It's used to represent
1047 * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
1048 * note that the same struct is used to represent both a request for a lock and
1049 * the lock itself, but the same object is never used for both.
1050 *
1051 * FIXME: should we create a separate "struct lock_request" to help distinguish
1052 * these two uses?
1053 *
1054 * The varous i_flctx lists are ordered by:
1055 *
1056 * 1) lock owner
1057 * 2) lock range start
1058 * 3) lock range end
1059 *
1060 * Obviously, the last two criteria only matter for POSIX locks.
1061 */
1062struct file_lock {
David Brazdil0f672f62019-12-10 10:32:29 +00001063 struct file_lock *fl_blocker; /* The lock, that is blocking us */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001064 struct list_head fl_list; /* link into file_lock_context */
1065 struct hlist_node fl_link; /* node in global lists */
David Brazdil0f672f62019-12-10 10:32:29 +00001066 struct list_head fl_blocked_requests; /* list of requests with
1067 * ->fl_blocker pointing here
1068 */
1069 struct list_head fl_blocked_member; /* node in
1070 * ->fl_blocker->fl_blocked_requests
1071 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001072 fl_owner_t fl_owner;
1073 unsigned int fl_flags;
1074 unsigned char fl_type;
1075 unsigned int fl_pid;
1076 int fl_link_cpu; /* what cpu's list is this on? */
1077 wait_queue_head_t fl_wait;
1078 struct file *fl_file;
1079 loff_t fl_start;
1080 loff_t fl_end;
1081
1082 struct fasync_struct * fl_fasync; /* for lease break notifications */
1083 /* for lease breaks: */
1084 unsigned long fl_break_time;
1085 unsigned long fl_downgrade_time;
1086
1087 const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
1088 const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
1089 union {
1090 struct nfs_lock_info nfs_fl;
1091 struct nfs4_lock_info nfs4_fl;
1092 struct {
1093 struct list_head link; /* link in AFS vnode's pending_locks list */
1094 int state; /* state of grant or error if -ve */
David Brazdil0f672f62019-12-10 10:32:29 +00001095 unsigned int debug_id;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001096 } afs;
1097 } fl_u;
1098} __randomize_layout;
1099
1100struct file_lock_context {
1101 spinlock_t flc_lock;
1102 struct list_head flc_flock;
1103 struct list_head flc_posix;
1104 struct list_head flc_lease;
1105};
1106
1107/* The following constant reflects the upper bound of the file/locking space */
1108#ifndef OFFSET_MAX
1109#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
1110#define OFFSET_MAX INT_LIMIT(loff_t)
1111#define OFFT_OFFSET_MAX INT_LIMIT(off_t)
1112#endif
1113
1114extern void send_sigio(struct fown_struct *fown, int fd, int band);
1115
1116#define locks_inode(f) file_inode(f)
1117
1118#ifdef CONFIG_FILE_LOCKING
1119extern int fcntl_getlk(struct file *, unsigned int, struct flock *);
1120extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
1121 struct flock *);
1122
1123#if BITS_PER_LONG == 32
1124extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 *);
1125extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
1126 struct flock64 *);
1127#endif
1128
1129extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
1130extern int fcntl_getlease(struct file *filp);
1131
1132/* fs/locks.c */
1133void locks_free_lock_context(struct inode *inode);
1134void locks_free_lock(struct file_lock *fl);
1135extern void locks_init_lock(struct file_lock *);
1136extern struct file_lock * locks_alloc_lock(void);
1137extern void locks_copy_lock(struct file_lock *, struct file_lock *);
1138extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
1139extern void locks_remove_posix(struct file *, fl_owner_t);
1140extern void locks_remove_file(struct file *);
1141extern void locks_release_private(struct file_lock *);
1142extern void posix_test_lock(struct file *, struct file_lock *);
1143extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
David Brazdil0f672f62019-12-10 10:32:29 +00001144extern int locks_delete_block(struct file_lock *);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001145extern int vfs_test_lock(struct file *, struct file_lock *);
1146extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
1147extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
1148extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
1149extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
1150extern void lease_get_mtime(struct inode *, struct timespec64 *time);
1151extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
1152extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
1153extern int lease_modify(struct file_lock *, int, struct list_head *);
David Brazdil0f672f62019-12-10 10:32:29 +00001154
1155struct notifier_block;
1156extern int lease_register_notifier(struct notifier_block *);
1157extern void lease_unregister_notifier(struct notifier_block *);
1158
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001159struct files_struct;
1160extern void show_fd_locks(struct seq_file *f,
1161 struct file *filp, struct files_struct *files);
1162#else /* !CONFIG_FILE_LOCKING */
1163static inline int fcntl_getlk(struct file *file, unsigned int cmd,
1164 struct flock __user *user)
1165{
1166 return -EINVAL;
1167}
1168
1169static inline int fcntl_setlk(unsigned int fd, struct file *file,
1170 unsigned int cmd, struct flock __user *user)
1171{
1172 return -EACCES;
1173}
1174
1175#if BITS_PER_LONG == 32
1176static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
1177 struct flock64 __user *user)
1178{
1179 return -EINVAL;
1180}
1181
1182static inline int fcntl_setlk64(unsigned int fd, struct file *file,
1183 unsigned int cmd, struct flock64 __user *user)
1184{
1185 return -EACCES;
1186}
1187#endif
1188static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1189{
1190 return -EINVAL;
1191}
1192
1193static inline int fcntl_getlease(struct file *filp)
1194{
1195 return F_UNLCK;
1196}
1197
1198static inline void
1199locks_free_lock_context(struct inode *inode)
1200{
1201}
1202
1203static inline void locks_init_lock(struct file_lock *fl)
1204{
1205 return;
1206}
1207
1208static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
1209{
1210 return;
1211}
1212
1213static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
1214{
1215 return;
1216}
1217
1218static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
1219{
1220 return;
1221}
1222
1223static inline void locks_remove_file(struct file *filp)
1224{
1225 return;
1226}
1227
1228static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
1229{
1230 return;
1231}
1232
1233static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
1234 struct file_lock *conflock)
1235{
1236 return -ENOLCK;
1237}
1238
David Brazdil0f672f62019-12-10 10:32:29 +00001239static inline int locks_delete_block(struct file_lock *waiter)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001240{
1241 return -ENOENT;
1242}
1243
1244static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
1245{
1246 return 0;
1247}
1248
1249static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
1250 struct file_lock *fl, struct file_lock *conf)
1251{
1252 return -ENOLCK;
1253}
1254
1255static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
1256{
1257 return 0;
1258}
1259
1260static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1261{
1262 return -ENOLCK;
1263}
1264
1265static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
1266{
1267 return 0;
1268}
1269
1270static inline void lease_get_mtime(struct inode *inode,
1271 struct timespec64 *time)
1272{
1273 return;
1274}
1275
1276static inline int generic_setlease(struct file *filp, long arg,
1277 struct file_lock **flp, void **priv)
1278{
1279 return -EINVAL;
1280}
1281
1282static inline int vfs_setlease(struct file *filp, long arg,
1283 struct file_lock **lease, void **priv)
1284{
1285 return -EINVAL;
1286}
1287
1288static inline int lease_modify(struct file_lock *fl, int arg,
1289 struct list_head *dispose)
1290{
1291 return -EINVAL;
1292}
1293
1294struct files_struct;
1295static inline void show_fd_locks(struct seq_file *f,
1296 struct file *filp, struct files_struct *files) {}
1297#endif /* !CONFIG_FILE_LOCKING */
1298
1299static inline struct inode *file_inode(const struct file *f)
1300{
1301 return f->f_inode;
1302}
1303
1304static inline struct dentry *file_dentry(const struct file *file)
1305{
1306 return d_real(file->f_path.dentry, file_inode(file));
1307}
1308
1309static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
1310{
1311 return locks_lock_inode_wait(locks_inode(filp), fl);
1312}
1313
1314struct fasync_struct {
1315 rwlock_t fa_lock;
1316 int magic;
1317 int fa_fd;
1318 struct fasync_struct *fa_next; /* singly linked list */
1319 struct file *fa_file;
1320 struct rcu_head fa_rcu;
1321};
1322
1323#define FASYNC_MAGIC 0x4601
1324
1325/* SMP safe fasync helpers: */
1326extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
1327extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
1328extern int fasync_remove_entry(struct file *, struct fasync_struct **);
1329extern struct fasync_struct *fasync_alloc(void);
1330extern void fasync_free(struct fasync_struct *);
1331
1332/* can be called from interrupts */
1333extern void kill_fasync(struct fasync_struct **, int, int);
1334
1335extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
1336extern int f_setown(struct file *filp, unsigned long arg, int force);
1337extern void f_delown(struct file *filp);
1338extern pid_t f_getown(struct file *filp);
1339extern int send_sigurg(struct fown_struct *fown);
1340
1341/*
1342 * sb->s_flags. Note that these mirror the equivalent MS_* flags where
1343 * represented in both.
1344 */
1345#define SB_RDONLY 1 /* Mount read-only */
1346#define SB_NOSUID 2 /* Ignore suid and sgid bits */
1347#define SB_NODEV 4 /* Disallow access to device special files */
1348#define SB_NOEXEC 8 /* Disallow program execution */
1349#define SB_SYNCHRONOUS 16 /* Writes are synced at once */
1350#define SB_MANDLOCK 64 /* Allow mandatory locks on an FS */
1351#define SB_DIRSYNC 128 /* Directory modifications are synchronous */
1352#define SB_NOATIME 1024 /* Do not update access times. */
1353#define SB_NODIRATIME 2048 /* Do not update directory access times */
1354#define SB_SILENT 32768
1355#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */
Olivier Deprez157378f2022-04-04 15:47:50 +02001356#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001357#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */
1358#define SB_I_VERSION (1<<23) /* Update inode I_version field */
1359#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
1360
1361/* These sb flags are internal to the kernel */
1362#define SB_SUBMOUNT (1<<26)
David Brazdil0f672f62019-12-10 10:32:29 +00001363#define SB_FORCE (1<<27)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001364#define SB_NOSEC (1<<28)
1365#define SB_BORN (1<<29)
1366#define SB_ACTIVE (1<<30)
1367#define SB_NOUSER (1<<31)
1368
Olivier Deprez157378f2022-04-04 15:47:50 +02001369/* These flags relate to encoding and casefolding */
1370#define SB_ENC_STRICT_MODE_FL (1 << 0)
1371
1372#define sb_has_strict_encoding(sb) \
1373 (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
1374
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001375/*
1376 * Umount options
1377 */
1378
1379#define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */
1380#define MNT_DETACH 0x00000002 /* Just detach from the tree */
1381#define MNT_EXPIRE 0x00000004 /* Mark for expiry */
1382#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
1383#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
1384
1385/* sb->s_iflags */
1386#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
1387#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
1388#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
Olivier Deprez157378f2022-04-04 15:47:50 +02001389#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001390
1391/* sb->s_iflags to limit user namespace mounts */
1392#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
1393#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
1394#define SB_I_UNTRUSTED_MOUNTER 0x00000040
1395
Olivier Deprez157378f2022-04-04 15:47:50 +02001396#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
1397
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001398/* Possible states of 'frozen' field */
1399enum {
1400 SB_UNFROZEN = 0, /* FS is unfrozen */
1401 SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */
1402 SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */
1403 SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop
1404 * internal threads if needed) */
1405 SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */
1406};
1407
1408#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
1409
1410struct sb_writers {
1411 int frozen; /* Is sb frozen? */
1412 wait_queue_head_t wait_unfrozen; /* for get_super_thawed() */
1413 struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
1414};
1415
1416struct super_block {
1417 struct list_head s_list; /* Keep this first */
1418 dev_t s_dev; /* search index; _not_ kdev_t */
1419 unsigned char s_blocksize_bits;
1420 unsigned long s_blocksize;
1421 loff_t s_maxbytes; /* Max file size */
1422 struct file_system_type *s_type;
1423 const struct super_operations *s_op;
1424 const struct dquot_operations *dq_op;
1425 const struct quotactl_ops *s_qcop;
1426 const struct export_operations *s_export_op;
1427 unsigned long s_flags;
1428 unsigned long s_iflags; /* internal SB_I_* flags */
1429 unsigned long s_magic;
1430 struct dentry *s_root;
1431 struct rw_semaphore s_umount;
1432 int s_count;
1433 atomic_t s_active;
1434#ifdef CONFIG_SECURITY
1435 void *s_security;
1436#endif
1437 const struct xattr_handler **s_xattr;
David Brazdil0f672f62019-12-10 10:32:29 +00001438#ifdef CONFIG_FS_ENCRYPTION
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001439 const struct fscrypt_operations *s_cop;
David Brazdil0f672f62019-12-10 10:32:29 +00001440 struct key *s_master_keys; /* master crypto keys in use */
1441#endif
1442#ifdef CONFIG_FS_VERITY
1443 const struct fsverity_operations *s_vop;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001444#endif
Olivier Deprez157378f2022-04-04 15:47:50 +02001445#ifdef CONFIG_UNICODE
1446 struct unicode_map *s_encoding;
1447 __u16 s_encoding_flags;
1448#endif
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001449 struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
1450 struct list_head s_mounts; /* list of mounts; _not_ for fs use */
1451 struct block_device *s_bdev;
1452 struct backing_dev_info *s_bdi;
1453 struct mtd_info *s_mtd;
1454 struct hlist_node s_instances;
1455 unsigned int s_quota_types; /* Bitmask of supported quota types */
1456 struct quota_info s_dquot; /* Diskquota specific options */
1457
1458 struct sb_writers s_writers;
1459
David Brazdil0f672f62019-12-10 10:32:29 +00001460 /*
1461 * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and
1462 * s_fsnotify_marks together for cache efficiency. They are frequently
1463 * accessed and rarely modified.
1464 */
1465 void *s_fs_info; /* Filesystem private info */
1466
1467 /* Granularity of c/m/atime in ns (cannot be worse than a second) */
1468 u32 s_time_gran;
1469 /* Time limits for c/m/atime in seconds */
1470 time64_t s_time_min;
1471 time64_t s_time_max;
1472#ifdef CONFIG_FSNOTIFY
1473 __u32 s_fsnotify_mask;
1474 struct fsnotify_mark_connector __rcu *s_fsnotify_marks;
1475#endif
1476
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001477 char s_id[32]; /* Informational name */
1478 uuid_t s_uuid; /* UUID */
1479
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001480 unsigned int s_max_links;
1481 fmode_t s_mode;
1482
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001483 /*
1484 * The next field is for VFS *only*. No filesystems have any business
1485 * even looking at it. You had been warned.
1486 */
1487 struct mutex s_vfs_rename_mutex; /* Kludge */
1488
1489 /*
1490 * Filesystem subtype. If non-empty the filesystem type field
1491 * in /proc/mounts will be "type.subtype"
1492 */
David Brazdil0f672f62019-12-10 10:32:29 +00001493 const char *s_subtype;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001494
1495 const struct dentry_operations *s_d_op; /* default d_op for dentries */
1496
1497 /*
1498 * Saved pool identifier for cleancache (-1 means none)
1499 */
1500 int cleancache_poolid;
1501
1502 struct shrinker s_shrink; /* per-sb shrinker handle */
1503
1504 /* Number of inodes with nlink == 0 but still referenced */
1505 atomic_long_t s_remove_count;
1506
1507 /* Pending fsnotify inode refs */
1508 atomic_long_t s_fsnotify_inode_refs;
1509
1510 /* Being remounted read-only */
1511 int s_readonly_remount;
1512
Olivier Deprez157378f2022-04-04 15:47:50 +02001513 /* per-sb errseq_t for reporting writeback errors via syncfs */
1514 errseq_t s_wb_err;
1515
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001516 /* AIO completions deferred from interrupt context */
1517 struct workqueue_struct *s_dio_done_wq;
1518 struct hlist_head s_pins;
1519
1520 /*
1521 * Owning user namespace and default context in which to
1522 * interpret filesystem uids, gids, quotas, device nodes,
1523 * xattrs and security labels.
1524 */
1525 struct user_namespace *s_user_ns;
1526
1527 /*
David Brazdil0f672f62019-12-10 10:32:29 +00001528 * The list_lru structure is essentially just a pointer to a table
1529 * of per-node lru lists, each of which has its own spinlock.
1530 * There is no need to put them into separate cachelines.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001531 */
David Brazdil0f672f62019-12-10 10:32:29 +00001532 struct list_lru s_dentry_lru;
1533 struct list_lru s_inode_lru;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001534 struct rcu_head rcu;
1535 struct work_struct destroy_work;
1536
1537 struct mutex s_sync_lock; /* sync serialisation lock */
1538
1539 /*
1540 * Indicates how deep in a filesystem stack this SB is
1541 */
1542 int s_stack_depth;
1543
1544 /* s_inode_list_lock protects s_inodes */
1545 spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
1546 struct list_head s_inodes; /* all inodes */
1547
1548 spinlock_t s_inode_wblist_lock;
1549 struct list_head s_inodes_wb; /* writeback inodes */
1550} __randomize_layout;
1551
1552/* Helper functions so that in most cases filesystems will
1553 * not need to deal directly with kuid_t and kgid_t and can
1554 * instead deal with the raw numeric values that are stored
1555 * in the filesystem.
1556 */
1557static inline uid_t i_uid_read(const struct inode *inode)
1558{
1559 return from_kuid(inode->i_sb->s_user_ns, inode->i_uid);
1560}
1561
1562static inline gid_t i_gid_read(const struct inode *inode)
1563{
1564 return from_kgid(inode->i_sb->s_user_ns, inode->i_gid);
1565}
1566
1567static inline void i_uid_write(struct inode *inode, uid_t uid)
1568{
1569 inode->i_uid = make_kuid(inode->i_sb->s_user_ns, uid);
1570}
1571
1572static inline void i_gid_write(struct inode *inode, gid_t gid)
1573{
1574 inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
1575}
1576
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001577extern struct timespec64 current_time(struct inode *inode);
1578
1579/*
1580 * Snapshotting support.
1581 */
1582
Olivier Deprez157378f2022-04-04 15:47:50 +02001583/*
1584 * These are internal functions, please use sb_start_{write,pagefault,intwrite}
1585 * instead.
1586 */
1587static inline void __sb_end_write(struct super_block *sb, int level)
1588{
1589 percpu_up_read(sb->s_writers.rw_sem + level-1);
1590}
1591
1592static inline void __sb_start_write(struct super_block *sb, int level)
1593{
1594 percpu_down_read(sb->s_writers.rw_sem + level - 1);
1595}
1596
1597static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
1598{
1599 return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
1600}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001601
1602#define __sb_writers_acquired(sb, lev) \
1603 percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
1604#define __sb_writers_release(sb, lev) \
1605 percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
1606
1607/**
1608 * sb_end_write - drop write access to a superblock
1609 * @sb: the super we wrote to
1610 *
1611 * Decrement number of writers to the filesystem. Wake up possible waiters
1612 * wanting to freeze the filesystem.
1613 */
1614static inline void sb_end_write(struct super_block *sb)
1615{
1616 __sb_end_write(sb, SB_FREEZE_WRITE);
1617}
1618
1619/**
1620 * sb_end_pagefault - drop write access to a superblock from a page fault
1621 * @sb: the super we wrote to
1622 *
1623 * Decrement number of processes handling write page fault to the filesystem.
1624 * Wake up possible waiters wanting to freeze the filesystem.
1625 */
1626static inline void sb_end_pagefault(struct super_block *sb)
1627{
1628 __sb_end_write(sb, SB_FREEZE_PAGEFAULT);
1629}
1630
1631/**
1632 * sb_end_intwrite - drop write access to a superblock for internal fs purposes
1633 * @sb: the super we wrote to
1634 *
1635 * Decrement fs-internal number of writers to the filesystem. Wake up possible
1636 * waiters wanting to freeze the filesystem.
1637 */
1638static inline void sb_end_intwrite(struct super_block *sb)
1639{
1640 __sb_end_write(sb, SB_FREEZE_FS);
1641}
1642
1643/**
1644 * sb_start_write - get write access to a superblock
1645 * @sb: the super we write to
1646 *
1647 * When a process wants to write data or metadata to a file system (i.e. dirty
1648 * a page or an inode), it should embed the operation in a sb_start_write() -
1649 * sb_end_write() pair to get exclusion against file system freezing. This
1650 * function increments number of writers preventing freezing. If the file
1651 * system is already frozen, the function waits until the file system is
1652 * thawed.
1653 *
1654 * Since freeze protection behaves as a lock, users have to preserve
1655 * ordering of freeze protection and other filesystem locks. Generally,
1656 * freeze protection should be the outermost lock. In particular, we have:
1657 *
1658 * sb_start_write
1659 * -> i_mutex (write path, truncate, directory ops, ...)
1660 * -> s_umount (freeze_super, thaw_super)
1661 */
1662static inline void sb_start_write(struct super_block *sb)
1663{
Olivier Deprez157378f2022-04-04 15:47:50 +02001664 __sb_start_write(sb, SB_FREEZE_WRITE);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001665}
1666
Olivier Deprez157378f2022-04-04 15:47:50 +02001667static inline bool sb_start_write_trylock(struct super_block *sb)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001668{
Olivier Deprez157378f2022-04-04 15:47:50 +02001669 return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001670}
1671
1672/**
1673 * sb_start_pagefault - get write access to a superblock from a page fault
1674 * @sb: the super we write to
1675 *
1676 * When a process starts handling write page fault, it should embed the
1677 * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
1678 * exclusion against file system freezing. This is needed since the page fault
1679 * is going to dirty a page. This function increments number of running page
1680 * faults preventing freezing. If the file system is already frozen, the
1681 * function waits until the file system is thawed.
1682 *
1683 * Since page fault freeze protection behaves as a lock, users have to preserve
1684 * ordering of freeze protection and other filesystem locks. It is advised to
Olivier Deprez157378f2022-04-04 15:47:50 +02001685 * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001686 * handling code implies lock dependency:
1687 *
Olivier Deprez157378f2022-04-04 15:47:50 +02001688 * mmap_lock
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001689 * -> sb_start_pagefault
1690 */
1691static inline void sb_start_pagefault(struct super_block *sb)
1692{
Olivier Deprez157378f2022-04-04 15:47:50 +02001693 __sb_start_write(sb, SB_FREEZE_PAGEFAULT);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001694}
1695
1696/*
1697 * sb_start_intwrite - get write access to a superblock for internal fs purposes
1698 * @sb: the super we write to
1699 *
1700 * This is the third level of protection against filesystem freezing. It is
1701 * free for use by a filesystem. The only requirement is that it must rank
1702 * below sb_start_pagefault.
1703 *
1704 * For example filesystem can call sb_start_intwrite() when starting a
1705 * transaction which somewhat eases handling of freezing for internal sources
1706 * of filesystem changes (internal fs threads, discarding preallocation on file
1707 * close, etc.).
1708 */
1709static inline void sb_start_intwrite(struct super_block *sb)
1710{
Olivier Deprez157378f2022-04-04 15:47:50 +02001711 __sb_start_write(sb, SB_FREEZE_FS);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001712}
1713
Olivier Deprez157378f2022-04-04 15:47:50 +02001714static inline bool sb_start_intwrite_trylock(struct super_block *sb)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001715{
Olivier Deprez157378f2022-04-04 15:47:50 +02001716 return __sb_start_write_trylock(sb, SB_FREEZE_FS);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001717}
1718
1719
1720extern bool inode_owner_or_capable(const struct inode *inode);
1721
1722/*
1723 * VFS helper functions..
1724 */
1725extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
1726extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
1727extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
1728extern int vfs_symlink(struct inode *, struct dentry *, const char *);
1729extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
1730extern int vfs_rmdir(struct inode *, struct dentry *);
1731extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
1732extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
Olivier Deprez157378f2022-04-04 15:47:50 +02001733
1734static inline int vfs_whiteout(struct inode *dir, struct dentry *dentry)
1735{
1736 return vfs_mknod(dir, dentry, S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
1737}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001738
1739extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode,
1740 int open_flag);
1741
1742int vfs_mkobj(struct dentry *, umode_t,
1743 int (*f)(struct dentry *, umode_t, void *),
1744 void *);
1745
Olivier Deprez157378f2022-04-04 15:47:50 +02001746int vfs_fchown(struct file *file, uid_t user, gid_t group);
1747int vfs_fchmod(struct file *file, umode_t mode);
1748int vfs_utimes(const struct path *path, struct timespec64 *times);
1749
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001750extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
1751
Olivier Deprez0e641232021-09-23 10:07:05 +02001752#ifdef CONFIG_COMPAT
1753extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
1754 unsigned long arg);
1755#else
1756#define compat_ptr_ioctl NULL
1757#endif
1758
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001759/*
1760 * VFS file helper functions.
1761 */
1762extern void inode_init_owner(struct inode *inode, const struct inode *dir,
1763 umode_t mode);
1764extern bool may_open_dev(const struct path *path);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001765
1766/*
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001767 * This is the "filldir" function type, used by readdir() to let
1768 * the kernel specify what kind of dirent layout it wants to have.
1769 * This allows the kernel to read directories into kernel space or
1770 * to have different dirent layouts depending on the binary type.
1771 */
1772struct dir_context;
1773typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
1774 unsigned);
1775
1776struct dir_context {
1777 filldir_t actor;
1778 loff_t pos;
1779};
1780
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001781/*
1782 * These flags let !MMU mmap() govern direct device mapping vs immediate
1783 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
1784 *
1785 * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
1786 * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
1787 * NOMMU_MAP_READ: Can be mapped for reading
1788 * NOMMU_MAP_WRITE: Can be mapped for writing
1789 * NOMMU_MAP_EXEC: Can be mapped for execution
1790 */
1791#define NOMMU_MAP_COPY 0x00000001
1792#define NOMMU_MAP_DIRECT 0x00000008
1793#define NOMMU_MAP_READ VM_MAYREAD
1794#define NOMMU_MAP_WRITE VM_MAYWRITE
1795#define NOMMU_MAP_EXEC VM_MAYEXEC
1796
1797#define NOMMU_VMFLAGS \
1798 (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
1799
David Brazdil0f672f62019-12-10 10:32:29 +00001800/*
1801 * These flags control the behavior of the remap_file_range function pointer.
1802 * If it is called with len == 0 that means "remap to end of source file".
1803 * See Documentation/filesystems/vfs.rst for more details about this call.
1804 *
1805 * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate)
1806 * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request
1807 */
1808#define REMAP_FILE_DEDUP (1 << 0)
1809#define REMAP_FILE_CAN_SHORTEN (1 << 1)
1810
1811/*
1812 * These flags signal that the caller is ok with altering various aspects of
1813 * the behavior of the remap operation. The changes must be made by the
1814 * implementation; the vfs remap helper functions can take advantage of them.
1815 * Flags in this category exist to preserve the quirky behavior of the hoisted
1816 * btrfs clone/dedupe ioctls.
1817 */
1818#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001819
1820struct iov_iter;
1821
1822struct file_operations {
1823 struct module *owner;
1824 loff_t (*llseek) (struct file *, loff_t, int);
1825 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
1826 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
1827 ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
1828 ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
David Brazdil0f672f62019-12-10 10:32:29 +00001829 int (*iopoll)(struct kiocb *kiocb, bool spin);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001830 int (*iterate) (struct file *, struct dir_context *);
1831 int (*iterate_shared) (struct file *, struct dir_context *);
1832 __poll_t (*poll) (struct file *, struct poll_table_struct *);
1833 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
1834 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
1835 int (*mmap) (struct file *, struct vm_area_struct *);
1836 unsigned long mmap_supported_flags;
1837 int (*open) (struct inode *, struct file *);
1838 int (*flush) (struct file *, fl_owner_t id);
1839 int (*release) (struct inode *, struct file *);
1840 int (*fsync) (struct file *, loff_t, loff_t, int datasync);
1841 int (*fasync) (int, struct file *, int);
1842 int (*lock) (struct file *, int, struct file_lock *);
1843 ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
1844 unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1845 int (*check_flags)(int);
1846 int (*flock) (struct file *, int, struct file_lock *);
1847 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
1848 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
1849 int (*setlease)(struct file *, long, struct file_lock **, void **);
1850 long (*fallocate)(struct file *file, int mode, loff_t offset,
1851 loff_t len);
1852 void (*show_fdinfo)(struct seq_file *m, struct file *f);
1853#ifndef CONFIG_MMU
1854 unsigned (*mmap_capabilities)(struct file *);
1855#endif
1856 ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
1857 loff_t, size_t, unsigned int);
David Brazdil0f672f62019-12-10 10:32:29 +00001858 loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
1859 struct file *file_out, loff_t pos_out,
1860 loff_t len, unsigned int remap_flags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001861 int (*fadvise)(struct file *, loff_t, loff_t, int);
1862} __randomize_layout;
1863
1864struct inode_operations {
1865 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
1866 const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
1867 int (*permission) (struct inode *, int);
1868 struct posix_acl * (*get_acl)(struct inode *, int);
1869
1870 int (*readlink) (struct dentry *, char __user *,int);
1871
1872 int (*create) (struct inode *,struct dentry *, umode_t, bool);
1873 int (*link) (struct dentry *,struct inode *,struct dentry *);
1874 int (*unlink) (struct inode *,struct dentry *);
1875 int (*symlink) (struct inode *,struct dentry *,const char *);
1876 int (*mkdir) (struct inode *,struct dentry *,umode_t);
1877 int (*rmdir) (struct inode *,struct dentry *);
1878 int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
1879 int (*rename) (struct inode *, struct dentry *,
1880 struct inode *, struct dentry *, unsigned int);
1881 int (*setattr) (struct dentry *, struct iattr *);
1882 int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
1883 ssize_t (*listxattr) (struct dentry *, char *, size_t);
1884 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
1885 u64 len);
1886 int (*update_time)(struct inode *, struct timespec64 *, int);
1887 int (*atomic_open)(struct inode *, struct dentry *,
1888 struct file *, unsigned open_flag,
1889 umode_t create_mode);
1890 int (*tmpfile) (struct inode *, struct dentry *, umode_t);
1891 int (*set_acl)(struct inode *, struct posix_acl *, int);
1892} ____cacheline_aligned;
1893
1894static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
1895 struct iov_iter *iter)
1896{
1897 return file->f_op->read_iter(kio, iter);
1898}
1899
1900static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio,
1901 struct iov_iter *iter)
1902{
1903 return file->f_op->write_iter(kio, iter);
1904}
1905
1906static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
1907{
1908 return file->f_op->mmap(file, vma);
1909}
1910
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001911extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
1912extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001913extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
1914 loff_t, size_t, unsigned int);
David Brazdil0f672f62019-12-10 10:32:29 +00001915extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
1916 struct file *file_out, loff_t pos_out,
1917 size_t len, unsigned int flags);
1918extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
1919 struct file *file_out, loff_t pos_out,
1920 loff_t *count,
1921 unsigned int remap_flags);
1922extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
1923 struct file *file_out, loff_t pos_out,
1924 loff_t len, unsigned int remap_flags);
1925extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
1926 struct file *file_out, loff_t pos_out,
1927 loff_t len, unsigned int remap_flags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001928extern int vfs_dedupe_file_range(struct file *file,
1929 struct file_dedupe_range *same);
David Brazdil0f672f62019-12-10 10:32:29 +00001930extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
1931 struct file *dst_file, loff_t dst_pos,
1932 loff_t len, unsigned int remap_flags);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001933
1934
1935struct super_operations {
1936 struct inode *(*alloc_inode)(struct super_block *sb);
1937 void (*destroy_inode)(struct inode *);
David Brazdil0f672f62019-12-10 10:32:29 +00001938 void (*free_inode)(struct inode *);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001939
1940 void (*dirty_inode) (struct inode *, int flags);
1941 int (*write_inode) (struct inode *, struct writeback_control *wbc);
1942 int (*drop_inode) (struct inode *);
1943 void (*evict_inode) (struct inode *);
1944 void (*put_super) (struct super_block *);
1945 int (*sync_fs)(struct super_block *sb, int wait);
1946 int (*freeze_super) (struct super_block *);
1947 int (*freeze_fs) (struct super_block *);
1948 int (*thaw_super) (struct super_block *);
1949 int (*unfreeze_fs) (struct super_block *);
1950 int (*statfs) (struct dentry *, struct kstatfs *);
1951 int (*remount_fs) (struct super_block *, int *, char *);
1952 void (*umount_begin) (struct super_block *);
1953
1954 int (*show_options)(struct seq_file *, struct dentry *);
1955 int (*show_devname)(struct seq_file *, struct dentry *);
1956 int (*show_path)(struct seq_file *, struct dentry *);
1957 int (*show_stats)(struct seq_file *, struct dentry *);
1958#ifdef CONFIG_QUOTA
1959 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
1960 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
1961 struct dquot **(*get_dquots)(struct inode *);
1962#endif
1963 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
1964 long (*nr_cached_objects)(struct super_block *,
1965 struct shrink_control *);
1966 long (*free_cached_objects)(struct super_block *,
1967 struct shrink_control *);
1968};
1969
1970/*
1971 * Inode flags - they have no relation to superblock flags now
1972 */
Olivier Deprez157378f2022-04-04 15:47:50 +02001973#define S_SYNC (1 << 0) /* Writes are synced at once */
1974#define S_NOATIME (1 << 1) /* Do not update access times */
1975#define S_APPEND (1 << 2) /* Append-only file */
1976#define S_IMMUTABLE (1 << 3) /* Immutable file */
1977#define S_DEAD (1 << 4) /* removed, but still open directory */
1978#define S_NOQUOTA (1 << 5) /* Inode is not counted to quota */
1979#define S_DIRSYNC (1 << 6) /* Directory modifications are synchronous */
1980#define S_NOCMTIME (1 << 7) /* Do not update file c/mtime */
1981#define S_SWAPFILE (1 << 8) /* Do not truncate: swapon got its bmaps */
1982#define S_PRIVATE (1 << 9) /* Inode is fs-internal */
1983#define S_IMA (1 << 10) /* Inode has an associated IMA struct */
1984#define S_AUTOMOUNT (1 << 11) /* Automount/referral quasi-directory */
1985#define S_NOSEC (1 << 12) /* no suid or xattr security attributes */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001986#ifdef CONFIG_FS_DAX
Olivier Deprez157378f2022-04-04 15:47:50 +02001987#define S_DAX (1 << 13) /* Direct Access, avoiding the page cache */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001988#else
Olivier Deprez157378f2022-04-04 15:47:50 +02001989#define S_DAX 0 /* Make all the DAX code disappear */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001990#endif
Olivier Deprez157378f2022-04-04 15:47:50 +02001991#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */
1992#define S_CASEFOLD (1 << 15) /* Casefolded file */
1993#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001994
1995/*
1996 * Note that nosuid etc flags are inode-specific: setting some file-system
1997 * flags just means all the inodes inherit those flags by default. It might be
1998 * possible to override it selectively if you really wanted to with some
1999 * ioctl() that is not currently implemented.
2000 *
2001 * Exception: SB_RDONLY is always applied to the entire file system.
2002 *
2003 * Unfortunately, it is possible to change a filesystems flags with it mounted
2004 * with files in use. This means that all of the inodes will not have their
2005 * i_flags updated. Hence, i_flags no longer inherit the superblock mount
2006 * flags, so these have to be checked separately. -- rmk@arm.uk.linux.org
2007 */
2008#define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg))
2009
2010static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; }
2011#define IS_RDONLY(inode) sb_rdonly((inode)->i_sb)
2012#define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \
2013 ((inode)->i_flags & S_SYNC))
2014#define IS_DIRSYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS|SB_DIRSYNC) || \
2015 ((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
2016#define IS_MANDLOCK(inode) __IS_FLG(inode, SB_MANDLOCK)
2017#define IS_NOATIME(inode) __IS_FLG(inode, SB_RDONLY|SB_NOATIME)
2018#define IS_I_VERSION(inode) __IS_FLG(inode, SB_I_VERSION)
2019
2020#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
2021#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
2022#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
2023#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL)
2024
2025#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
2026#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
2027#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
2028#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
2029#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
2030#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
2031#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
2032#define IS_DAX(inode) ((inode)->i_flags & S_DAX)
2033#define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED)
David Brazdil0f672f62019-12-10 10:32:29 +00002034#define IS_CASEFOLDED(inode) ((inode)->i_flags & S_CASEFOLD)
2035#define IS_VERITY(inode) ((inode)->i_flags & S_VERITY)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002036
2037#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
2038 (inode)->i_rdev == WHITEOUT_DEV)
2039
2040static inline bool HAS_UNMAPPED_ID(struct inode *inode)
2041{
2042 return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid);
2043}
2044
2045static inline enum rw_hint file_write_hint(struct file *file)
2046{
2047 if (file->f_write_hint != WRITE_LIFE_NOT_SET)
2048 return file->f_write_hint;
2049
2050 return file_inode(file)->i_write_hint;
2051}
2052
2053static inline int iocb_flags(struct file *file);
2054
2055static inline u16 ki_hint_validate(enum rw_hint hint)
2056{
2057 typeof(((struct kiocb *)0)->ki_hint) max_hint = -1;
2058
2059 if (hint <= max_hint)
2060 return hint;
2061 return 0;
2062}
2063
2064static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
2065{
2066 *kiocb = (struct kiocb) {
2067 .ki_filp = filp,
2068 .ki_flags = iocb_flags(filp),
2069 .ki_hint = ki_hint_validate(file_write_hint(filp)),
David Brazdil0f672f62019-12-10 10:32:29 +00002070 .ki_ioprio = get_current_ioprio(),
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002071 };
2072}
2073
Olivier Deprez157378f2022-04-04 15:47:50 +02002074static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
2075 struct file *filp)
2076{
2077 *kiocb = (struct kiocb) {
2078 .ki_filp = filp,
2079 .ki_flags = kiocb_src->ki_flags,
2080 .ki_hint = kiocb_src->ki_hint,
2081 .ki_ioprio = kiocb_src->ki_ioprio,
2082 .ki_pos = kiocb_src->ki_pos,
2083 };
2084}
2085
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002086/*
2087 * Inode state bits. Protected by inode->i_lock
2088 *
2089 * Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
2090 * I_DIRTY_DATASYNC and I_DIRTY_PAGES.
2091 *
2092 * Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
2093 * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
2094 * various stages of removing an inode.
2095 *
2096 * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
2097 *
2098 * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
2099 * fdatasync(). i_atime is the usual cause.
2100 * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
2101 * these changes separately from I_DIRTY_SYNC so that we
2102 * don't have to write inode on fdatasync() when only
2103 * mtime has changed in it.
2104 * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
2105 * I_NEW Serves as both a mutex and completion notification.
2106 * New inodes set I_NEW. If two processes both create
2107 * the same inode, one of them will release its inode and
2108 * wait for I_NEW to be released before returning.
2109 * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
2110 * also cause waiting on I_NEW, without I_NEW actually
2111 * being set. find_inode() uses this to prevent returning
2112 * nearly-dead inodes.
2113 * I_WILL_FREE Must be set when calling write_inode_now() if i_count
2114 * is zero. I_FREEING must be set when I_WILL_FREE is
2115 * cleared.
2116 * I_FREEING Set when inode is about to be freed but still has dirty
2117 * pages or buffers attached or the inode itself is still
2118 * dirty.
2119 * I_CLEAR Added by clear_inode(). In this state the inode is
2120 * clean and can be destroyed. Inode keeps I_FREEING.
2121 *
2122 * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
2123 * prohibited for many purposes. iget() must wait for
2124 * the inode to be completely released, then create it
2125 * anew. Other functions will just ignore such inodes,
2126 * if appropriate. I_NEW is used for waiting.
2127 *
2128 * I_SYNC Writeback of inode is running. The bit is set during
2129 * data writeback, and cleared with a wakeup on the bit
2130 * address once it is done. The bit is also used to pin
2131 * the inode in memory for flusher thread.
2132 *
2133 * I_REFERENCED Marks the inode as recently references on the LRU list.
2134 *
2135 * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit().
2136 *
2137 * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
2138 * synchronize competing switching instances and to tell
2139 * wb stat updates to grab the i_pages lock. See
David Brazdil0f672f62019-12-10 10:32:29 +00002140 * inode_switch_wbs_work_fn() for details.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002141 *
2142 * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
2143 * and work dirs among overlayfs mounts.
2144 *
2145 * I_CREATING New object's inode in the middle of setting up.
2146 *
Olivier Deprez157378f2022-04-04 15:47:50 +02002147 * I_DONTCACHE Evict inode as soon as it is not used anymore.
2148 *
Olivier Deprez0e641232021-09-23 10:07:05 +02002149 * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
2150 * Used to detect that mark_inode_dirty() should not move
2151 * inode between dirty lists.
2152 *
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002153 * Q: What is the difference between I_WILL_FREE and I_FREEING?
2154 */
2155#define I_DIRTY_SYNC (1 << 0)
2156#define I_DIRTY_DATASYNC (1 << 1)
2157#define I_DIRTY_PAGES (1 << 2)
2158#define __I_NEW 3
2159#define I_NEW (1 << __I_NEW)
2160#define I_WILL_FREE (1 << 4)
2161#define I_FREEING (1 << 5)
2162#define I_CLEAR (1 << 6)
2163#define __I_SYNC 7
2164#define I_SYNC (1 << __I_SYNC)
2165#define I_REFERENCED (1 << 8)
2166#define __I_DIO_WAKEUP 9
2167#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
2168#define I_LINKABLE (1 << 10)
2169#define I_DIRTY_TIME (1 << 11)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002170#define I_WB_SWITCH (1 << 13)
2171#define I_OVL_INUSE (1 << 14)
2172#define I_CREATING (1 << 15)
Olivier Deprez157378f2022-04-04 15:47:50 +02002173#define I_DONTCACHE (1 << 16)
Olivier Deprez0e641232021-09-23 10:07:05 +02002174#define I_SYNC_QUEUED (1 << 17)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002175
2176#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
2177#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
2178#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
2179
2180extern void __mark_inode_dirty(struct inode *, int);
2181static inline void mark_inode_dirty(struct inode *inode)
2182{
2183 __mark_inode_dirty(inode, I_DIRTY);
2184}
2185
2186static inline void mark_inode_dirty_sync(struct inode *inode)
2187{
2188 __mark_inode_dirty(inode, I_DIRTY_SYNC);
2189}
2190
2191extern void inc_nlink(struct inode *inode);
2192extern void drop_nlink(struct inode *inode);
2193extern void clear_nlink(struct inode *inode);
2194extern void set_nlink(struct inode *inode, unsigned int nlink);
2195
2196static inline void inode_inc_link_count(struct inode *inode)
2197{
2198 inc_nlink(inode);
2199 mark_inode_dirty(inode);
2200}
2201
2202static inline void inode_dec_link_count(struct inode *inode)
2203{
2204 drop_nlink(inode);
2205 mark_inode_dirty(inode);
2206}
2207
2208enum file_time_flags {
2209 S_ATIME = 1,
2210 S_MTIME = 2,
2211 S_CTIME = 4,
2212 S_VERSION = 8,
2213};
2214
2215extern bool atime_needs_update(const struct path *, struct inode *);
2216extern void touch_atime(const struct path *);
Olivier Deprez157378f2022-04-04 15:47:50 +02002217int inode_update_time(struct inode *inode, struct timespec64 *time, int flags);
2218
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002219static inline void file_accessed(struct file *file)
2220{
2221 if (!(file->f_flags & O_NOATIME))
2222 touch_atime(&file->f_path);
2223}
2224
David Brazdil0f672f62019-12-10 10:32:29 +00002225extern int file_modified(struct file *file);
2226
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002227int sync_inode(struct inode *inode, struct writeback_control *wbc);
2228int sync_inode_metadata(struct inode *inode, int wait);
2229
2230struct file_system_type {
2231 const char *name;
2232 int fs_flags;
2233#define FS_REQUIRES_DEV 1
2234#define FS_BINARY_MOUNTDATA 2
2235#define FS_HAS_SUBTYPE 4
2236#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
David Brazdil0f672f62019-12-10 10:32:29 +00002237#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
Olivier Deprez157378f2022-04-04 15:47:50 +02002238#define FS_THP_SUPPORT 8192 /* Remove once all fs converted */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002239#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
David Brazdil0f672f62019-12-10 10:32:29 +00002240 int (*init_fs_context)(struct fs_context *);
Olivier Deprez157378f2022-04-04 15:47:50 +02002241 const struct fs_parameter_spec *parameters;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002242 struct dentry *(*mount) (struct file_system_type *, int,
2243 const char *, void *);
2244 void (*kill_sb) (struct super_block *);
2245 struct module *owner;
2246 struct file_system_type * next;
2247 struct hlist_head fs_supers;
2248
2249 struct lock_class_key s_lock_key;
2250 struct lock_class_key s_umount_key;
2251 struct lock_class_key s_vfs_rename_key;
2252 struct lock_class_key s_writers_key[SB_FREEZE_LEVELS];
2253
2254 struct lock_class_key i_lock_key;
2255 struct lock_class_key i_mutex_key;
2256 struct lock_class_key i_mutex_dir_key;
2257};
2258
2259#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
2260
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002261extern struct dentry *mount_bdev(struct file_system_type *fs_type,
2262 int flags, const char *dev_name, void *data,
2263 int (*fill_super)(struct super_block *, void *, int));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002264extern struct dentry *mount_single(struct file_system_type *fs_type,
2265 int flags, void *data,
2266 int (*fill_super)(struct super_block *, void *, int));
2267extern struct dentry *mount_nodev(struct file_system_type *fs_type,
2268 int flags, void *data,
2269 int (*fill_super)(struct super_block *, void *, int));
2270extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
2271void generic_shutdown_super(struct super_block *sb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002272void kill_block_super(struct super_block *sb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002273void kill_anon_super(struct super_block *sb);
2274void kill_litter_super(struct super_block *sb);
2275void deactivate_super(struct super_block *sb);
2276void deactivate_locked_super(struct super_block *sb);
2277int set_anon_super(struct super_block *s, void *data);
David Brazdil0f672f62019-12-10 10:32:29 +00002278int set_anon_super_fc(struct super_block *s, struct fs_context *fc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002279int get_anon_bdev(dev_t *);
2280void free_anon_bdev(dev_t);
David Brazdil0f672f62019-12-10 10:32:29 +00002281struct super_block *sget_fc(struct fs_context *fc,
2282 int (*test)(struct super_block *, struct fs_context *),
2283 int (*set)(struct super_block *, struct fs_context *));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002284struct super_block *sget(struct file_system_type *type,
2285 int (*test)(struct super_block *,void *),
2286 int (*set)(struct super_block *,void *),
2287 int flags, void *data);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002288
2289/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
2290#define fops_get(fops) \
2291 (((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
2292#define fops_put(fops) \
2293 do { if (fops) module_put((fops)->owner); } while(0)
2294/*
2295 * This one is to be used *ONLY* from ->open() instances.
2296 * fops must be non-NULL, pinned down *and* module dependencies
2297 * should be sufficient to pin the caller down as well.
2298 */
2299#define replace_fops(f, fops) \
2300 do { \
2301 struct file *__file = (f); \
2302 fops_put(__file->f_op); \
2303 BUG_ON(!(__file->f_op = (fops))); \
2304 } while(0)
2305
2306extern int register_filesystem(struct file_system_type *);
2307extern int unregister_filesystem(struct file_system_type *);
David Brazdil0f672f62019-12-10 10:32:29 +00002308extern struct vfsmount *kern_mount(struct file_system_type *);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002309extern void kern_unmount(struct vfsmount *mnt);
2310extern int may_umount_tree(struct vfsmount *);
2311extern int may_umount(struct vfsmount *);
2312extern long do_mount(const char *, const char __user *,
2313 const char *, unsigned long, void *);
2314extern struct vfsmount *collect_mounts(const struct path *);
2315extern void drop_collected_mounts(struct vfsmount *);
2316extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
2317 struct vfsmount *);
2318extern int vfs_statfs(const struct path *, struct kstatfs *);
2319extern int user_statfs(const char __user *, struct kstatfs *);
2320extern int fd_statfs(int, struct kstatfs *);
2321extern int freeze_super(struct super_block *super);
2322extern int thaw_super(struct super_block *super);
2323extern bool our_mnt(struct vfsmount *mnt);
2324extern __printf(2, 3)
2325int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
2326extern int super_setup_bdi(struct super_block *sb);
2327
2328extern int current_umask(void);
2329
2330extern void ihold(struct inode * inode);
2331extern void iput(struct inode *);
2332extern int generic_update_time(struct inode *, struct timespec64 *, int);
2333
2334/* /sys/fs */
2335extern struct kobject *fs_kobj;
2336
2337#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
2338
2339#ifdef CONFIG_MANDATORY_FILE_LOCKING
2340extern int locks_mandatory_locked(struct file *);
2341extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char);
2342
2343/*
2344 * Candidates for mandatory locking have the setgid bit set
2345 * but no group execute bit - an otherwise meaningless combination.
2346 */
2347
2348static inline int __mandatory_lock(struct inode *ino)
2349{
2350 return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
2351}
2352
2353/*
2354 * ... and these candidates should be on SB_MANDLOCK mounted fs,
2355 * otherwise these will be advisory locks
2356 */
2357
2358static inline int mandatory_lock(struct inode *ino)
2359{
2360 return IS_MANDLOCK(ino) && __mandatory_lock(ino);
2361}
2362
2363static inline int locks_verify_locked(struct file *file)
2364{
2365 if (mandatory_lock(locks_inode(file)))
2366 return locks_mandatory_locked(file);
2367 return 0;
2368}
2369
2370static inline int locks_verify_truncate(struct inode *inode,
2371 struct file *f,
2372 loff_t size)
2373{
2374 if (!inode->i_flctx || !mandatory_lock(inode))
2375 return 0;
2376
2377 if (size < inode->i_size) {
2378 return locks_mandatory_area(inode, f, size, inode->i_size - 1,
2379 F_WRLCK);
2380 } else {
2381 return locks_mandatory_area(inode, f, inode->i_size, size - 1,
2382 F_WRLCK);
2383 }
2384}
2385
2386#else /* !CONFIG_MANDATORY_FILE_LOCKING */
2387
2388static inline int locks_mandatory_locked(struct file *file)
2389{
2390 return 0;
2391}
2392
2393static inline int locks_mandatory_area(struct inode *inode, struct file *filp,
2394 loff_t start, loff_t end, unsigned char type)
2395{
2396 return 0;
2397}
2398
2399static inline int __mandatory_lock(struct inode *inode)
2400{
2401 return 0;
2402}
2403
2404static inline int mandatory_lock(struct inode *inode)
2405{
2406 return 0;
2407}
2408
2409static inline int locks_verify_locked(struct file *file)
2410{
2411 return 0;
2412}
2413
2414static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
2415 size_t size)
2416{
2417 return 0;
2418}
2419
2420#endif /* CONFIG_MANDATORY_FILE_LOCKING */
2421
2422
2423#ifdef CONFIG_FILE_LOCKING
2424static inline int break_lease(struct inode *inode, unsigned int mode)
2425{
2426 /*
2427 * Since this check is lockless, we must ensure that any refcounts
2428 * taken are done before checking i_flctx->flc_lease. Otherwise, we
2429 * could end up racing with tasks trying to set a new lease on this
2430 * file.
2431 */
2432 smp_mb();
2433 if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
2434 return __break_lease(inode, mode, FL_LEASE);
2435 return 0;
2436}
2437
2438static inline int break_deleg(struct inode *inode, unsigned int mode)
2439{
2440 /*
2441 * Since this check is lockless, we must ensure that any refcounts
2442 * taken are done before checking i_flctx->flc_lease. Otherwise, we
2443 * could end up racing with tasks trying to set a new lease on this
2444 * file.
2445 */
2446 smp_mb();
2447 if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
2448 return __break_lease(inode, mode, FL_DELEG);
2449 return 0;
2450}
2451
2452static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
2453{
2454 int ret;
2455
2456 ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
2457 if (ret == -EWOULDBLOCK && delegated_inode) {
2458 *delegated_inode = inode;
2459 ihold(inode);
2460 }
2461 return ret;
2462}
2463
2464static inline int break_deleg_wait(struct inode **delegated_inode)
2465{
2466 int ret;
2467
2468 ret = break_deleg(*delegated_inode, O_WRONLY);
2469 iput(*delegated_inode);
2470 *delegated_inode = NULL;
2471 return ret;
2472}
2473
2474static inline int break_layout(struct inode *inode, bool wait)
2475{
2476 smp_mb();
2477 if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
2478 return __break_lease(inode,
2479 wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
2480 FL_LAYOUT);
2481 return 0;
2482}
2483
2484#else /* !CONFIG_FILE_LOCKING */
2485static inline int break_lease(struct inode *inode, unsigned int mode)
2486{
2487 return 0;
2488}
2489
2490static inline int break_deleg(struct inode *inode, unsigned int mode)
2491{
2492 return 0;
2493}
2494
2495static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
2496{
2497 return 0;
2498}
2499
2500static inline int break_deleg_wait(struct inode **delegated_inode)
2501{
2502 BUG();
2503 return 0;
2504}
2505
2506static inline int break_layout(struct inode *inode, bool wait)
2507{
2508 return 0;
2509}
2510
2511#endif /* CONFIG_FILE_LOCKING */
2512
2513/* fs/open.c */
2514struct audit_names;
2515struct filename {
2516 const char *name; /* pointer to actual string */
2517 const __user char *uptr; /* original userland pointer */
2518 int refcnt;
2519 struct audit_names *aname;
2520 const char iname[];
2521};
David Brazdil0f672f62019-12-10 10:32:29 +00002522static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002523
2524extern long vfs_truncate(const struct path *, loff_t);
2525extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
2526 struct file *filp);
2527extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
2528 loff_t len);
2529extern long do_sys_open(int dfd, const char __user *filename, int flags,
2530 umode_t mode);
2531extern struct file *file_open_name(struct filename *, int, umode_t);
2532extern struct file *filp_open(const char *, int, umode_t);
2533extern struct file *file_open_root(struct dentry *, struct vfsmount *,
2534 const char *, int, umode_t);
2535extern struct file * dentry_open(const struct path *, int, const struct cred *);
2536extern struct file * open_with_fake_path(const struct path *, int,
2537 struct inode*, const struct cred *);
2538static inline struct file *file_clone_open(struct file *file)
2539{
2540 return dentry_open(&file->f_path, file->f_flags, file->f_cred);
2541}
2542extern int filp_close(struct file *, fl_owner_t id);
2543
2544extern struct filename *getname_flags(const char __user *, int, int *);
2545extern struct filename *getname(const char __user *);
2546extern struct filename *getname_kernel(const char *);
2547extern void putname(struct filename *name);
2548
2549extern int finish_open(struct file *file, struct dentry *dentry,
2550 int (*open)(struct inode *, struct file *));
2551extern int finish_no_open(struct file *file, struct dentry *dentry);
2552
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002553/* fs/dcache.c */
2554extern void __init vfs_caches_init_early(void);
2555extern void __init vfs_caches_init(void);
2556
2557extern struct kmem_cache *names_cachep;
2558
2559#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
2560#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
2561
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002562extern struct super_block *blockdev_superblock;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002563static inline bool sb_is_blkdev_sb(struct super_block *sb)
2564{
Olivier Deprez157378f2022-04-04 15:47:50 +02002565 return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002566}
2567
Olivier Deprez157378f2022-04-04 15:47:50 +02002568void emergency_thaw_all(void);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002569extern int sync_filesystem(struct super_block *);
2570extern const struct file_operations def_blk_fops;
2571extern const struct file_operations def_chr_fops;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002572
2573/* fs/char_dev.c */
2574#define CHRDEV_MAJOR_MAX 512
2575/* Marks the bottom of the first segment of free char majors */
2576#define CHRDEV_MAJOR_DYN_END 234
2577/* Marks the top and bottom of the second segment of free char majors */
2578#define CHRDEV_MAJOR_DYN_EXT_START 511
2579#define CHRDEV_MAJOR_DYN_EXT_END 384
2580
2581extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
2582extern int register_chrdev_region(dev_t, unsigned, const char *);
2583extern int __register_chrdev(unsigned int major, unsigned int baseminor,
2584 unsigned int count, const char *name,
2585 const struct file_operations *fops);
2586extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
2587 unsigned int count, const char *name);
2588extern void unregister_chrdev_region(dev_t, unsigned);
2589extern void chrdev_show(struct seq_file *,off_t);
2590
2591static inline int register_chrdev(unsigned int major, const char *name,
2592 const struct file_operations *fops)
2593{
2594 return __register_chrdev(major, 0, 256, name, fops);
2595}
2596
2597static inline void unregister_chrdev(unsigned int major, const char *name)
2598{
2599 __unregister_chrdev(major, 0, 256, name);
2600}
2601
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002602extern void init_special_inode(struct inode *, umode_t, dev_t);
2603
2604/* Invalid inode operations -- fs/bad_inode.c */
2605extern void make_bad_inode(struct inode *);
2606extern bool is_bad_inode(struct inode *);
2607
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002608unsigned long invalidate_mapping_pages(struct address_space *mapping,
2609 pgoff_t start, pgoff_t end);
2610
Olivier Deprez157378f2022-04-04 15:47:50 +02002611void invalidate_mapping_pagevec(struct address_space *mapping,
2612 pgoff_t start, pgoff_t end,
2613 unsigned long *nr_pagevec);
2614
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002615static inline void invalidate_remote_inode(struct inode *inode)
2616{
2617 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2618 S_ISLNK(inode->i_mode))
2619 invalidate_mapping_pages(inode->i_mapping, 0, -1);
2620}
2621extern int invalidate_inode_pages2(struct address_space *mapping);
2622extern int invalidate_inode_pages2_range(struct address_space *mapping,
2623 pgoff_t start, pgoff_t end);
2624extern int write_inode_now(struct inode *, int);
2625extern int filemap_fdatawrite(struct address_space *);
2626extern int filemap_flush(struct address_space *);
2627extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
2628extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
2629 loff_t lend);
David Brazdil0f672f62019-12-10 10:32:29 +00002630extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
2631 loff_t start_byte, loff_t end_byte);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002632
2633static inline int filemap_fdatawait(struct address_space *mapping)
2634{
2635 return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
2636}
2637
2638extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
2639 loff_t lend);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002640extern int filemap_write_and_wait_range(struct address_space *mapping,
2641 loff_t lstart, loff_t lend);
2642extern int __filemap_fdatawrite_range(struct address_space *mapping,
2643 loff_t start, loff_t end, int sync_mode);
2644extern int filemap_fdatawrite_range(struct address_space *mapping,
2645 loff_t start, loff_t end);
2646extern int filemap_check_errors(struct address_space *mapping);
2647extern void __filemap_set_wb_err(struct address_space *mapping, int err);
2648
Olivier Deprez157378f2022-04-04 15:47:50 +02002649static inline int filemap_write_and_wait(struct address_space *mapping)
2650{
2651 return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
2652}
2653
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002654extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
2655 loff_t lend);
2656extern int __must_check file_check_and_advance_wb_err(struct file *file);
2657extern int __must_check file_write_and_wait_range(struct file *file,
2658 loff_t start, loff_t end);
2659
2660static inline int file_write_and_wait(struct file *file)
2661{
2662 return file_write_and_wait_range(file, 0, LLONG_MAX);
2663}
2664
2665/**
2666 * filemap_set_wb_err - set a writeback error on an address_space
2667 * @mapping: mapping in which to set writeback error
2668 * @err: error to be set in mapping
2669 *
2670 * When writeback fails in some way, we must record that error so that
2671 * userspace can be informed when fsync and the like are called. We endeavor
2672 * to report errors on any file that was open at the time of the error. Some
2673 * internal callers also need to know when writeback errors have occurred.
2674 *
2675 * When a writeback error occurs, most filesystems will want to call
2676 * filemap_set_wb_err to record the error in the mapping so that it will be
2677 * automatically reported whenever fsync is called on the file.
2678 */
2679static inline void filemap_set_wb_err(struct address_space *mapping, int err)
2680{
2681 /* Fastpath for common case of no error */
2682 if (unlikely(err))
2683 __filemap_set_wb_err(mapping, err);
2684}
2685
2686/**
Olivier Deprez157378f2022-04-04 15:47:50 +02002687 * filemap_check_wb_err - has an error occurred since the mark was sampled?
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002688 * @mapping: mapping to check for writeback errors
2689 * @since: previously-sampled errseq_t
2690 *
2691 * Grab the errseq_t value from the mapping, and see if it has changed "since"
2692 * the given value was sampled.
2693 *
2694 * If it has then report the latest error set, otherwise return 0.
2695 */
2696static inline int filemap_check_wb_err(struct address_space *mapping,
2697 errseq_t since)
2698{
2699 return errseq_check(&mapping->wb_err, since);
2700}
2701
2702/**
2703 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
2704 * @mapping: mapping to be sampled
2705 *
2706 * Writeback errors are always reported relative to a particular sample point
2707 * in the past. This function provides those sample points.
2708 */
2709static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
2710{
2711 return errseq_sample(&mapping->wb_err);
2712}
2713
Olivier Deprez157378f2022-04-04 15:47:50 +02002714/**
2715 * file_sample_sb_err - sample the current errseq_t to test for later errors
2716 * @file: file pointer to be sampled
2717 *
2718 * Grab the most current superblock-level errseq_t value for the given
2719 * struct file.
2720 */
2721static inline errseq_t file_sample_sb_err(struct file *file)
David Brazdil0f672f62019-12-10 10:32:29 +00002722{
Olivier Deprez157378f2022-04-04 15:47:50 +02002723 return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
David Brazdil0f672f62019-12-10 10:32:29 +00002724}
2725
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002726extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
2727 int datasync);
2728extern int vfs_fsync(struct file *file, int datasync);
2729
David Brazdil0f672f62019-12-10 10:32:29 +00002730extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
2731 unsigned int flags);
2732
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002733/*
2734 * Sync the bytes written if this was a synchronous write. Expect ki_pos
2735 * to already be updated for the write, and will return either the amount
2736 * of bytes passed in, or an error if syncing the file failed.
2737 */
2738static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
2739{
2740 if (iocb->ki_flags & IOCB_DSYNC) {
2741 int ret = vfs_fsync_range(iocb->ki_filp,
2742 iocb->ki_pos - count, iocb->ki_pos - 1,
2743 (iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
2744 if (ret)
2745 return ret;
2746 }
2747
2748 return count;
2749}
2750
2751extern void emergency_sync(void);
2752extern void emergency_remount(void);
Olivier Deprez157378f2022-04-04 15:47:50 +02002753
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002754#ifdef CONFIG_BLOCK
Olivier Deprez157378f2022-04-04 15:47:50 +02002755extern int bmap(struct inode *inode, sector_t *block);
2756#else
2757static inline int bmap(struct inode *inode, sector_t *block)
2758{
2759 return -EINVAL;
2760}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002761#endif
Olivier Deprez157378f2022-04-04 15:47:50 +02002762
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002763extern int notify_change(struct dentry *, struct iattr *, struct inode **);
2764extern int inode_permission(struct inode *, int);
2765extern int generic_permission(struct inode *, int);
2766extern int __check_sticky(struct inode *dir, struct inode *inode);
2767
2768static inline bool execute_ok(struct inode *inode)
2769{
2770 return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
2771}
2772
Olivier Deprez157378f2022-04-04 15:47:50 +02002773static inline bool inode_wrong_type(const struct inode *inode, umode_t mode)
2774{
2775 return (inode->i_mode ^ mode) & S_IFMT;
2776}
2777
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002778static inline void file_start_write(struct file *file)
2779{
2780 if (!S_ISREG(file_inode(file)->i_mode))
2781 return;
Olivier Deprez157378f2022-04-04 15:47:50 +02002782 sb_start_write(file_inode(file)->i_sb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002783}
2784
2785static inline bool file_start_write_trylock(struct file *file)
2786{
2787 if (!S_ISREG(file_inode(file)->i_mode))
2788 return true;
Olivier Deprez157378f2022-04-04 15:47:50 +02002789 return sb_start_write_trylock(file_inode(file)->i_sb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002790}
2791
2792static inline void file_end_write(struct file *file)
2793{
2794 if (!S_ISREG(file_inode(file)->i_mode))
2795 return;
2796 __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
2797}
2798
2799/*
2800 * get_write_access() gets write permission for a file.
2801 * put_write_access() releases this write permission.
2802 * This is used for regular files.
2803 * We cannot support write (and maybe mmap read-write shared) accesses and
2804 * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode
2805 * can have the following values:
2806 * 0: no writers, no VM_DENYWRITE mappings
2807 * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist
2808 * > 0: (i_writecount) users are writing to the file.
2809 *
2810 * Normally we operate on that counter with atomic_{inc,dec} and it's safe
2811 * except for the cases where we don't hold i_writecount yet. Then we need to
2812 * use {get,deny}_write_access() - these functions check the sign and refuse
2813 * to do the change if sign is wrong.
2814 */
2815static inline int get_write_access(struct inode *inode)
2816{
2817 return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY;
2818}
2819static inline int deny_write_access(struct file *file)
2820{
2821 struct inode *inode = file_inode(file);
2822 return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY;
2823}
2824static inline void put_write_access(struct inode * inode)
2825{
2826 atomic_dec(&inode->i_writecount);
2827}
2828static inline void allow_write_access(struct file *file)
2829{
2830 if (file)
2831 atomic_inc(&file_inode(file)->i_writecount);
2832}
2833static inline bool inode_is_open_for_write(const struct inode *inode)
2834{
2835 return atomic_read(&inode->i_writecount) > 0;
2836}
2837
David Brazdil0f672f62019-12-10 10:32:29 +00002838#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002839static inline void i_readcount_dec(struct inode *inode)
2840{
2841 BUG_ON(!atomic_read(&inode->i_readcount));
2842 atomic_dec(&inode->i_readcount);
2843}
2844static inline void i_readcount_inc(struct inode *inode)
2845{
2846 atomic_inc(&inode->i_readcount);
2847}
2848#else
2849static inline void i_readcount_dec(struct inode *inode)
2850{
2851 return;
2852}
2853static inline void i_readcount_inc(struct inode *inode)
2854{
2855 return;
2856}
2857#endif
2858extern int do_pipe_flags(int *, int);
2859
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002860extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
Olivier Deprez157378f2022-04-04 15:47:50 +02002861ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002862extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *);
2863extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *);
2864extern struct file * open_exec(const char *);
2865
2866/* fs/dcache.c -- generic fs support functions */
2867extern bool is_subdir(struct dentry *, struct dentry *);
2868extern bool path_is_under(const struct path *, const struct path *);
2869
2870extern char *file_path(struct file *, char *, int);
2871
2872#include <linux/err.h>
2873
2874/* needed for stackable file system support */
2875extern loff_t default_llseek(struct file *file, loff_t offset, int whence);
2876
2877extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
2878
2879extern int inode_init_always(struct super_block *, struct inode *);
2880extern void inode_init_once(struct inode *);
2881extern void address_space_init_once(struct address_space *mapping);
2882extern struct inode * igrab(struct inode *);
2883extern ino_t iunique(struct super_block *, ino_t);
2884extern int inode_needs_sync(struct inode *inode);
2885extern int generic_delete_inode(struct inode *inode);
2886static inline int generic_drop_inode(struct inode *inode)
2887{
2888 return !inode->i_nlink || inode_unhashed(inode);
2889}
Olivier Deprez157378f2022-04-04 15:47:50 +02002890extern void d_mark_dontcache(struct inode *inode);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002891
2892extern struct inode *ilookup5_nowait(struct super_block *sb,
2893 unsigned long hashval, int (*test)(struct inode *, void *),
2894 void *data);
2895extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
2896 int (*test)(struct inode *, void *), void *data);
2897extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
2898
2899extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
2900 int (*test)(struct inode *, void *),
2901 int (*set)(struct inode *, void *),
2902 void *data);
2903extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
2904extern struct inode * iget_locked(struct super_block *, unsigned long);
2905extern struct inode *find_inode_nowait(struct super_block *,
2906 unsigned long,
2907 int (*match)(struct inode *,
2908 unsigned long, void *),
2909 void *data);
Olivier Deprez157378f2022-04-04 15:47:50 +02002910extern struct inode *find_inode_rcu(struct super_block *, unsigned long,
2911 int (*)(struct inode *, void *), void *);
2912extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002913extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
2914extern int insert_inode_locked(struct inode *);
2915#ifdef CONFIG_DEBUG_LOCK_ALLOC
2916extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
2917#else
2918static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
2919#endif
2920extern void unlock_new_inode(struct inode *);
2921extern void discard_new_inode(struct inode *);
2922extern unsigned int get_next_ino(void);
2923extern void evict_inodes(struct super_block *sb);
2924
Olivier Deprez157378f2022-04-04 15:47:50 +02002925/*
2926 * Userspace may rely on the the inode number being non-zero. For example, glibc
2927 * simply ignores files with zero i_ino in unlink() and other places.
2928 *
2929 * As an additional complication, if userspace was compiled with
2930 * _FILE_OFFSET_BITS=32 on a 64-bit kernel we'll only end up reading out the
2931 * lower 32 bits, so we need to check that those aren't zero explicitly. With
2932 * _FILE_OFFSET_BITS=64, this may cause some harmless false-negatives, but
2933 * better safe than sorry.
2934 */
2935static inline bool is_zero_ino(ino_t ino)
2936{
2937 return (u32)ino == 0;
2938}
2939
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002940extern void __iget(struct inode * inode);
2941extern void iget_failed(struct inode *);
2942extern void clear_inode(struct inode *);
2943extern void __destroy_inode(struct inode *);
2944extern struct inode *new_inode_pseudo(struct super_block *sb);
2945extern struct inode *new_inode(struct super_block *sb);
2946extern void free_inode_nonrcu(struct inode *inode);
2947extern int should_remove_suid(struct dentry *);
2948extern int file_remove_privs(struct file *);
2949
2950extern void __insert_inode_hash(struct inode *, unsigned long hashval);
2951static inline void insert_inode_hash(struct inode *inode)
2952{
2953 __insert_inode_hash(inode, inode->i_ino);
2954}
2955
2956extern void __remove_inode_hash(struct inode *);
2957static inline void remove_inode_hash(struct inode *inode)
2958{
2959 if (!inode_unhashed(inode) && !hlist_fake(&inode->i_hash))
2960 __remove_inode_hash(inode);
2961}
2962
2963extern void inode_sb_list_add(struct inode *inode);
2964
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002965extern int sb_set_blocksize(struct super_block *, int);
2966extern int sb_min_blocksize(struct super_block *, int);
2967
2968extern int generic_file_mmap(struct file *, struct vm_area_struct *);
2969extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
2970extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
Olivier Deprez157378f2022-04-04 15:47:50 +02002971extern int generic_write_check_limits(struct file *file, loff_t pos,
2972 loff_t *count);
David Brazdil0f672f62019-12-10 10:32:29 +00002973extern int generic_file_rw_checks(struct file *file_in, struct file *file_out);
Olivier Deprez157378f2022-04-04 15:47:50 +02002974extern ssize_t generic_file_buffered_read(struct kiocb *iocb,
2975 struct iov_iter *to, ssize_t already_read);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002976extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
2977extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
2978extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
2979extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
2980extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
2981
2982ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
2983 rwf_t flags);
2984ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
2985 rwf_t flags);
Olivier Deprez157378f2022-04-04 15:47:50 +02002986ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
2987 struct iov_iter *iter);
2988ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
2989 struct iov_iter *iter);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002990
2991/* fs/block_dev.c */
2992extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
2993extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
2994extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
2995 int datasync);
2996extern void block_sync_page(struct page *page);
2997
2998/* fs/splice.c */
2999extern ssize_t generic_file_splice_read(struct file *, loff_t *,
3000 struct pipe_inode_info *, size_t, unsigned int);
3001extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
3002 struct file *, loff_t *, size_t, unsigned int);
3003extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
3004 struct file *out, loff_t *, size_t len, unsigned int flags);
3005extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
3006 loff_t *opos, size_t len, unsigned int flags);
3007
3008
3009extern void
3010file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
3011extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
3012extern loff_t no_llseek(struct file *file, loff_t offset, int whence);
3013extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
3014extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
3015extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
3016 int whence, loff_t maxsize, loff_t eof);
3017extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
3018 int whence, loff_t size);
3019extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
3020extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
3021extern int generic_file_open(struct inode * inode, struct file * filp);
3022extern int nonseekable_open(struct inode * inode, struct file * filp);
David Brazdil0f672f62019-12-10 10:32:29 +00003023extern int stream_open(struct inode * inode, struct file * filp);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003024
3025#ifdef CONFIG_BLOCK
3026typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
3027 loff_t file_offset);
3028
3029enum {
3030 /* need locking between buffered and direct access */
3031 DIO_LOCKING = 0x01,
3032
3033 /* filesystem does not support filling holes */
3034 DIO_SKIP_HOLES = 0x02,
3035};
3036
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003037ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
3038 struct block_device *bdev, struct iov_iter *iter,
3039 get_block_t get_block,
3040 dio_iodone_t end_io, dio_submit_t submit_io,
3041 int flags);
3042
3043static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
3044 struct inode *inode,
3045 struct iov_iter *iter,
3046 get_block_t get_block)
3047{
3048 return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
3049 get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
3050}
3051#endif
3052
3053void inode_dio_wait(struct inode *inode);
3054
3055/*
3056 * inode_dio_begin - signal start of a direct I/O requests
3057 * @inode: inode the direct I/O happens on
3058 *
3059 * This is called once we've finished processing a direct I/O request,
3060 * and is used to wake up callers waiting for direct I/O to be quiesced.
3061 */
3062static inline void inode_dio_begin(struct inode *inode)
3063{
3064 atomic_inc(&inode->i_dio_count);
3065}
3066
3067/*
3068 * inode_dio_end - signal finish of a direct I/O requests
3069 * @inode: inode the direct I/O happens on
3070 *
3071 * This is called once we've finished processing a direct I/O request,
3072 * and is used to wake up callers waiting for direct I/O to be quiesced.
3073 */
3074static inline void inode_dio_end(struct inode *inode)
3075{
3076 if (atomic_dec_and_test(&inode->i_dio_count))
3077 wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
3078}
3079
Olivier Deprez157378f2022-04-04 15:47:50 +02003080/*
3081 * Warn about a page cache invalidation failure diring a direct I/O write.
3082 */
3083void dio_warn_stale_pagecache(struct file *filp);
3084
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003085extern void inode_set_flags(struct inode *inode, unsigned int flags,
3086 unsigned int mask);
3087
3088extern const struct file_operations generic_ro_fops;
3089
3090#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
3091
3092extern int readlink_copy(char __user *, int, const char *);
3093extern int page_readlink(struct dentry *, char __user *, int);
3094extern const char *page_get_link(struct dentry *, struct inode *,
3095 struct delayed_call *);
3096extern void page_put_link(void *);
3097extern int __page_symlink(struct inode *inode, const char *symname, int len,
3098 int nofs);
3099extern int page_symlink(struct inode *inode, const char *symname, int len);
3100extern const struct inode_operations page_symlink_inode_operations;
3101extern void kfree_link(void *);
3102extern void generic_fillattr(struct inode *, struct kstat *);
3103extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
3104extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
3105void __inode_add_bytes(struct inode *inode, loff_t bytes);
3106void inode_add_bytes(struct inode *inode, loff_t bytes);
3107void __inode_sub_bytes(struct inode *inode, loff_t bytes);
3108void inode_sub_bytes(struct inode *inode, loff_t bytes);
3109static inline loff_t __inode_get_bytes(struct inode *inode)
3110{
3111 return (((loff_t)inode->i_blocks) << 9) + inode->i_bytes;
3112}
3113loff_t inode_get_bytes(struct inode *inode);
3114void inode_set_bytes(struct inode *inode, loff_t bytes);
3115const char *simple_get_link(struct dentry *, struct inode *,
3116 struct delayed_call *);
3117extern const struct inode_operations simple_symlink_inode_operations;
3118
3119extern int iterate_dir(struct file *, struct dir_context *);
3120
Olivier Deprez157378f2022-04-04 15:47:50 +02003121int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat,
3122 int flags);
3123int vfs_fstat(int fd, struct kstat *stat);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003124
3125static inline int vfs_stat(const char __user *filename, struct kstat *stat)
3126{
Olivier Deprez157378f2022-04-04 15:47:50 +02003127 return vfs_fstatat(AT_FDCWD, filename, stat, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003128}
3129static inline int vfs_lstat(const char __user *name, struct kstat *stat)
3130{
Olivier Deprez157378f2022-04-04 15:47:50 +02003131 return vfs_fstatat(AT_FDCWD, name, stat, AT_SYMLINK_NOFOLLOW);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003132}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003133
3134extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
3135extern int vfs_readlink(struct dentry *, char __user *, int);
3136
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003137extern struct file_system_type *get_filesystem(struct file_system_type *fs);
3138extern void put_filesystem(struct file_system_type *fs);
3139extern struct file_system_type *get_fs_type(const char *name);
3140extern struct super_block *get_super(struct block_device *);
3141extern struct super_block *get_super_thawed(struct block_device *);
3142extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev);
3143extern struct super_block *get_active_super(struct block_device *bdev);
3144extern void drop_super(struct super_block *sb);
3145extern void drop_super_exclusive(struct super_block *sb);
3146extern void iterate_supers(void (*)(struct super_block *, void *), void *);
3147extern void iterate_supers_type(struct file_system_type *,
3148 void (*)(struct super_block *, void *), void *);
3149
3150extern int dcache_dir_open(struct inode *, struct file *);
3151extern int dcache_dir_close(struct inode *, struct file *);
3152extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
3153extern int dcache_readdir(struct file *, struct dir_context *);
3154extern int simple_setattr(struct dentry *, struct iattr *);
3155extern int simple_getattr(const struct path *, struct kstat *, u32, unsigned int);
3156extern int simple_statfs(struct dentry *, struct kstatfs *);
3157extern int simple_open(struct inode *inode, struct file *file);
3158extern int simple_link(struct dentry *, struct inode *, struct dentry *);
3159extern int simple_unlink(struct inode *, struct dentry *);
3160extern int simple_rmdir(struct inode *, struct dentry *);
3161extern int simple_rename(struct inode *, struct dentry *,
3162 struct inode *, struct dentry *, unsigned int);
Olivier Deprez157378f2022-04-04 15:47:50 +02003163extern void simple_recursive_removal(struct dentry *,
3164 void (*callback)(struct dentry *));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003165extern int noop_fsync(struct file *, loff_t, loff_t, int);
3166extern int noop_set_page_dirty(struct page *page);
3167extern void noop_invalidatepage(struct page *page, unsigned int offset,
3168 unsigned int length);
3169extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
3170extern int simple_empty(struct dentry *);
3171extern int simple_readpage(struct file *file, struct page *page);
3172extern int simple_write_begin(struct file *file, struct address_space *mapping,
3173 loff_t pos, unsigned len, unsigned flags,
3174 struct page **pagep, void **fsdata);
3175extern int simple_write_end(struct file *file, struct address_space *mapping,
3176 loff_t pos, unsigned len, unsigned copied,
3177 struct page *page, void *fsdata);
3178extern int always_delete_dentry(const struct dentry *);
3179extern struct inode *alloc_anon_inode(struct super_block *);
3180extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
3181extern const struct dentry_operations simple_dentry_operations;
3182
3183extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
3184extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
3185extern const struct file_operations simple_dir_operations;
3186extern const struct inode_operations simple_dir_inode_operations;
3187extern void make_empty_dir_inode(struct inode *inode);
3188extern bool is_empty_dir_inode(struct inode *inode);
3189struct tree_descr { const char *name; const struct file_operations *ops; int mode; };
3190struct dentry *d_alloc_name(struct dentry *, const char *);
3191extern int simple_fill_super(struct super_block *, unsigned long,
3192 const struct tree_descr *);
3193extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
3194extern void simple_release_fs(struct vfsmount **mount, int *count);
3195
3196extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
3197 loff_t *ppos, const void *from, size_t available);
3198extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
3199 const void __user *from, size_t count);
3200
3201extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
3202extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
3203
3204extern int generic_check_addressable(unsigned, u64);
3205
Olivier Deprez157378f2022-04-04 15:47:50 +02003206#ifdef CONFIG_UNICODE
3207extern int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str);
3208extern int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
3209 const char *str, const struct qstr *name);
3210#endif
3211
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003212#ifdef CONFIG_MIGRATION
3213extern int buffer_migrate_page(struct address_space *,
3214 struct page *, struct page *,
3215 enum migrate_mode);
David Brazdil0f672f62019-12-10 10:32:29 +00003216extern int buffer_migrate_page_norefs(struct address_space *,
3217 struct page *, struct page *,
3218 enum migrate_mode);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003219#else
3220#define buffer_migrate_page NULL
David Brazdil0f672f62019-12-10 10:32:29 +00003221#define buffer_migrate_page_norefs NULL
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003222#endif
3223
3224extern int setattr_prepare(struct dentry *, struct iattr *);
3225extern int inode_newsize_ok(const struct inode *, loff_t offset);
3226extern void setattr_copy(struct inode *inode, const struct iattr *attr);
3227
3228extern int file_update_time(struct file *file);
3229
Olivier Deprez157378f2022-04-04 15:47:50 +02003230static inline bool vma_is_dax(const struct vm_area_struct *vma)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003231{
3232 return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
3233}
3234
3235static inline bool vma_is_fsdax(struct vm_area_struct *vma)
3236{
3237 struct inode *inode;
3238
3239 if (!vma->vm_file)
3240 return false;
3241 if (!vma_is_dax(vma))
3242 return false;
3243 inode = file_inode(vma->vm_file);
3244 if (S_ISCHR(inode->i_mode))
3245 return false; /* device-dax */
3246 return true;
3247}
3248
3249static inline int iocb_flags(struct file *file)
3250{
3251 int res = 0;
3252 if (file->f_flags & O_APPEND)
3253 res |= IOCB_APPEND;
Olivier Deprez157378f2022-04-04 15:47:50 +02003254 if (file->f_flags & O_DIRECT)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003255 res |= IOCB_DIRECT;
3256 if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
3257 res |= IOCB_DSYNC;
3258 if (file->f_flags & __O_SYNC)
3259 res |= IOCB_SYNC;
3260 return res;
3261}
3262
3263static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
3264{
Olivier Deprez157378f2022-04-04 15:47:50 +02003265 int kiocb_flags = 0;
3266
3267 /* make sure there's no overlap between RWF and private IOCB flags */
3268 BUILD_BUG_ON((__force int) RWF_SUPPORTED & IOCB_EVENTFD);
3269
3270 if (!flags)
3271 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003272 if (unlikely(flags & ~RWF_SUPPORTED))
3273 return -EOPNOTSUPP;
3274
3275 if (flags & RWF_NOWAIT) {
3276 if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
3277 return -EOPNOTSUPP;
Olivier Deprez157378f2022-04-04 15:47:50 +02003278 kiocb_flags |= IOCB_NOIO;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003279 }
Olivier Deprez157378f2022-04-04 15:47:50 +02003280 kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003281 if (flags & RWF_SYNC)
Olivier Deprez157378f2022-04-04 15:47:50 +02003282 kiocb_flags |= IOCB_DSYNC;
3283
3284 ki->ki_flags |= kiocb_flags;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003285 return 0;
3286}
3287
3288static inline ino_t parent_ino(struct dentry *dentry)
3289{
3290 ino_t res;
3291
3292 /*
3293 * Don't strictly need d_lock here? If the parent ino could change
3294 * then surely we'd have a deeper race in the caller?
3295 */
3296 spin_lock(&dentry->d_lock);
3297 res = dentry->d_parent->d_inode->i_ino;
3298 spin_unlock(&dentry->d_lock);
3299 return res;
3300}
3301
3302/* Transaction based IO helpers */
3303
3304/*
3305 * An argresp is stored in an allocated page and holds the
3306 * size of the argument or response, along with its content
3307 */
3308struct simple_transaction_argresp {
3309 ssize_t size;
Olivier Deprez157378f2022-04-04 15:47:50 +02003310 char data[];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003311};
3312
3313#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
3314
3315char *simple_transaction_get(struct file *file, const char __user *buf,
3316 size_t size);
3317ssize_t simple_transaction_read(struct file *file, char __user *buf,
3318 size_t size, loff_t *pos);
3319int simple_transaction_release(struct inode *inode, struct file *file);
3320
3321void simple_transaction_set(struct file *file, size_t n);
3322
3323/*
3324 * simple attribute files
3325 *
3326 * These attributes behave similar to those in sysfs:
3327 *
3328 * Writing to an attribute immediately sets a value, an open file can be
3329 * written to multiple times.
3330 *
3331 * Reading from an attribute creates a buffer from the value that might get
3332 * read with multiple read calls. When the attribute has been read
3333 * completely, no further read calls are possible until the file is opened
3334 * again.
3335 *
3336 * All attributes contain a text representation of a numeric value
3337 * that are accessed with the get() and set() functions.
3338 */
3339#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
3340static int __fops ## _open(struct inode *inode, struct file *file) \
3341{ \
3342 __simple_attr_check_format(__fmt, 0ull); \
3343 return simple_attr_open(inode, file, __get, __set, __fmt); \
3344} \
3345static const struct file_operations __fops = { \
3346 .owner = THIS_MODULE, \
3347 .open = __fops ## _open, \
3348 .release = simple_attr_release, \
3349 .read = simple_attr_read, \
3350 .write = simple_attr_write, \
3351 .llseek = generic_file_llseek, \
3352}
3353
3354static inline __printf(1, 2)
3355void __simple_attr_check_format(const char *fmt, ...)
3356{
3357 /* don't do anything, just let the compiler check the arguments; */
3358}
3359
3360int simple_attr_open(struct inode *inode, struct file *file,
3361 int (*get)(void *, u64 *), int (*set)(void *, u64),
3362 const char *fmt);
3363int simple_attr_release(struct inode *inode, struct file *file);
3364ssize_t simple_attr_read(struct file *file, char __user *buf,
3365 size_t len, loff_t *ppos);
3366ssize_t simple_attr_write(struct file *file, const char __user *buf,
3367 size_t len, loff_t *ppos);
3368
3369struct ctl_table;
3370int proc_nr_files(struct ctl_table *table, int write,
Olivier Deprez157378f2022-04-04 15:47:50 +02003371 void *buffer, size_t *lenp, loff_t *ppos);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003372int proc_nr_dentry(struct ctl_table *table, int write,
Olivier Deprez157378f2022-04-04 15:47:50 +02003373 void *buffer, size_t *lenp, loff_t *ppos);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003374int proc_nr_inodes(struct ctl_table *table, int write,
Olivier Deprez157378f2022-04-04 15:47:50 +02003375 void *buffer, size_t *lenp, loff_t *ppos);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003376int __init get_filesystem_list(char *buf);
3377
3378#define __FMODE_EXEC ((__force int) FMODE_EXEC)
3379#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
3380
3381#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
3382#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
3383 (flag & __FMODE_NONOTIFY)))
3384
3385static inline bool is_sxid(umode_t mode)
3386{
3387 return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
3388}
3389
3390static inline int check_sticky(struct inode *dir, struct inode *inode)
3391{
3392 if (!(dir->i_mode & S_ISVTX))
3393 return 0;
3394
3395 return __check_sticky(dir, inode);
3396}
3397
3398static inline void inode_has_no_xattr(struct inode *inode)
3399{
3400 if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & SB_NOSEC))
3401 inode->i_flags |= S_NOSEC;
3402}
3403
3404static inline bool is_root_inode(struct inode *inode)
3405{
3406 return inode == inode->i_sb->s_root->d_inode;
3407}
3408
3409static inline bool dir_emit(struct dir_context *ctx,
3410 const char *name, int namelen,
3411 u64 ino, unsigned type)
3412{
3413 return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
3414}
3415static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
3416{
3417 return ctx->actor(ctx, ".", 1, ctx->pos,
3418 file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
3419}
3420static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
3421{
3422 return ctx->actor(ctx, "..", 2, ctx->pos,
3423 parent_ino(file->f_path.dentry), DT_DIR) == 0;
3424}
3425static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
3426{
3427 if (ctx->pos == 0) {
3428 if (!dir_emit_dot(file, ctx))
3429 return false;
3430 ctx->pos = 1;
3431 }
3432 if (ctx->pos == 1) {
3433 if (!dir_emit_dotdot(file, ctx))
3434 return false;
3435 ctx->pos = 2;
3436 }
3437 return true;
3438}
3439static inline bool dir_relax(struct inode *inode)
3440{
3441 inode_unlock(inode);
3442 inode_lock(inode);
3443 return !IS_DEADDIR(inode);
3444}
3445
3446static inline bool dir_relax_shared(struct inode *inode)
3447{
3448 inode_unlock_shared(inode);
3449 inode_lock_shared(inode);
3450 return !IS_DEADDIR(inode);
3451}
3452
3453extern bool path_noexec(const struct path *path);
3454extern void inode_nohighmem(struct inode *inode);
3455
3456/* mm/fadvise.c */
3457extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
3458 int advice);
David Brazdil0f672f62019-12-10 10:32:29 +00003459extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
3460 int advice);
3461
David Brazdil0f672f62019-12-10 10:32:29 +00003462int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
3463 unsigned int flags);
3464
3465int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa,
3466 struct fsxattr *fa);
3467
3468static inline void simple_fill_fsxattr(struct fsxattr *fa, __u32 xflags)
3469{
3470 memset(fa, 0, sizeof(*fa));
3471 fa->fsx_xflags = xflags;
3472}
3473
3474/*
3475 * Flush file data before changing attributes. Caller must hold any locks
3476 * required to prevent further writes to this file until we're done setting
3477 * flags.
3478 */
3479static inline int inode_drain_writes(struct inode *inode)
3480{
3481 inode_dio_wait(inode);
3482 return filemap_write_and_wait(inode->i_mapping);
3483}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003484
3485#endif /* _LINUX_FS_H */