/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FS_CEPH_SUPER_H
#define _FS_CEPH_SUPER_H

#include <linux/ceph/ceph_debug.h>

#include <asm/unaligned.h>
#include <linux/backing-dev.h>
#include <linux/completion.h>
#include <linux/exportfs.h>
#include <linux/fs.h>
#include <linux/mempool.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/posix_acl.h>
#include <linux/refcount.h>
#include <linux/security.h>

#include <linux/ceph/libceph.h>

#ifdef CONFIG_CEPH_FSCACHE
#include <linux/fscache.h>
#endif

/* f_type in struct statfs */
#define CEPH_SUPER_MAGIC 0x00c36400

/* large granularity for statfs utilization stats to facilitate
 * large volume sizes on 32-bit machines. */
#define CEPH_BLOCK_SHIFT	22  /* 4 MB */
#define CEPH_BLOCK		(1 << CEPH_BLOCK_SHIFT)
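
/*
 * Illustrative sketch (not part of the original header): with the 4 MB
 * statfs block size above, a hypothetical 16 TB volume would be
 * presented to statfs(2) as
 *
 *	f_bsize  = CEPH_BLOCK                  = 4194304 bytes
 *	f_blocks = (16 TB) >> CEPH_BLOCK_SHIFT = 4194304 blocks
 *
 * which fits comfortably in a 32-bit block count, whereas a 1 KB or
 * 4 KB block size would overflow it for multi-terabyte volumes.
 */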

#define CEPH_MOUNT_OPT_CLEANRECOVER	(1<<1)  /* auto reconnect (clean mode) after blocklisted */
#define CEPH_MOUNT_OPT_DIRSTAT		(1<<4)  /* `cat dirname` for stats */
#define CEPH_MOUNT_OPT_RBYTES		(1<<5)  /* dir st_bytes = rbytes */
#define CEPH_MOUNT_OPT_NOASYNCREADDIR	(1<<7)  /* no dcache readdir */
#define CEPH_MOUNT_OPT_INO32		(1<<8)  /* 32 bit inos */
#define CEPH_MOUNT_OPT_DCACHE		(1<<9)  /* use dcache for readdir etc */
#define CEPH_MOUNT_OPT_FSCACHE		(1<<10) /* use fscache */
#define CEPH_MOUNT_OPT_NOPOOLPERM	(1<<11) /* no pool permission check */
#define CEPH_MOUNT_OPT_MOUNTWAIT	(1<<12) /* mount waits if no mds is up */
#define CEPH_MOUNT_OPT_NOQUOTADF	(1<<13) /* no root dir quota in statfs */
#define CEPH_MOUNT_OPT_NOCOPYFROM	(1<<14) /* don't use RADOS 'copy-from' op */
#define CEPH_MOUNT_OPT_ASYNC_DIROPS	(1<<15) /* allow async directory ops */

#define CEPH_MOUNT_OPT_DEFAULT			\
	(CEPH_MOUNT_OPT_DCACHE |		\
	 CEPH_MOUNT_OPT_NOCOPYFROM)

#define ceph_set_mount_opt(fsc, opt) \
	(fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt
#define ceph_clear_mount_opt(fsc, opt) \
	(fsc)->mount_options->flags &= ~CEPH_MOUNT_OPT_##opt
#define ceph_test_mount_opt(fsc, opt) \
	(!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt))
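
/*
 * Illustrative usage sketch (not from the original header), assuming a
 * valid struct ceph_fs_client pointer `fsc`:
 *
 *	if (ceph_test_mount_opt(fsc, NOASYNCREADDIR))
 *		return -EAGAIN;
 *	ceph_clear_mount_opt(fsc, ASYNC_DIROPS);
 *
 * The ## token pasting means callers name options by their suffix
 * (e.g. DCACHE), not by the full CEPH_MOUNT_OPT_DCACHE constant.
 */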

/* max size of osd read request, limited by libceph */
#define CEPH_MAX_READ_SIZE		CEPH_MSG_MAX_DATA_LEN
/* osd has a configurable limitation on max write size.
 * CEPH_MSG_MAX_DATA_LEN should be small enough. */
#define CEPH_MAX_WRITE_SIZE		CEPH_MSG_MAX_DATA_LEN
#define CEPH_RASIZE_DEFAULT		(8192*1024)  /* max readahead */
#define CEPH_MAX_READDIR_DEFAULT	1024
#define CEPH_MAX_READDIR_BYTES_DEFAULT	(512*1024)
#define CEPH_SNAPDIRNAME_DEFAULT	".snap"

/*
 * Delay telling the MDS we no longer want caps, in case we reopen
 * the file.  Delay a minimum amount of time, even if we send a cap
 * message for some other reason.  Otherwise, take the opportunity to
 * update the mds to avoid sending another message later.
 */
#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT	5   /* cap release delay */
#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT	60  /* cap release delay */

struct ceph_mount_options {
	unsigned int flags;

	unsigned int wsize;            /* max write size */
	unsigned int rsize;            /* max read size */
	unsigned int rasize;           /* max readahead */
	unsigned int congestion_kb;    /* max writeback in flight */
	unsigned int caps_wanted_delay_min, caps_wanted_delay_max;
	int caps_max;
	unsigned int max_readdir;       /* max readdir result (entries) */
	unsigned int max_readdir_bytes; /* max readdir result (bytes) */

	/*
	 * everything above this point can be memcmp'd; everything below
	 * is handled in compare_mount_options()
	 */

	char *snapdir_name;   /* default ".snap" */
	char *mds_namespace;  /* default NULL */
	char *server_path;    /* default NULL (means "/") */
	char *fscache_uniq;   /* default NULL */
};

struct ceph_fs_client {
	struct super_block *sb;

	struct list_head metric_wakeup;

	struct ceph_mount_options *mount_options;
	struct ceph_client *client;

	unsigned long mount_state;

	unsigned long last_auto_reconnect;
	bool blocklisted;

	bool have_copy_from2;

	u32 filp_gen;
	loff_t max_file_size;

	struct ceph_mds_client *mdsc;

	atomic_long_t writeback_count;

	struct workqueue_struct *inode_wq;
	struct workqueue_struct *cap_wq;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_dentry_lru, *debugfs_caps;
	struct dentry *debugfs_congestion_kb;
	struct dentry *debugfs_bdi;
	struct dentry *debugfs_mdsc, *debugfs_mdsmap;
	struct dentry *debugfs_metric;
	struct dentry *debugfs_mds_sessions;
#endif

#ifdef CONFIG_CEPH_FSCACHE
	struct fscache_cookie *fscache;
#endif
};


/*
 * File i/o capability.  This tracks shared state with the metadata
 * server that allows us to cache or writeback attributes or to read
 * and write data.  For any given inode, we should have one or more
 * capabilities, one issued by each metadata server, and our
 * cumulative access is the OR of all issued capabilities.
 *
 * Each cap is referenced by the inode's i_caps rbtree and by per-mds
 * session capability lists.
 */
struct ceph_cap {
	struct ceph_inode_info *ci;
	struct rb_node ci_node;           /* per-ci cap tree */
	struct ceph_mds_session *session;
	struct list_head session_caps;    /* per-session caplist */
	u64 cap_id;       /* unique cap id (mds provided) */
	union {
		/* in-use caps */
		struct {
			int issued;       /* latest, from the mds */
			int implemented;  /* implemented superset of
					     issued (for revocation) */
			int mds;          /* mds index for this cap */
			int mds_wanted;   /* caps wanted from this mds */
		};
		/* caps to release */
		struct {
			u64 cap_ino;
			int queue_release;
		};
	};
	u32 seq, issue_seq, mseq;
	u32 cap_gen;      /* active/stale cycle */
	unsigned long last_used;
	struct list_head caps_item;
};
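
/*
 * Illustrative sketch (not from the original header) of the "cumulative
 * access is the OR of all issued capabilities" rule described above,
 * roughly what __ceph_caps_issued() computes under i_ceph_lock:
 *
 *	int issued = 0;
 *	struct rb_node *p;
 *
 *	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
 *		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
 *		issued |= cap->issued;
 *	}
 */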

#define CHECK_CAPS_AUTHONLY	1  /* only check auth cap */
#define CHECK_CAPS_FLUSH	2  /* flush any dirty caps */
#define CHECK_CAPS_NOINVAL	4  /* don't invalidate pagecache */

struct ceph_cap_flush {
	u64 tid;
	int caps;
	bool wake;       /* wake up flush waiters when finished? */
	bool is_capsnap; /* true means capsnap */
	struct list_head g_list; // global
	struct list_head i_list; // per inode
};

/*
 * Snapped cap state that is pending flush to mds.  When a snapshot occurs,
 * we first complete any in-process sync writes and writeback any dirty
 * data before flushing the snapped state (tracked here) back to the MDS.
 */
struct ceph_cap_snap {
	refcount_t nref;
	struct list_head ci_item;

	struct ceph_cap_flush cap_flush;

	u64 follows;
	int issued, dirty;
	struct ceph_snap_context *context;

	umode_t mode;
	kuid_t uid;
	kgid_t gid;

	struct ceph_buffer *xattr_blob;
	u64 xattr_version;

	u64 size;
	u64 change_attr;
	struct timespec64 mtime, atime, ctime, btime;
	u64 time_warp_seq;
	u64 truncate_size;
	u32 truncate_seq;
	int writing;      /* a sync write is still in progress */
	int dirty_pages;  /* dirty pages awaiting writeback */
	bool inline_data;
	bool need_flush;
};

static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
{
	if (refcount_dec_and_test(&capsnap->nref)) {
		if (capsnap->xattr_blob)
			ceph_buffer_put(capsnap->xattr_blob);
		kfree(capsnap);
	}
}

/*
 * The frag tree describes how a directory is fragmented, potentially across
 * multiple metadata servers.  It is also used to indicate points where
 * metadata authority is delegated, and whether/where metadata is replicated.
 *
 * A _leaf_ frag will be present in the i_fragtree IFF there is
 * delegation info.  That is, if mds >= 0 || ndist > 0.
 */
#define CEPH_MAX_DIRFRAG_REP 4

struct ceph_inode_frag {
	struct rb_node node;

	/* fragtree state */
	u32 frag;
	int split_by;  /* i.e. 2^(split_by) children */

	/* delegation and replication info */
	int mds;       /* -1 if same authority as parent */
	int ndist;     /* >0 if replicated */
	int dist[CEPH_MAX_DIRFRAG_REP];
};
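
/*
 * Worked example (not from the original header): a frag with
 * split_by == 2 has 2^2 == 4 children.  A fragment covering the whole
 * dentry hash space that is split this way yields four child frags,
 * each responsible for a quarter of the hash range, and each child may
 * in turn carry its own mds/ndist delegation info.
 */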

/*
 * We cache inode xattrs as an encoded blob until they are first used,
 * at which point we parse them into an rbtree.
 */
struct ceph_inode_xattr {
	struct rb_node node;

	const char *name;
	int name_len;
	const char *val;
	int val_len;
	int dirty;

	int should_free_name;
	int should_free_val;
};

/*
 * Ceph dentry state
 */
struct ceph_dentry_info {
	struct dentry *dentry;
	struct ceph_mds_session *lease_session;
	struct list_head lease_list;
	unsigned flags;
	int lease_shared_gen;
	u32 lease_gen;
	u32 lease_seq;
	unsigned long lease_renew_after, lease_renew_from;
	unsigned long time;
	u64 offset;
};

#define CEPH_DENTRY_REFERENCED		1
#define CEPH_DENTRY_LEASE_LIST		2
#define CEPH_DENTRY_SHRINK_LIST		4
#define CEPH_DENTRY_PRIMARY_LINK	8

struct ceph_inode_xattrs_info {
	/*
	 * (still encoded) xattr blob. we avoid the overhead of parsing
	 * this until someone actually calls getxattr, etc.
	 *
	 * blob->vec.iov_len == 4 implies there are no xattrs; blob ==
	 * NULL means we don't know.
	 */
	struct ceph_buffer *blob, *prealloc_blob;

	struct rb_root index;
	bool dirty;
	int count;
	int names_size;
	int vals_size;
	u64 version, index_version;
};
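
/*
 * Aside (not from the original header): the iov_len == 4 special case
 * above falls out of the wire encoding, where the blob begins with a
 * 32-bit entry count; a blob holding only that 4-byte count describes
 * zero xattrs.  The three states are therefore: blob == NULL (unknown,
 * must ask the MDS), a 4-byte blob (known empty), or a larger blob
 * (parsed into the rbtree on first getxattr).
 */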

/*
 * Ceph inode.
 */
struct ceph_inode_info {
	struct ceph_vino i_vino;   /* ceph ino + snap */

	spinlock_t i_ceph_lock;

	u64 i_version;
	u64 i_inline_version;
	u32 i_time_warp_seq;

	unsigned long i_ceph_flags;
	atomic64_t i_release_count;
	atomic64_t i_ordered_count;
	atomic64_t i_complete_seq[2];

	struct ceph_dir_layout i_dir_layout;
	struct ceph_file_layout i_layout;
	struct ceph_file_layout i_cached_layout;  // for async creates
	char *i_symlink;

	/* for dirs */
	struct timespec64 i_rctime;
	u64 i_rbytes, i_rfiles, i_rsubdirs;
	u64 i_files, i_subdirs;

	/* quotas */
	u64 i_max_bytes, i_max_files;

	s32 i_dir_pin;

	struct rb_root i_fragtree;
	int i_fragtree_nsplits;
	struct mutex i_fragtree_mutex;

	struct ceph_inode_xattrs_info i_xattrs;

	/* capabilities.  protected _both_ by i_ceph_lock and cap->session's
	 * s_mutex. */
	struct rb_root i_caps;           /* cap list */
	struct ceph_cap *i_auth_cap;     /* authoritative cap, if any */
	unsigned i_dirty_caps, i_flushing_caps;  /* mask of dirtied fields */

	/*
	 * Link to the auth cap's session's s_cap_dirty list. s_cap_dirty
	 * is protected by the mdsc->cap_dirty_lock, but each individual item
	 * is also protected by the inode's i_ceph_lock. Walking s_cap_dirty
	 * requires the mdsc->cap_dirty_lock. List presence for an item can
	 * be tested under the i_ceph_lock. Changing anything requires both.
	 */
	struct list_head i_dirty_item;

	/*
	 * Link to session's s_cap_flushing list. Protected in a similar
	 * fashion to i_dirty_item, but also by the s_mutex for changes. The
	 * s_cap_flushing list can be walked while holding either the s_mutex
	 * or mdsc->cap_dirty_lock. List presence can also be checked while
	 * holding the i_ceph_lock for this inode.
	 */
	struct list_head i_flushing_item;

	/* we need to track cap writeback on a per-cap-bit basis, to allow
	 * overlapping, pipelined cap flushes to the mds.  we can probably
	 * reduce the tid to 8 bits if we're concerned about inode size. */
	struct ceph_cap_flush *i_prealloc_cap_flush;
	struct list_head i_cap_flush_list;
	wait_queue_head_t i_cap_wq;         /* threads waiting on a capability */
	unsigned long i_hold_caps_max;      /* jiffies */
	struct list_head i_cap_delay_list;  /* for delayed cap release to mds */
	struct ceph_cap_reservation i_cap_migration_resv;
	struct list_head i_cap_snaps;   /* snapped state pending flush to mds */
	struct ceph_snap_context *i_head_snapc;  /* set if wr_buffer_head > 0 or
						    dirty|flushing caps */
	unsigned i_snap_caps;           /* cap bits for snapped files */

	unsigned long i_last_rd;
	unsigned long i_last_wr;
	int i_nr_by_mode[CEPH_FILE_MODE_BITS];  /* open file counts */

	struct mutex i_truncate_mutex;
	u32 i_truncate_seq;        /* last truncate to smaller size */
	u64 i_truncate_size;       /* and the size we last truncated down to */
	int i_truncate_pending;    /* still need to call vmtruncate */

	u64 i_max_size;            /* max file size authorized by mds */
	u64 i_reported_size;  /* (max_)size reported to or requested of mds */
	u64 i_wanted_max_size;     /* offset we'd like to write to */
	u64 i_requested_max_size;  /* max_size we've requested */

	/* held references to caps */
	int i_pin_ref;
	int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref, i_fx_ref;
	int i_wrbuffer_ref, i_wrbuffer_ref_head;
	atomic_t i_filelock_ref;
	atomic_t i_shared_gen;   /* increment each time we get FILE_SHARED */
	u32 i_rdcache_gen;       /* incremented each time we get FILE_CACHE. */
	u32 i_rdcache_revoking;  /* RDCACHE gen to async invalidate, if any */

	struct list_head i_unsafe_dirops;  /* uncommitted mds dir ops */
	struct list_head i_unsafe_iops;    /* uncommitted mds inode ops */
	spinlock_t i_unsafe_lock;

	union {
		struct ceph_snap_realm *i_snap_realm;  /* snap realm (if caps) */
		struct ceph_snapid_map *i_snapid_map;  /* snapid -> dev_t */
	};
	int i_snap_realm_counter;  /* snap realm (if caps) */
	struct list_head i_snap_realm_item;
	struct list_head i_snap_flush_item;
	struct timespec64 i_btime;
	struct timespec64 i_snap_btime;

	struct work_struct i_work;
	unsigned long i_work_mask;

#ifdef CONFIG_CEPH_FSCACHE
	struct fscache_cookie *fscache;
	u32 i_fscache_gen;
#endif
	struct inode vfs_inode;  /* at end */
};

static inline struct ceph_inode_info *
ceph_inode(const struct inode *inode)
{
	return container_of(inode, struct ceph_inode_info, vfs_inode);
}

static inline struct ceph_fs_client *
ceph_inode_to_client(const struct inode *inode)
{
	return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
}

static inline struct ceph_fs_client *
ceph_sb_to_client(const struct super_block *sb)
{
	return (struct ceph_fs_client *)sb->s_fs_info;
}

static inline struct ceph_mds_client *
ceph_sb_to_mdsc(const struct super_block *sb)
{
	return (struct ceph_mds_client *)ceph_sb_to_client(sb)->mdsc;
}

static inline struct ceph_vino
ceph_vino(const struct inode *inode)
{
	return ceph_inode(inode)->i_vino;
}

static inline u32 ceph_ino_to_ino32(u64 vino)
{
	u32 ino = vino & 0xffffffff;

	ino ^= vino >> 32;
	if (!ino)
		ino = 2;
	return ino;
}
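
/*
 * Worked example (not from the original header): for
 * vino == 0x0000000100000002, the low word 0x00000002 is XORed with the
 * high word 0x00000001, giving ino32 == 0x00000003.  A result of 0 is
 * remapped to 2, avoiding the invalid ino 0 (and 1, the root inode
 * number).
 */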

/*
 * Inode numbers in cephfs are 64 bits, but inode->i_ino is 32-bits on
 * some arches. We generally do not use this value inside the ceph driver, but
 * we do want to set it to something, so that generic vfs code has an
 * appropriate value for tracepoints and the like.
 */
static inline ino_t ceph_vino_to_ino_t(struct ceph_vino vino)
{
	if (sizeof(ino_t) == sizeof(u32))
		return ceph_ino_to_ino32(vino.ino);
	return (ino_t)vino.ino;
}

/* for printf-style formatting */
#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap

static inline u64 ceph_ino(struct inode *inode)
{
	return ceph_inode(inode)->i_vino.ino;
}

static inline u64 ceph_snap(struct inode *inode)
{
	return ceph_inode(inode)->i_vino.snap;
}

/**
 * ceph_present_ino - format an inode number for presentation to userland
 * @sb: superblock where the inode lives
 * @ino: inode number to (possibly) convert
 *
 * If the user mounted with the ino32 option, then the 64-bit value needs
 * to be converted to something that can fit inside 32 bits. Note that
 * internal kernel code never uses this value, so this is entirely for
 * userland consumption.
 */
static inline u64 ceph_present_ino(struct super_block *sb, u64 ino)
{
	if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)))
		return ceph_ino_to_ino32(ino);
	return ino;
}

static inline u64 ceph_present_inode(struct inode *inode)
{
	return ceph_present_ino(inode->i_sb, ceph_ino(inode));
}

static inline int ceph_ino_compare(struct inode *inode, void *data)
{
	struct ceph_vino *pvino = (struct ceph_vino *)data;
	struct ceph_inode_info *ci = ceph_inode(inode);

	return ci->i_vino.ino == pvino->ino &&
	       ci->i_vino.snap == pvino->snap;
}

/*
 * The MDS reserves a set of inodes for its own usage. These should never
 * be accessible by clients, and so the MDS has no reason to ever hand these
 * out. The range is CEPH_MDS_INO_MDSDIR_OFFSET..CEPH_INO_SYSTEM_BASE.
 *
 * These come from src/mds/mdstypes.h in the ceph sources.
 */
#define CEPH_MAX_MDS			0x100
#define CEPH_NUM_STRAY			10
#define CEPH_MDS_INO_MDSDIR_OFFSET	(1 * CEPH_MAX_MDS)
#define CEPH_INO_SYSTEM_BASE		((6*CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))
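
/*
 * Worked arithmetic (not from the original header): with the constants
 * above, CEPH_MDS_INO_MDSDIR_OFFSET == 0x100 and CEPH_INO_SYSTEM_BASE
 * == 6*0x100 + 0x100*10 == 0x1000, so inode numbers in [0x100, 0x1000)
 * are treated as MDS-reserved by ceph_vino_is_reserved() below.
 */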

static inline bool ceph_vino_is_reserved(const struct ceph_vino vino)
{
	if (vino.ino < CEPH_INO_SYSTEM_BASE &&
	    vino.ino >= CEPH_MDS_INO_MDSDIR_OFFSET) {
		WARN_RATELIMIT(1, "Attempt to access reserved inode number 0x%llx", vino.ino);
		return true;
	}
	return false;
}

static inline struct inode *ceph_find_inode(struct super_block *sb,
					    struct ceph_vino vino)
{
	if (ceph_vino_is_reserved(vino))
		return NULL;

	/*
	 * NB: The hashval will be run through the fs/inode.c hash function
	 * anyway, so there is no need to squash the inode number down to
	 * 32-bits first. Just use low-order bits on arches with 32-bit long.
	 */
	return ilookup5(sb, (unsigned long)vino.ino, ceph_ino_compare, &vino);
}


/*
 * Ceph inode flags.
 */
#define CEPH_I_DIR_ORDERED	(1 << 0)  /* dentries in dir are ordered */
#define CEPH_I_FLUSH		(1 << 2)  /* do not delay flush of dirty metadata */
#define CEPH_I_POOL_PERM	(1 << 3)  /* pool rd/wr bits are valid */
#define CEPH_I_POOL_RD		(1 << 4)  /* can read from pool */
#define CEPH_I_POOL_WR		(1 << 5)  /* can write to pool */
#define CEPH_I_SEC_INITED	(1 << 6)  /* security initialized */
#define CEPH_I_KICK_FLUSH	(1 << 7)  /* kick flushing caps */
#define CEPH_I_FLUSH_SNAPS	(1 << 8)  /* need flush snaps */
#define CEPH_I_ERROR_WRITE	(1 << 9)  /* have seen write errors */
#define CEPH_I_ERROR_FILELOCK	(1 << 10) /* have seen file lock errors */
#define CEPH_I_ODIRECT		(1 << 11) /* inode in direct I/O mode */
#define CEPH_ASYNC_CREATE_BIT	(12)	  /* async create in flight for this */
#define CEPH_I_ASYNC_CREATE	(1 << CEPH_ASYNC_CREATE_BIT)

/*
 * Masks of ceph inode work.
 */
#define CEPH_I_WORK_WRITEBACK		0 /* writeback */
#define CEPH_I_WORK_INVALIDATE_PAGES	1 /* invalidate pages */
#define CEPH_I_WORK_VMTRUNCATE		2 /* vmtruncate */

/*
 * We set the ERROR_WRITE bit when we start seeing write errors on an inode
 * and then clear it when they start succeeding. Note that we do a lockless
 * check first, and only take the lock if it looks like it needs to be changed.
 * The write submission code just takes this as a hint, so we're not too
 * worried if a few slip through in either direction.
 */
static inline void ceph_set_error_write(struct ceph_inode_info *ci)
{
	if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ERROR_WRITE)) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_ceph_flags |= CEPH_I_ERROR_WRITE;
		spin_unlock(&ci->i_ceph_lock);
	}
}

static inline void ceph_clear_error_write(struct ceph_inode_info *ci)
{
	if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ERROR_WRITE) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_ceph_flags &= ~CEPH_I_ERROR_WRITE;
		spin_unlock(&ci->i_ceph_lock);
	}
}

static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
					   long long release_count,
					   long long ordered_count)
{
	/*
	 * Makes sure operations that setup readdir cache (update page
	 * cache and i_size) are strongly ordered w.r.t. the following
	 * atomic64_set() operations.
	 */
	smp_mb();
	atomic64_set(&ci->i_complete_seq[0], release_count);
	atomic64_set(&ci->i_complete_seq[1], ordered_count);
}

static inline void __ceph_dir_clear_complete(struct ceph_inode_info *ci)
{
	atomic64_inc(&ci->i_release_count);
}

static inline void __ceph_dir_clear_ordered(struct ceph_inode_info *ci)
{
	atomic64_inc(&ci->i_ordered_count);
}

static inline bool __ceph_dir_is_complete(struct ceph_inode_info *ci)
{
	return atomic64_read(&ci->i_complete_seq[0]) ==
	       atomic64_read(&ci->i_release_count);
}

static inline bool __ceph_dir_is_complete_ordered(struct ceph_inode_info *ci)
{
	return atomic64_read(&ci->i_complete_seq[0]) ==
	       atomic64_read(&ci->i_release_count) &&
	       atomic64_read(&ci->i_complete_seq[1]) ==
	       atomic64_read(&ci->i_ordered_count);
}

static inline void ceph_dir_clear_complete(struct inode *inode)
{
	__ceph_dir_clear_complete(ceph_inode(inode));
}

static inline void ceph_dir_clear_ordered(struct inode *inode)
{
	__ceph_dir_clear_ordered(ceph_inode(inode));
}

static inline bool ceph_dir_is_complete_ordered(struct inode *inode)
{
	bool ret = __ceph_dir_is_complete_ordered(ceph_inode(inode));
	smp_rmb();
	return ret;
}
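
/*
 * Illustrative sketch (not from the original header) of how the
 * sequence counters above are meant to be used by a readdir cache
 * fill, assuming counter values sampled before the directory was
 * populated:
 *
 *	long long release = atomic64_read(&ci->i_release_count);
 *	long long ordered = atomic64_read(&ci->i_ordered_count);
 *
 *	... populate the readdir cache ...
 *
 *	__ceph_dir_set_complete(ci, release, ordered);
 *
 * If anything invalidated the directory in between, one of the counters
 * was bumped, the stale values no longer match, and
 * __ceph_dir_is_complete() stays false.
 */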

/* find a specific frag @f */
extern struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci,
						u32 f);

/*
 * choose fragment for value @v.  copy frag content to pfrag, if leaf
 * exists
 */
extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			    struct ceph_inode_frag *pfrag,
			    int *found);

static inline struct ceph_dentry_info *ceph_dentry(const struct dentry *dentry)
{
	return (struct ceph_dentry_info *)dentry->d_fsdata;
}

/*
 * caps helpers
 */
static inline bool __ceph_is_any_real_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps);
}

extern int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented);
extern int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int t);
extern int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
					  int t);
extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
				    struct ceph_cap *cap);

static inline int ceph_caps_issued(struct ceph_inode_info *ci)
{
	int issued;

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);
	spin_unlock(&ci->i_ceph_lock);
	return issued;
}

static inline int ceph_caps_issued_mask_metric(struct ceph_inode_info *ci,
					       int mask, int touch)
{
	int r;

	spin_lock(&ci->i_ceph_lock);
	r = __ceph_caps_issued_mask_metric(ci, mask, touch);
	spin_unlock(&ci->i_ceph_lock);
	return r;
}

static inline int __ceph_caps_dirty(struct ceph_inode_info *ci)
{
	return ci->i_dirty_caps | ci->i_flushing_caps;
}

extern struct ceph_cap_flush *ceph_alloc_cap_flush(void);
extern void ceph_free_cap_flush(struct ceph_cap_flush *cf);
extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
				  struct ceph_cap_flush **pcf);

extern int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
				      struct ceph_cap *ocap, int mask);
extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
extern int __ceph_caps_used(struct ceph_inode_info *ci);

static inline bool __ceph_is_file_opened(struct ceph_inode_info *ci)
{
	return ci->i_nr_by_mode[0];
}
extern int __ceph_caps_file_wanted(struct ceph_inode_info *ci);
extern int __ceph_caps_wanted(struct ceph_inode_info *ci);

/* what the mds thinks we want */
extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check);

extern void ceph_caps_init(struct ceph_mds_client *mdsc);
extern void ceph_caps_finalize(struct ceph_mds_client *mdsc);
extern void ceph_adjust_caps_max_min(struct ceph_mds_client *mdsc,
				     struct ceph_mount_options *fsopt);
extern int ceph_reserve_caps(struct ceph_mds_client *mdsc,
			     struct ceph_cap_reservation *ctx, int need);
extern void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
				struct ceph_cap_reservation *ctx);
extern void ceph_reservation_status(struct ceph_fs_client *client,
				    int *total, int *avail, int *used,
				    int *reserved, int *min);



/*
 * we keep buffered readdir results attached to file->private_data
 */
#define CEPH_F_SYNC	1
#define CEPH_F_ATEND	2

struct ceph_file_info {
	short fmode;  /* initialized on open */
	short flags;  /* CEPH_F_* */

	spinlock_t rw_contexts_lock;
	struct list_head rw_contexts;

	u32 filp_gen;
	atomic_t num_locks;
};

struct ceph_dir_file_info {
	struct ceph_file_info file_info;

	/* readdir: position within the dir */
	u32 frag;
	struct ceph_mds_request *last_readdir;

	/* readdir: position within a frag */
	unsigned next_offset;  /* offset of next chunk (last_name's + 1) */
	char *last_name;       /* last entry in previous chunk */
	long long dir_release_count;
	long long dir_ordered_count;
	int readdir_cache_idx;

	/* used for -o dirstat read() on directory thing */
	char *dir_info;
	int dir_info_len;
};

struct ceph_rw_context {
	struct list_head list;
	struct task_struct *thread;
	int caps;
};

#define CEPH_DEFINE_RW_CONTEXT(_name, _caps)	\
	struct ceph_rw_context _name = {	\
		.thread = current,		\
		.caps = _caps,			\
	}
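
/*
 * Illustrative usage sketch (not from the original header): a read path
 * that has pinned caps can record them for the duration of the I/O so
 * that page-cache code invoked from the same thread can find them:
 *
 *	CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
 *	ceph_add_rw_context(cf, &rw_ctx);
 *	... issue the read ...
 *	ceph_del_rw_context(cf, &rw_ctx);
 *
 * where `cf` is the file's ceph_file_info and `got` the caps obtained
 * from ceph_get_caps().
 */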

static inline void ceph_add_rw_context(struct ceph_file_info *cf,
				       struct ceph_rw_context *ctx)
{
	spin_lock(&cf->rw_contexts_lock);
	list_add(&ctx->list, &cf->rw_contexts);
	spin_unlock(&cf->rw_contexts_lock);
}

static inline void ceph_del_rw_context(struct ceph_file_info *cf,
				       struct ceph_rw_context *ctx)
{
	spin_lock(&cf->rw_contexts_lock);
	list_del(&ctx->list);
	spin_unlock(&cf->rw_contexts_lock);
}

static inline struct ceph_rw_context*
ceph_find_rw_context(struct ceph_file_info *cf)
{
	struct ceph_rw_context *ctx, *found = NULL;

	spin_lock(&cf->rw_contexts_lock);
	list_for_each_entry(ctx, &cf->rw_contexts, list) {
		if (ctx->thread == current) {
			found = ctx;
			break;
		}
	}
	spin_unlock(&cf->rw_contexts_lock);
	return found;
}

struct ceph_readdir_cache_control {
	struct page *page;
	struct dentry **dentries;
	int index;
};

/*
 * A "snap realm" describes a subset of the file hierarchy sharing
 * the same set of snapshots that apply to it.  The realms themselves
 * are organized into a hierarchy, such that children inherit (some of)
 * the snapshots of their parents.
 *
 * All inodes within the realm that have capabilities are linked into a
 * per-realm list.
 */
struct ceph_snap_realm {
	u64 ino;
	struct inode *inode;
	atomic_t nref;
	struct rb_node node;

	u64 created, seq;
	u64 parent_ino;
	u64 parent_since;   /* snapid when our current parent became so */

	u64 *prior_parent_snaps;     /* snaps inherited from any parents we */
	u32 num_prior_parent_snaps;  /*  had prior to parent_since */
	u64 *snaps;                  /* snaps specific to this realm */
	u32 num_snaps;

	struct ceph_snap_realm *parent;
	struct list_head children;  /* list of child realms */
	struct list_head child_item;

	struct list_head empty_item;  /* if i have ref==0 */

	struct list_head dirty_item;  /* if realm needs new context */

	/* the current set of snaps for this realm */
	struct ceph_snap_context *cached_context;

	struct list_head inodes_with_caps;
	spinlock_t inodes_with_caps_lock;
};

static inline int default_congestion_kb(void)
{
	int congestion_kb;

	/*
	 * Copied from NFS
	 *
	 * congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
	if (congestion_kb > 256*1024)
		congestion_kb = 256*1024;

	return congestion_kb;
}
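
/*
 * Worked example (not from the original header): on a 1 GB machine with
 * 4 KB pages, totalram_pages() is 262144, int_sqrt() of that is 512,
 * and 16 * 512 == 8192, shifted left by PAGE_SHIFT-10 == 2 gives the
 * 32768k entry in the table above.  The 256M clamp kicks in at roughly
 * 64 GB of RAM and beyond.
 */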
| 913 | |
| 914 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 915 | /* super.c */ |
| 916 | extern int ceph_force_reconnect(struct super_block *sb); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 917 | /* snap.c */ |
| 918 | struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, |
| 919 | u64 ino); |
| 920 | extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc, |
| 921 | struct ceph_snap_realm *realm); |
| 922 | extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc, |
| 923 | struct ceph_snap_realm *realm); |
| 924 | extern int ceph_update_snap_trace(struct ceph_mds_client *m, |
| 925 | void *p, void *e, bool deletion, |
| 926 | struct ceph_snap_realm **realm_ret); |
| 927 | extern void ceph_handle_snap(struct ceph_mds_client *mdsc, |
| 928 | struct ceph_mds_session *session, |
| 929 | struct ceph_msg *msg); |
| 930 | extern void ceph_queue_cap_snap(struct ceph_inode_info *ci); |
| 931 | extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci, |
| 932 | struct ceph_cap_snap *capsnap); |
| 933 | extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc); |
| 934 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 935 | extern struct ceph_snapid_map *ceph_get_snapid_map(struct ceph_mds_client *mdsc, |
| 936 | u64 snap); |
| 937 | extern void ceph_put_snapid_map(struct ceph_mds_client* mdsc, |
| 938 | struct ceph_snapid_map *sm); |
| 939 | extern void ceph_trim_snapid_map(struct ceph_mds_client *mdsc); |
| 940 | extern void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc); |
| 941 | |
| 942 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 943 | /* |
| 944 | * a cap_snap is "pending" if it is still awaiting an in-progress |
| 945 | * sync write (that may/may not still update size, mtime, etc.). |
| 946 | */ |
| 947 | static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci) |
| 948 | { |
| 949 | return !list_empty(&ci->i_cap_snaps) && |
| 950 | list_last_entry(&ci->i_cap_snaps, struct ceph_cap_snap, |
| 951 | ci_item)->writing; |
| 952 | } |
| 953 | |
| 954 | /* inode.c */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 955 | struct ceph_mds_reply_info_in; |
| 956 | struct ceph_mds_reply_dirfrag; |
| 957 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 958 | extern const struct inode_operations ceph_file_iops; |
| 959 | |
| 960 | extern struct inode *ceph_alloc_inode(struct super_block *sb); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 961 | extern void ceph_evict_inode(struct inode *inode); |
| 962 | extern void ceph_free_inode(struct inode *inode); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 963 | |
| 964 | extern struct inode *ceph_get_inode(struct super_block *sb, |
| 965 | struct ceph_vino vino); |
| 966 | extern struct inode *ceph_get_snapdir(struct inode *parent); |
| 967 | extern int ceph_fill_file_size(struct inode *inode, int issued, |
| 968 | u32 truncate_seq, u64 truncate_size, u64 size); |
| 969 | extern void ceph_fill_file_time(struct inode *inode, int issued, |
| 970 | u64 time_warp_seq, struct timespec64 *ctime, |
| 971 | struct timespec64 *mtime, |
| 972 | struct timespec64 *atime); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 973 | extern int ceph_fill_inode(struct inode *inode, struct page *locked_page, |
| 974 | struct ceph_mds_reply_info_in *iinfo, |
| 975 | struct ceph_mds_reply_dirfrag *dirinfo, |
| 976 | struct ceph_mds_session *session, int cap_fmode, |
| 977 | struct ceph_cap_reservation *caps_reservation); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 978 | extern int ceph_fill_trace(struct super_block *sb, |
| 979 | struct ceph_mds_request *req); |
| 980 | extern int ceph_readdir_prepopulate(struct ceph_mds_request *req, |
| 981 | struct ceph_mds_session *session); |
| 982 | |
| 983 | extern int ceph_inode_holds_cap(struct inode *inode, int mask); |
| 984 | |
| 985 | extern bool ceph_inode_set_size(struct inode *inode, loff_t size); |
| 986 | extern void __ceph_do_pending_vmtruncate(struct inode *inode); |
| 987 | extern void ceph_queue_vmtruncate(struct inode *inode); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 988 | extern void ceph_queue_invalidate(struct inode *inode); |
| 989 | extern void ceph_queue_writeback(struct inode *inode); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 990 | extern void ceph_async_iput(struct inode *inode); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 991 | |
| 992 | extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page, |
| 993 | int mask, bool force); |
| 994 | static inline int ceph_do_getattr(struct inode *inode, int mask, bool force) |
| 995 | { |
| 996 | return __ceph_do_getattr(inode, NULL, mask, force); |
| 997 | } |
| 998 | extern int ceph_permission(struct inode *inode, int mask); |
| 999 | extern int __ceph_setattr(struct inode *inode, struct iattr *attr); |
| 1000 | extern int ceph_setattr(struct dentry *dentry, struct iattr *attr); |
| 1001 | extern int ceph_getattr(const struct path *path, struct kstat *stat, |
| 1002 | u32 request_mask, unsigned int flags); |
| 1003 | |
| 1004 | /* xattr.c */ |
| 1005 | int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int); |
| 1006 | ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t); |
| 1007 | extern ssize_t ceph_listxattr(struct dentry *, char *, size_t); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1008 | extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1009 | extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1010 | extern const struct xattr_handler *ceph_xattr_handlers[]; |
| 1011 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1012 | struct ceph_acl_sec_ctx { |
| 1013 | #ifdef CONFIG_CEPH_FS_POSIX_ACL |
| 1014 | void *default_acl; |
| 1015 | void *acl; |
| 1016 | #endif |
| 1017 | #ifdef CONFIG_CEPH_FS_SECURITY_LABEL |
| 1018 | void *sec_ctx; |
| 1019 | u32 sec_ctxlen; |
| 1020 | #endif |
| 1021 | struct ceph_pagelist *pagelist; |
| 1022 | }; |
| 1023 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1024 | #ifdef CONFIG_SECURITY |
| 1025 | extern bool ceph_security_xattr_deadlock(struct inode *in); |
| 1026 | extern bool ceph_security_xattr_wanted(struct inode *in); |
| 1027 | #else |
| 1028 | static inline bool ceph_security_xattr_deadlock(struct inode *in) |
| 1029 | { |
| 1030 | return false; |
| 1031 | } |
| 1032 | static inline bool ceph_security_xattr_wanted(struct inode *in) |
| 1033 | { |
| 1034 | return false; |
| 1035 | } |
| 1036 | #endif |
| 1037 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1038 | #ifdef CONFIG_CEPH_FS_SECURITY_LABEL |
| 1039 | extern int ceph_security_init_secctx(struct dentry *dentry, umode_t mode, |
| 1040 | struct ceph_acl_sec_ctx *ctx); |
| 1041 | static inline void ceph_security_invalidate_secctx(struct inode *inode) |
| 1042 | { |
| 1043 | security_inode_invalidate_secctx(inode); |
| 1044 | } |
| 1045 | #else |
| 1046 | static inline int ceph_security_init_secctx(struct dentry *dentry, umode_t mode, |
| 1047 | struct ceph_acl_sec_ctx *ctx) |
| 1048 | { |
| 1049 | return 0; |
| 1050 | } |
| 1051 | static inline void ceph_security_invalidate_secctx(struct inode *inode) |
| 1052 | { |
| 1053 | } |
| 1054 | #endif |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1055 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1056 | void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx); |
| 1057 | |
| 1058 | /* acl.c */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1059 | #ifdef CONFIG_CEPH_FS_POSIX_ACL |
| 1060 | |
| 1061 | struct posix_acl *ceph_get_acl(struct inode *, int); |
| 1062 | int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type); |
| 1063 | int ceph_pre_init_acls(struct inode *dir, umode_t *mode, |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1064 | struct ceph_acl_sec_ctx *as_ctx); |
| 1065 | void ceph_init_inode_acls(struct inode *inode, |
| 1066 | struct ceph_acl_sec_ctx *as_ctx); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1067 | |
| 1068 | static inline void ceph_forget_all_cached_acls(struct inode *inode) |
| 1069 | { |
| 1070 | forget_all_cached_acls(inode); |
| 1071 | } |
| 1072 | |
| 1073 | #else |
| 1074 | |
| 1075 | #define ceph_get_acl NULL |
| 1076 | #define ceph_set_acl NULL |
| 1077 | |
| 1078 | static inline int ceph_pre_init_acls(struct inode *dir, umode_t *mode, |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1079 | struct ceph_acl_sec_ctx *as_ctx) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1080 | { |
| 1081 | return 0; |
| 1082 | } |
| 1083 | static inline void ceph_init_inode_acls(struct inode *inode, |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1084 | struct ceph_acl_sec_ctx *as_ctx) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1085 | { |
| 1086 | } |
| 1087 | static inline int ceph_acl_chmod(struct dentry *dentry, struct inode *inode) |
| 1088 | { |
| 1089 | return 0; |
| 1090 | } |
| 1091 | |
| 1092 | static inline void ceph_forget_all_cached_acls(struct inode *inode) |
| 1093 | { |
| 1094 | } |
| 1095 | |
| 1096 | #endif |
| 1097 | |
| 1098 | /* caps.c */ |
| 1099 | extern const char *ceph_cap_string(int c); |
| 1100 | extern void ceph_handle_caps(struct ceph_mds_session *session, |
| 1101 | struct ceph_msg *msg); |
| 1102 | extern struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc, |
| 1103 | struct ceph_cap_reservation *ctx); |
extern void ceph_add_cap(struct inode *inode,
			 struct ceph_mds_session *session, u64 cap_id,
			 unsigned issued, unsigned wanted,
			 unsigned cap, unsigned seq, u64 realmino, int flags,
			 struct ceph_cap **new_cap);
extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
extern void __ceph_remove_caps(struct ceph_inode_info *ci);
extern void ceph_put_cap(struct ceph_mds_client *mdsc,
			 struct ceph_cap *cap);
extern int ceph_is_any_caps(struct inode *inode);

extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc);
extern int ceph_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync);
extern void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session);
extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
				    struct ceph_mds_session *session);
void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
				   struct ceph_inode_info *ci);
extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci,
					     int mds);
extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
			       bool snap_rwsem_locked);
extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
					    int had);
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
				       struct ceph_snap_context *snapc);
extern void __ceph_remove_capsnap(struct inode *inode,
				  struct ceph_cap_snap *capsnap,
				  bool *wake_ci, bool *wake_mdsc);
extern void ceph_remove_capsnap(struct inode *inode,
				struct ceph_cap_snap *capsnap,
				bool *wake_ci, bool *wake_mdsc);
extern void ceph_flush_snaps(struct ceph_inode_info *ci,
			     struct ceph_mds_session **psession);
extern bool __ceph_should_report_size(struct ceph_inode_info *ci);
extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
			    struct ceph_mds_session *session);
extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
extern int ceph_drop_caps_for_unlink(struct inode *inode);
extern int ceph_encode_inode_release(void **p, struct inode *inode,
				     int mds, int drop, int unless, int force);
extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
				      struct inode *dir,
				      int mds, int drop, int unless);

extern int ceph_get_caps(struct file *filp, int need, int want,
			 loff_t endoff, int *got, struct page **pinned_page);
extern int ceph_try_get_caps(struct inode *inode,
			     int need, int want, bool nonblock, int *got);
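/*
 * Illustrative usage sketch (not part of the original header): callers
 * take capability references before doing I/O and drop them afterwards.
 * The exact pattern below is an assumption based on the declarations
 * above; the CEPH_CAP_FILE_* masks come from libceph.
 *
 *	int got = 0;
 *
 *	err = ceph_get_caps(filp, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
 *			    -1, &got, &pinned_page);
 *	if (err < 0)
 *		return err;
 *	... do I/O permitted by 'got' ...
 *	ceph_put_cap_refs(ceph_inode(inode), got);
 *
 * ceph_try_get_caps() is the non-blocking variant.
 */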

/* for counting open files by mode */
extern void ceph_get_fmode(struct ceph_inode_info *ci, int mode, int count);
extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode, int count);
extern void __ceph_touch_fmode(struct ceph_inode_info *ci,
			       struct ceph_mds_client *mdsc, int fmode);
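/*
 * Hedged note (an assumption, not from the original header): the per-mode
 * counts are expected to stay balanced, e.g. ceph_get_fmode(ci, fmode, 1)
 * at open paired with ceph_put_fmode(ci, fmode, 1) at release, so the MDS
 * can be told which open modes are still in use on this inode.
 */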

/* addr.c */
extern const struct address_space_operations ceph_aops;
extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
extern int ceph_uninline_data(struct file *filp, struct page *locked_page);
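/*
 * Hedged summary of the inline-data path: small file contents may be
 * stored inline in the MDS inode; ceph_uninline_data() is expected to
 * migrate that data out to RADOS objects before normal object-backed
 * writeback can proceed, while ceph_fill_inline_data() (file.c, below)
 * copies inline contents into the page cache.
 */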
extern int ceph_pool_perm_check(struct inode *inode, int need);
extern void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc);

/* file.c */
extern const struct file_operations ceph_file_fops;

extern int ceph_renew_caps(struct inode *inode, int fmode);
extern int ceph_open(struct inode *inode, struct file *file);
extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
			    struct file *file, unsigned flags, umode_t mode);
extern int ceph_release(struct inode *inode, struct file *filp);
extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
				  char *data, size_t len);

/* dir.c */
extern const struct file_operations ceph_dir_fops;
extern const struct file_operations ceph_snapdir_fops;
extern const struct inode_operations ceph_dir_iops;
extern const struct inode_operations ceph_snapdir_iops;
extern const struct dentry_operations ceph_dentry_ops;

extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
extern int ceph_handle_snapdir(struct ceph_mds_request *req,
			       struct dentry *dentry, int err);
extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
					 struct dentry *dentry, int err);

extern void __ceph_dentry_lease_touch(struct ceph_dentry_info *di);
extern void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di);
extern void ceph_invalidate_dentry_lease(struct dentry *dentry);
extern int ceph_trim_dentries(struct ceph_mds_client *mdsc);
extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn);
extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl);

/* ioctl.c */
extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

/* export.c */
extern const struct export_operations ceph_export_ops;
struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino);

/* locks.c */
extern __init void ceph_flock_init(void);
extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
extern void ceph_count_locks(struct inode *inode, int *p_num, int *f_num);
extern int ceph_encode_locks_to_buffer(struct inode *inode,
				       struct ceph_filelock *flocks,
				       int num_fcntl_locks,
				       int num_flock_locks);
extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
				  struct ceph_pagelist *pagelist,
				  int num_fcntl_locks, int num_flock_locks);
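/*
 * Illustrative two-pass encoding sketch (assumed caller pattern, e.g.
 * when replaying lock state to an MDS): count locks first, flatten them
 * into a buffer, then append the buffer to the wire pagelist. The lock
 * count can change between passes, so callers should be prepared to
 * retry if ceph_encode_locks_to_buffer() reports a mismatch.
 *
 *	ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
 *	flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
 *			       sizeof(*flocks), GFP_NOFS);
 *	err = ceph_encode_locks_to_buffer(inode, flocks,
 *					  num_fcntl_locks, num_flock_locks);
 *	if (!err)
 *		err = ceph_locks_to_pagelist(flocks, pagelist,
 *					     num_fcntl_locks, num_flock_locks);
 *	kfree(flocks);
 */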

/* debugfs.c */
extern void ceph_fs_debugfs_init(struct ceph_fs_client *client);
extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client);

/* quota.c */
static inline bool __ceph_has_any_quota(struct ceph_inode_info *ci)
{
	return ci->i_max_files || ci->i_max_bytes;
}

extern void ceph_adjust_quota_realms_count(struct inode *inode, bool inc);

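/*
 * Update the cached quota limits and keep the quota-realm count in sync.
 * Worked example: if both limits were 0 (no quota) and max_files becomes
 * 100, had_quota/has_quota flip from false to true and the realm count is
 * incremented; clearing both limits later decrements it again.
 */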
static inline void __ceph_update_quota(struct ceph_inode_info *ci,
				       u64 max_bytes, u64 max_files)
{
	bool had_quota, has_quota;

	had_quota = __ceph_has_any_quota(ci);
	ci->i_max_bytes = max_bytes;
	ci->i_max_files = max_files;
	has_quota = __ceph_has_any_quota(ci);

	if (had_quota != has_quota)
		ceph_adjust_quota_realms_count(&ci->vfs_inode, has_quota);
}

extern void ceph_handle_quota(struct ceph_mds_client *mdsc,
			      struct ceph_mds_session *session,
			      struct ceph_msg *msg);
extern bool ceph_quota_is_max_files_exceeded(struct inode *inode);
extern bool ceph_quota_is_max_bytes_exceeded(struct inode *inode,
					     loff_t newlen);
extern bool ceph_quota_is_max_bytes_approaching(struct inode *inode,
						loff_t newlen);
extern bool ceph_quota_update_statfs(struct ceph_fs_client *fsc,
				     struct kstatfs *buf);
extern int ceph_quota_check_rename(struct ceph_mds_client *mdsc,
				   struct inode *old, struct inode *new);
extern void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc);

#endif /* _FS_CEPH_SUPER_H */