// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/file.c
 *
 * Copyright (C) 1992 Rick Sladkey
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/falloc.h>
#include <linux/mount.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_ssc.h>
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#ifdef CONFIG_NFS_V4_2
#include "nfs42.h"
#endif

#define NFSDBG_FACILITY		NFSDBG_FILE

static int
nfs4_file_open(struct inode *inode, struct file *filp)
{
	struct nfs_open_context *ctx;
	struct dentry *dentry = file_dentry(filp);
	struct dentry *parent = NULL;
	struct inode *dir;
	unsigned openflags = filp->f_flags;
	struct iattr attr;
	int err;

	/*
	 * If no cached dentry exists or if it's negative, NFSv4 handled the
	 * opens in ->lookup() or ->create().
	 *
	 * We only get this far for a cached positive dentry. We skipped
	 * revalidation, so handle it here by dropping the dentry and returning
	 * -EOPENSTALE. The VFS will retry the lookup/create/open.
	 */

	dprintk("NFS: open file(%pd2)\n", dentry);

	err = nfs_check_flags(openflags);
	if (err)
		return err;

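	/*
	 * Linux reserves access mode 3 (neither read nor write) for
	 * ioctl-only descriptors; no NFSv4 OPEN is needed for it, so
	 * use the generic NFS open path instead.
	 */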
	if ((openflags & O_ACCMODE) == 3)
		return nfs_open(inode, filp);

	/* We can't create new files here */
	openflags &= ~(O_CREAT|O_EXCL);

	parent = dget_parent(dentry);
	dir = d_inode(parent);

	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
	err = PTR_ERR(ctx);
	if (IS_ERR(ctx))
		goto out;

	attr.ia_valid = ATTR_OPEN;
	if (openflags & O_TRUNC) {
		attr.ia_valid |= ATTR_SIZE;
		attr.ia_size = 0;
		filemap_write_and_wait(inode->i_mapping);
	}

	inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr, NULL);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		switch (err) {
		default:
			goto out_put_ctx;
		case -ENOENT:
		case -ESTALE:
		case -EISDIR:
		case -ENOTDIR:
		case -ELOOP:
			goto out_drop;
		}
	}
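	/* The server resolved the open to a different inode than the cached
	 * dentry refers to, so the dentry is stale.
	 */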
	if (inode != d_inode(dentry))
		goto out_drop;

	nfs_file_set_open_context(filp, ctx);
	nfs_fscache_open_file(inode, filp);
	err = 0;

out_put_ctx:
	put_nfs_open_context(ctx);
out:
	dput(parent);
	return err;

out_drop:
	d_drop(dentry);
	err = -EOPENSTALE;
	goto out_put_ctx;
}

/*
 * Flush all dirty pages, and check for write errors.
 */
static int
nfs4_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	errseq_t since;

	dprintk("NFS: flush(%pD2)\n", file);

	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	/*
	 * If we're holding a write delegation, then check if we're required
	 * to flush the i/o on close. If not, then just start the i/o now.
	 */
	if (!nfs4_delegation_flush_on_close(inode))
		return filemap_fdatawrite(file->f_mapping);

	/* Flush writes to the server and return any errors */
	since = filemap_sample_wb_err(file->f_mapping);
	nfs_wb_all(inode);
	return filemap_check_wb_err(file->f_mapping, since);
}

#ifdef CONFIG_NFS_V4_2
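/*
 * Attempt an NFSv4.2 server-side COPY. Small copies are made synchronous;
 * an inter-server copy first obtains a COPY_NOTIFY stateid from the source
 * server, and copies the servers cannot offload return -EOPNOTSUPP so the
 * caller can fall back to a generic copy.
 */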
static ssize_t __nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
				      struct file *file_out, loff_t pos_out,
				      size_t count, unsigned int flags)
{
	struct nfs42_copy_notify_res *cn_resp = NULL;
	struct nl4_server *nss = NULL;
	nfs4_stateid *cnrs = NULL;
	ssize_t ret;
	bool sync = false;

	/* Only offload copy if the source is also an NFSv4 file */
	if (file_in->f_op != &nfs4_file_operations)
		return -EXDEV;
	if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY) ||
	    !nfs_server_capable(file_inode(file_in), NFS_CAP_COPY))
		return -EOPNOTSUPP;
	if (file_inode(file_in) == file_inode(file_out))
		return -EOPNOTSUPP;
	/* If the copy size is smaller than 2 RPC payloads, make it
	 * synchronous.
	 */
	if (count <= 2 * NFS_SERVER(file_inode(file_in))->rsize)
		sync = true;
retry:
	if (!nfs42_files_from_same_server(file_in, file_out)) {
		/* For an inter-server copy, if the copy size is smaller than
		 * 12 RPC payloads, fall back to a traditional copy: an
		 * NFSv4.x mount between the source and destination servers
		 * costs 14 RPCs.
		 */
		if (sync ||
		    count <= 14 * NFS_SERVER(file_inode(file_in))->rsize)
			return -EOPNOTSUPP;
		cn_resp = kzalloc(sizeof(struct nfs42_copy_notify_res),
				  GFP_NOFS);
		if (unlikely(cn_resp == NULL))
			return -ENOMEM;

		ret = nfs42_proc_copy_notify(file_in, file_out, cn_resp);
		if (ret) {
			ret = -EOPNOTSUPP;
			goto out;
		}
		nss = &cn_resp->cnr_src;
		cnrs = &cn_resp->cnr_stateid;
	}
	ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count,
			      nss, cnrs, sync);
out:
	if (!nfs42_files_from_same_server(file_in, file_out))
		kfree(cn_resp);
	if (ret == -EAGAIN)
		goto retry;
	return ret;
}

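/*
 * Try the NFSv4.2 server-side copy first; when the servers can't offload it
 * (-EOPNOTSUPP or -EXDEV), fall back to the generic page-cache copy.
 */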
static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
				    struct file *file_out, loff_t pos_out,
				    size_t count, unsigned int flags)
{
	ssize_t ret;

	ret = __nfs4_copy_file_range(file_in, pos_in, file_out, pos_out, count,
				     flags);
	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(file_in, pos_in, file_out,
					      pos_out, count, flags);
	return ret;
}

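/*
 * SEEK_HOLE and SEEK_DATA are forwarded to the server via the NFSv4.2 SEEK
 * operation; if the server doesn't support it, and for every other whence,
 * use the generic NFS llseek.
 */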
static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
{
	loff_t ret;

	switch (whence) {
	case SEEK_HOLE:
	case SEEK_DATA:
		ret = nfs42_proc_llseek(filep, offset, whence);
		if (ret != -EOPNOTSUPP)
			return ret;
		fallthrough;
	default:
		return nfs_file_llseek(filep, offset, whence);
	}
}

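/*
 * fallocate() support: plain preallocation maps to the NFSv4.2 ALLOCATE
 * operation, and FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE maps to
 * DEALLOCATE; every other mode is rejected.
 */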
static long nfs42_fallocate(struct file *filep, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	long ret;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	if ((mode != 0) && (mode != (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)))
		return -EOPNOTSUPP;

	ret = inode_newsize_ok(inode, offset + len);
	if (ret < 0)
		return ret;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return nfs42_proc_deallocate(filep, offset, len);
	return nfs42_proc_allocate(filep, offset, len);
}

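/*
 * FICLONE/FICLONERANGE support: clone a range from src_file into dst_file
 * with the NFSv4.2 CLONE operation, honoring the server's clone_blksize
 * alignment requirement.
 */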
static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
		struct file *dst_file, loff_t dst_off, loff_t count,
		unsigned int remap_flags)
{
	struct inode *dst_inode = file_inode(dst_file);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	struct inode *src_inode = file_inode(src_file);
	unsigned int bs = server->clone_blksize;
	bool same_inode = false;
	int ret;

	/* NFS does not support deduplication. */
	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;

	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	if (IS_SWAPFILE(dst_inode) || IS_SWAPFILE(src_inode))
		return -ETXTBSY;

	/* check alignment w.r.t. clone_blksize */
	ret = -EINVAL;
	if (bs) {
		if (!IS_ALIGNED(src_off, bs) || !IS_ALIGNED(dst_off, bs))
			goto out;
		if (!IS_ALIGNED(count, bs) && i_size_read(src_inode) != (src_off + count))
			goto out;
	}

	if (src_inode == dst_inode)
		same_inode = true;

	/* XXX: do we lock at all? what if server needs CB_RECALL_LAYOUT? */
	if (same_inode) {
		inode_lock(src_inode);
	} else if (dst_inode < src_inode) {
		inode_lock_nested(dst_inode, I_MUTEX_PARENT);
		inode_lock_nested(src_inode, I_MUTEX_CHILD);
	} else {
		inode_lock_nested(src_inode, I_MUTEX_PARENT);
		inode_lock_nested(dst_inode, I_MUTEX_CHILD);
	}

	/* flush all pending writes on both src and dst so that the server
	 * has the latest data */
	ret = nfs_sync_inode(src_inode);
	if (ret)
		goto out_unlock;
	ret = nfs_sync_inode(dst_inode);
	if (ret)
		goto out_unlock;

	ret = nfs42_proc_clone(src_file, dst_file, src_off, dst_off, count);

	/* truncate the inode page cache of the dst range so that future reads
	 * can fetch new data from the server */
	if (!ret)
		truncate_inode_pages_range(&dst_inode->i_data, dst_off, dst_off + count - 1);

out_unlock:
	if (same_inode) {
		inode_unlock(src_inode);
	} else if (dst_inode < src_inode) {
		inode_unlock(src_inode);
		inode_unlock(dst_inode);
	} else {
		inode_unlock(dst_inode);
		inode_unlock(src_inode);
	}
out:
	return ret < 0 ? ret : count;
}

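/* Generator for the names of the pseudo files that stand in for the source
 * of an inter-server server-side copy.
 */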
static int read_name_gen = 1;
#define SSC_READ_NAME_BODY "ssc_read_%d"

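/*
 * __nfs42_ssc_open - open a pseudo file for the source of a server-side copy
 *
 * Build a read-only struct file on @ss_mnt for the file identified by
 * @src_fh, and prime its NFSv4 open state with @stateid so that reads
 * through the returned file are done under the copy stateid.
 */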
static struct file *__nfs42_ssc_open(struct vfsmount *ss_mnt,
		struct nfs_fh *src_fh, nfs4_stateid *stateid)
{
	struct nfs_fattr fattr;
	struct file *filep, *res;
	struct nfs_server *server;
	struct inode *r_ino = NULL;
	struct nfs_open_context *ctx;
	struct nfs4_state_owner *sp;
	char *read_name = NULL;
	int len, status = 0;

	server = NFS_SERVER(ss_mnt->mnt_root->d_inode);

	nfs_fattr_init(&fattr);

	status = nfs4_proc_getattr(server, src_fh, &fattr, NULL, NULL);
	if (status < 0) {
		res = ERR_PTR(status);
		goto out;
	}

	res = ERR_PTR(-ENOMEM);
	len = strlen(SSC_READ_NAME_BODY) + 16;
	read_name = kzalloc(len, GFP_NOFS);
	if (read_name == NULL)
		goto out;
	snprintf(read_name, len, SSC_READ_NAME_BODY, read_name_gen++);

	r_ino = nfs_fhget(ss_mnt->mnt_root->d_inode->i_sb, src_fh, &fattr,
			  NULL);
	if (IS_ERR(r_ino)) {
		res = ERR_CAST(r_ino);
		goto out_free_name;
	}

	filep = alloc_file_pseudo(r_ino, ss_mnt, read_name, FMODE_READ,
				  r_ino->i_fop);
	if (IS_ERR(filep)) {
		res = ERR_CAST(filep);
		goto out_free_name;
	}
	filep->f_mode |= FMODE_READ;

	ctx = alloc_nfs_open_context(filep->f_path.dentry, filep->f_mode,
				     filep);
	if (IS_ERR(ctx)) {
		res = ERR_CAST(ctx);
		goto out_filep;
	}

	res = ERR_PTR(-EINVAL);
	sp = nfs4_get_state_owner(server, ctx->cred, GFP_KERNEL);
	if (sp == NULL)
		goto out_ctx;

	ctx->state = nfs4_get_open_state(r_ino, sp);
	if (ctx->state == NULL)
		goto out_stateowner;

	set_bit(NFS_SRV_SSC_COPY_STATE, &ctx->state->flags);
	memcpy(&ctx->state->open_stateid.other, &stateid->other,
	       NFS4_STATEID_OTHER_SIZE);
	update_open_stateid(ctx->state, stateid, NULL, filep->f_mode);
	set_bit(NFS_OPEN_STATE, &ctx->state->flags);

	nfs_file_set_open_context(filep, ctx);
	put_nfs_open_context(ctx);

	file_ra_state_init(&filep->f_ra, filep->f_mapping->host->i_mapping);
	res = filep;
out_free_name:
	kfree(read_name);
out:
	return res;
out_stateowner:
	nfs4_put_state_owner(sp);
out_ctx:
	put_nfs_open_context(ctx);
out_filep:
	fput(filep);
	goto out_free_name;
}

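/* Clear the open state flags that __nfs42_ssc_open() set up on @filep. */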
static void __nfs42_ssc_close(struct file *filep)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filep);

	ctx->state->flags = 0;
}

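/* Client-side SSC ops handed to nfs_common via nfs42_ssc_register(). */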
static const struct nfs4_ssc_client_ops nfs4_ssc_clnt_ops_tbl = {
	.sco_open = __nfs42_ssc_open,
	.sco_close = __nfs42_ssc_close,
};

/**
 * nfs42_ssc_register_ops - Wrapper to register NFS_V4 ops in nfs_common
 *
 * Return values:
 *   None
 */
void nfs42_ssc_register_ops(void)
{
	nfs42_ssc_register(&nfs4_ssc_clnt_ops_tbl);
}

/**
 * nfs42_ssc_unregister_ops - Wrapper to un-register NFS_V4 ops in nfs_common
 *
 * Return values:
 *   None
 */
void nfs42_ssc_unregister_ops(void)
{
	nfs42_ssc_unregister(&nfs4_ssc_clnt_ops_tbl);
}
#endif /* CONFIG_NFS_V4_2 */

const struct file_operations nfs4_file_operations = {
	.read_iter	= nfs_file_read,
	.write_iter	= nfs_file_write,
	.mmap		= nfs_file_mmap,
	.open		= nfs4_file_open,
	.flush		= nfs4_file_flush,
	.release	= nfs_file_release,
	.fsync		= nfs_file_fsync,
	.lock		= nfs_lock,
	.flock		= nfs_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.check_flags	= nfs_check_flags,
	.setlease	= simple_nosetlease,
#ifdef CONFIG_NFS_V4_2
	.copy_file_range = nfs4_copy_file_range,
	.llseek		= nfs4_file_llseek,
	.fallocate	= nfs42_fallocate,
	.remap_file_range = nfs42_remap_file_range,
#else
	.llseek		= nfs_file_llseek,
#endif
};