/*
 *  fs/nfs/nfs4proc.c
 *
 *  Client-side procedure declarations for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/iversion.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_BITMASK_SZ		3

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

/* file attributes which can be mapped to nfs attributes */
#define NFS4_VALID_ATTRS (ATTR_MODE \
	| ATTR_UID \
	| ATTR_GID \
	| ATTR_SIZE \
	| ATTR_ATIME \
	| ATTR_MTIME \
	| ATTR_CTIME \
	| ATTR_ATIME_SET \
	| ATTR_MTIME_SET)

struct nfs4_opendata;
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label, struct inode *inode);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label, struct inode *inode);
static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs_open_context *ctx, struct nfs4_label *ilabel,
			    struct nfs4_label *olabel);
#ifdef CONFIG_NFS_V4_1
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
		const struct cred *cred,
		struct nfs4_slot *slot,
		bool is_privileged);
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
		const struct cred *);
static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
		const struct cred *, bool);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, (void **)&label->label, &label->len);
	if (err == 0)
		return label;

	return NULL;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	if (label)
		security_release_secctx(label->label, label->len);
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
			| FATTR4_WORD2_CLONE_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};

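/*
 * Copy a GETATTR attribute bitmask, dropping the change and size
 * attributes when we hold a read delegation and are not being forced
 * to revalidate: those attributes are then authoritative on the client.
 */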
static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
		struct inode *inode)
{
	unsigned long cache_validity;

	memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
	if (!inode || !nfs4_have_delegation(inode, FMODE_READ))
		return;

	cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
	if (!(cache_validity & NFS_INO_REVAL_FORCED))
		cache_validity &= ~(NFS_INO_INVALID_CHANGE
				| NFS_INO_INVALID_SIZE);

	if (!(cache_validity & NFS_INO_INVALID_SIZE))
		dst[0] &= ~FATTR4_WORD0_SIZE;

	if (!(cache_validity & NFS_INO_INVALID_CHANGE))
		dst[0] &= ~FATTR4_WORD0_CHANGE;
}

static void nfs4_bitmap_copy_adjust_setattr(__u32 *dst,
		const __u32 *src, struct inode *inode)
{
	nfs4_bitmap_copy_adjust(dst, src, inode);
}

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;		/* next */
		*p++ = xdr_zero;	/* cookie, first word */
		*p++ = xdr_one;		/* cookie, second word */
		*p++ = xdr_one;		/* entry len */
		memcpy(p, ".\0\0\0", 4);	/* entry */
		p++;
		*p++ = xdr_one;		/* bitmap length */
		*p++ = htonl(attrs);	/* bitmap */
		*p++ = htonl(12);	/* attribute buffer length */
		*p++ = htonl(NF4DIR);
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;			/* next */
	*p++ = xdr_zero;		/* cookie, first word */
	*p++ = xdr_two;			/* cookie, second word */
	*p++ = xdr_two;			/* entry len */
	memcpy(p, "..\0\0", 4);		/* entry */
	p++;
	*p++ = xdr_one;			/* bitmap length */
	*p++ = htonl(attrs);		/* bitmap */
	*p++ = htonl(12);		/* attribute buffer length */
	*p++ = htonl(NF4DIR);
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static void nfs4_test_and_free_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;

	ops->test_and_free_expired(server, stateid, cred);
}

static void __nfs4_free_revoked_stateid(struct nfs_server *server,
		nfs4_stateid *stateid,
		const struct cred *cred)
{
	stateid->type = NFS4_REVOKED_STATEID_TYPE;
	nfs4_test_and_free_stateid(server, stateid, cred);
}

static void nfs4_free_revoked_stateid(struct nfs_server *server,
		const nfs4_stateid *stateid,
		const struct cred *cred)
{
	nfs4_stateid tmp;

	nfs4_stateid_copy(&tmp, stateid);
	__nfs4_free_revoked_stateid(server, &tmp, cred);
}

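/*
 * Exponential backoff helper: returns the current retry delay, clamped
 * to the range [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX], and doubles
 * *timeout for the next attempt.
 */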
static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

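/*
 * Sleep for the backoff interval in a freezable state.  The killable
 * variant only aborts (-EINTR) on a fatal signal; the interruptible
 * variant below also honours non-fatal signals (-ERESTARTSYS).
 */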
static int nfs4_delay_killable(long *timeout)
{
	might_sleep();

	freezable_schedule_timeout_killable_unsafe(
		nfs4_update_delay(timeout));
	if (!__fatal_signal_pending(current))
		return 0;
	return -EINTR;
}

static int nfs4_delay_interruptible(long *timeout)
{
	might_sleep();

	freezable_schedule_timeout_interruptible(nfs4_update_delay(timeout));
	if (!signal_pending(current))
		return 0;
	return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
}

static int nfs4_delay(long *timeout, bool interruptible)
{
	if (interruptible)
		return nfs4_delay_interruptible(timeout);
	return nfs4_delay_killable(timeout);
}

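/*
 * Only open, lock and delegation stateids can be recovered by the
 * state manager; for any other stateid type return NULL so that the
 * caller falls back to generic error handling.
 */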
static const nfs4_stateid *
nfs4_recoverable_stateid(const nfs4_stateid *stateid)
{
	if (!stateid)
		return NULL;
	switch (stateid->type) {
	case NFS4_OPEN_STATEID_TYPE:
	case NFS4_LOCK_STATEID_TYPE:
	case NFS4_DELEGATION_STATEID_TYPE:
		return stateid;
	default:
		break;
	}
	return NULL;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	const nfs4_stateid *stateid;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->delay = 0;
	exception->recovering = 0;
	exception->retry = 0;

	stateid = nfs4_recoverable_stateid(exception->stateid);
	if (stateid == NULL && state != NULL)
		stateid = nfs4_recoverable_stateid(&state->stateid);

	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		if (inode != NULL && S_ISREG(inode->i_mode))
			pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
		if (inode != NULL && stateid != NULL) {
			nfs_inode_find_state_and_recover(inode,
					stateid);
			goto wait_on_recovery;
		}
		/* Fall through */
	case -NFS4ERR_OPENMODE:
		if (inode) {
			int err;

			err = nfs_async_inode_return_delegation(inode,
					stateid);
			if (err == 0)
				goto wait_on_recovery;
			if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
				exception->retry = 1;
				break;
			}
		}
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_MOVED:
		ret = nfs4_schedule_migration_recovery(server);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		/* Handled in nfs41_sequence_process() */
		goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
		/* Fall through */
	case -NFS4ERR_DELAY:
		nfs_inc_server_stats(server, NFSIOS_DELAY);
		/* Fall through */
	case -NFS4ERR_GRACE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		exception->delay = 1;
		return 0;

	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	exception->recovering = 1;
	return 0;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		ret = nfs4_delay(&exception->timeout,
				exception->interruptible);
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		ret = nfs4_wait_clnt_recover(clp);
		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
			return -EIO;
		goto out_retry;
	}
	return ret;
out_retry:
	if (ret == 0)
		exception->retry = 1;
	return ret;
}

static int
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		rpc_delay(task, nfs4_update_delay(&exception->timeout));
		goto out_retry;
	}
	if (exception->recovering) {
		if (exception->task_is_privileged)
			return -EDEADLOCK;
		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
		goto out_retry;
	}
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		ret = -EIO;
	return ret;
out_retry:
	if (ret == 0) {
		exception->retry = 1;
		/*
		 * For NFS4ERR_MOVED, the client transport will need to
		 * be recomputed after migration recovery has completed.
		 */
		if (errorcode == -NFS4ERR_MOVED)
			rpc_task_release_transport(task);
	}
	return ret;
}

int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
			struct nfs4_state *state, long *timeout)
{
	struct nfs4_exception exception = {
		.state = state,
	};

	if (task->tk_status >= 0)
		return 0;
	if (timeout)
		exception.timeout = *timeout;
	task->tk_status = nfs4_async_handle_exception(task, server,
			task->tk_status,
			&exception);
	if (exception.delay && timeout)
		*timeout = exception.timeout;
	if (exception.retry)
		return -EAGAIN;
	return 0;
}

/*
 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
 * or 'false' otherwise.
 */
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
	return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_has_session(clp))
		do_renew_lease(clp, timestamp);
}

struct nfs4_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

void nfs4_init_sequence(struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res, int cache_reply,
			int privileged)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = privileged;

	res->sr_slot = NULL;
}

static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);

	res->sr_slot = NULL;
}

static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_release_slot(struct nfs4_slot *slot)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	bool send_new_highest_used_slotid = false;

	if (!slot)
		return;
	tbl = slot->table;
	session = tbl->session;

	/* Bump the slot sequence number */
	if (slot->seq_done)
		slot->seq_nr++;
	slot->seq_done = 0;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_user_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	if (send_new_highest_used_slotid)
		nfs41_notify_server(session->clp);
	if (waitqueue_active(&tbl->slot_waitq))
		wake_up_all(&tbl->slot_waitq);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	nfs41_release_slot(res->sr_slot);
	res->sr_slot = NULL;
}

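/*
 * Track the highest sequence number ever transmitted on this slot and
 * the highest one the server has acknowledged; the gap between the two
 * is what the NFS4ERR_SEQ_MISORDERED handling below uses to detect
 * interrupted slot usage.
 */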
static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
		u32 seqnr)
{
	if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
		slot->seq_nr_highest_sent = seqnr;
}
static void nfs4_slot_sequence_acked(struct nfs4_slot *slot,
		u32 seqnr)
{
	slot->seq_nr_highest_sent = seqnr;
	slot->seq_nr_last_acked = seqnr;
}

static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
				struct nfs4_slot *slot)
{
	struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
	if (!IS_ERR(task))
		rpc_put_task_async(task);
}

static int nfs41_sequence_process(struct rpc_task *task,
		struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	int status;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task) || slot->seq_done)
		goto out;

	session = slot->table->session;
	clp = session->clp;

	trace_nfs4_sequence_done(session, res);

	status = res->sr_status;
	if (task->tk_status == -NFS4ERR_DEADSESSION)
		status = -NFS4ERR_DEADSESSION;

	/* Check the SEQUENCE operation status */
	switch (status) {
	case 0:
		/* Mark this sequence number as having been acked */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		/* Update the slot's sequence and clientid lease timer */
		slot->seq_done = 1;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
				!!slot->privileged);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation..
		 */
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		slot->seq_done = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_SEQ_FALSE_RETRY:
		/*
		 * The server thinks we tried to replay a request.
		 * Retry the call after bumping the sequence ID.
		 */
		nfs4_slot_sequence_acked(slot, slot->seq_nr);
		goto retry_new_seq;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		if (slot->slot_nr < slot->table->target_highest_slotid)
			goto session_recover;
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
		/*
		 * Were one or more calls using this slot interrupted?
		 * If the server never received the request, then our
		 * transmitted slot sequence number may be too high. However,
		 * if the server did receive the request then it might
		 * accidentally give us a reply with a mismatched operation.
		 * We can sort this out by sending a lone sequence operation
		 * to the server on the same slot.
		 */
		if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
			slot->seq_nr--;
			if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
				nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
				res->sr_slot = NULL;
			}
			goto retry_nowait;
		}
		/*
		 * RFC5661:
		 * A retry might be sent while the original request is
		 * still in progress on the replier. The replier SHOULD
		 * deal with the issue by returning NFS4ERR_DELAY as the
		 * reply to SEQUENCE or CB_SEQUENCE operation, but
		 * implementations MAY return NFS4ERR_SEQ_MISORDERED.
		 *
		 * Restart the search after a delay.
		 */
		slot->seq_nr = slot->seq_nr_highest_sent;
		goto out_retry;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		goto session_recover;
	default:
		/* Just update the slot sequence no. */
		slot->seq_done = 1;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
out_noaction:
	return ret;
session_recover:
	nfs4_schedule_session_recovery(session, status);
	dprintk("%s ERROR: %d Reset session\n", __func__, status);
	nfs41_sequence_free_slot(res);
	goto out;
retry_new_seq:
	++slot->seq_nr;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		nfs41_sequence_free_slot(res);
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (!nfs41_sequence_process(task, res))
		return 0;
	if (res->sr_slot != NULL)
		nfs41_sequence_free_slot(res);
	return 1;

}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (res->sr_slot->table->session != NULL)
		return nfs41_sequence_process(task, res);
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL) {
		if (res->sr_slot->table->session != NULL)
			nfs41_sequence_free_slot(res);
		else
			nfs40_sequence_free_slot(res);
	}
}

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (!res->sr_slot->table->session)
		return nfs40_sequence_done(task, res);
	return nfs41_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

#else	/* !CONFIG_NFS_V4_1 */

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
}

int nfs4_sequence_done(struct rpc_task *task,
		       struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

#endif	/* !CONFIG_NFS_V4_1 */

static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
{
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	res->sr_status = 1;
}

static
void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res,
		struct nfs4_slot *slot)
{
	if (!slot)
		return;
	slot->privileged = args->sa_privileged ? 1 : 0;
	args->sa_slot = slot;

	res->sr_slot = slot;
}

int nfs4_setup_sequence(struct nfs_client *client,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(client);
	struct nfs4_slot_table *tbl = client->cl_slot_tbl;
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	if (session)
		tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* The state manager will wait until the slot table is empty */
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			goto out_sleep_timeout;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	nfs4_sequence_attach_slot(args, res, slot);

	trace_nfs4_setup_sequence(session, args);
out_start:
	nfs41_sequence_res_init(res);
	rpc_call_start(task);
	return 0;
out_sleep_timeout:
	/* Try again in 1/4 second */
	if (args->sa_privileged)
		rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
				jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
				NULL, jiffies + (HZ >> 2));
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs4_setup_sequence);

static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server->nfs_client,
				data->seq_args, data->seq_res, task);
}

static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};

static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
{
	int ret;
	struct rpc_task *task;

	task = rpc_run_task(task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);

	ret = task->tk_status;
	rpc_put_task(task);
	return ret;
}

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data
	};

	return nfs4_call_sync_custom(&task_setup);
}

int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply, 0);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

static void
nfs4_inc_nlink_locked(struct inode *inode)
{
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
	inc_nlink(inode);
}

static void
nfs4_dec_nlink_locked(struct inode *inode)
{
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
	drop_nlink(inode);
}

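/*
 * Update the directory attributes after a namespace-changing operation.
 * If the change was reported as atomic and the 'before' value matches
 * our cached change attribute, the cache is still consistent; otherwise
 * force a lookup revalidation of the directory.
 */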
static void
update_changeattr_locked(struct inode *dir, struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	nfsi->cache_validity |= NFS_INO_INVALID_CTIME
		| NFS_INO_INVALID_MTIME
		| NFS_INO_INVALID_DATA
		| cache_validity;
	if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(dir)) {
		nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
		nfsi->attrtimeo_timestamp = jiffies;
	} else {
		nfs_force_lookup_revalidate(dir);
		if (cinfo->before != inode_peek_iversion_raw(dir))
			nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
				NFS_INO_INVALID_ACL;
	}
	inode_set_iversion_raw(dir, cinfo->after);
	nfsi->read_cache_jiffies = timestamp;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
	nfs_fscache_invalidate(dir);
}

static void
update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
		unsigned long timestamp, unsigned long cache_validity)
{
	spin_lock(&dir->i_lock);
	update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
	spin_unlock(&dir->i_lock);
}

struct nfs4_open_createattrs {
	struct nfs4_label *label;
	struct iattr *sattr;
	const __u32 verf[2];
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
{
	return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
}

static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
{
	fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);

	return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT)
		res |= NFS4_SHARE_WANT_NO_DELEG;
out:
	return res;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.f_label = p->f_label;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct nfs4_open_createattrs *c,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_label *label = (c != NULL) ? c->label : NULL;
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
			fmode, flags);
	if (flags & O_CREAT) {
		p->o_arg.umask = current_umask();
		p->o_arg.label = nfs4_label_copy(p->a_label, label);
		if (c->sattr != NULL && c->sattr->ia_valid != 0) {
			p->o_arg.u.attrs = &p->attrs;
			memcpy(&p->attrs, c->sattr, sizeof(p->attrs));

			memcpy(p->o_arg.u.verifier.data, c->verf,
					sizeof(p->o_arg.u.verifier.data));
		}
	}
	/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
	 * will return permission denied for all bits until close */
	if (!(flags & O_EXCL)) {
		/* ask server to check for all possible rights as results
		 * are cached */
		switch (p->o_arg.claim) {
		default:
			break;
		case NFS4_OPEN_CLAIM_NULL:
		case NFS4_OPEN_CLAIM_FH:
			p->o_arg.access = NFS4_ACCESS_READ |
				NFS4_ACCESS_MODIFY |
				NFS4_ACCESS_EXTEND |
				NFS4_ACCESS_EXECUTE;
		}
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs4_lgopen_release(p->lgp);
	nfs_free_seqid(p->o_arg.seqid);
	nfs4_sequence_free_slot(&p->o_res.seq_res);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
		fmode_t fmode)
{
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
		return state->n_rdwr != 0;
	case FMODE_WRITE:
		return state->n_wronly != 0;
	case FMODE_READ:
		return state->n_rdonly != 0;
	}
	WARN_ON_ONCE(1);
	return false;
}

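/*
 * Can this open be satisfied from open state we already hold, without
 * sending a new OPEN to the server?  Not if O_EXCL or O_TRUNC was
 * requested, not for CLAIM_NULL/CLAIM_FH claims, and only if we already
 * hold an open of a compatible mode.
 */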
static int can_open_cached(struct nfs4_state *state, fmode_t mode,
		int open_mode, enum open_claim_type4 claim)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		goto out;
	default:
		break;
	}
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
		enum open_claim_type4 claim)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
		/* Fall through */
	default:
		return 0;
	}
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

#ifdef CONFIG_NFS_V4_1
static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
{
	if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
		return true;
	if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
		return true;
	if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
		return true;
	return false;
}
#endif /* CONFIG_NFS_V4_1 */

static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
{
	if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
		wake_up_all(&state->waitq);
}

static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

/*
 * Check for whether or not the caller may update the open stateid
 * to the value passed in by stateid.
 *
 * Note: This function relies heavily on the server implementing
 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
 * correctly.
 * i.e. The stateid seqids have to be initialised to 1, and
 * are then incremented on every state transition.
 */
static bool nfs_stateid_is_sequential(struct nfs4_state *state,
		const nfs4_stateid *stateid)
{
	if (test_bit(NFS_OPEN_STATE, &state->flags)) {
		/* The common case - we're updating to a new sequence number */
		if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
			nfs4_stateid_is_next(&state->open_stateid, stateid)) {
			return true;
		}
	} else {
		/* This is the first OPEN in this generation */
		if (stateid->seqid == cpu_to_be32(1))
			return true;
	}
	return false;
}

static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
		return;
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	set_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle OPEN+OPEN_DOWNGRADE races */
	if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
		nfs_resync_open_stateid_locked(state);
		goto out;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, 0);
out:
	nfs_state_log_update_open_stateid(state);
}

static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	/* Ignore, if the CLOSE argument doesn't match the current stateid */
	if (nfs4_state_match_open_stateid_other(state, arg_stateid))
		nfs_clear_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state,
		const nfs4_stateid *stateid, nfs4_stateid *freeme)
	__must_hold(&state->owner->so_lock)
	__must_hold(&state->seqlock)
	__must_hold(RCU)

{
	DEFINE_WAIT(wait);
	int status = 0;
	for (;;) {

		if (nfs_stateid_is_sequential(state, stateid))
			break;

		if (status)
			break;
		/* Rely on seqids for serialisation with NFSv4.0 */
		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
			break;

		set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
		prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
		/*
		 * Ensure we process the state changes in the same order
		 * in which the server processed them by delaying the
		 * update of the stateid until we are in sequence.
		 */
		write_sequnlock(&state->seqlock);
		spin_unlock(&state->owner->so_lock);
		rcu_read_unlock();
		trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);

		if (!fatal_signal_pending(current)) {
			if (schedule_timeout(5*HZ) == 0)
				status = -EAGAIN;
			else
				status = 0;
		} else
			status = -EINTR;
		finish_wait(&state->waitq, &wait);
		rcu_read_lock();
		spin_lock(&state->owner->so_lock);
		write_seqlock(&state->seqlock);
	}

	if (test_bit(NFS_OPEN_STATE, &state->flags) &&
	    !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs4_stateid_copy(freeme, &state->open_stateid);
		nfs_test_and_clear_all_open_stateid(state);
	}

	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	trace_nfs4_open_stateid_update(state->inode, stateid, status);
	nfs_state_log_update_open_stateid(state);
}

static void nfs_state_set_open_stateid(struct nfs4_state *state,
		const nfs4_stateid *open_stateid,
		fmode_t fmode,
		nfs4_stateid *freeme)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, open_stateid, freeme);
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_WRITE:
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	set_bit(NFS_OPEN_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_state_set_delegation(struct nfs4_state *state,
		const nfs4_stateid *deleg_stateid,
		fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, deleg_stateid);
	set_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

static void nfs_state_clear_delegation(struct nfs4_state *state)
{
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	write_sequnlock(&state->seqlock);
}

1735static int update_open_stateid(struct nfs4_state *state,
1736 const nfs4_stateid *open_stateid,
1737 const nfs4_stateid *delegation,
1738 fmode_t fmode)
1739{
1740 struct nfs_server *server = NFS_SERVER(state->inode);
1741 struct nfs_client *clp = server->nfs_client;
1742 struct nfs_inode *nfsi = NFS_I(state->inode);
1743 struct nfs_delegation *deleg_cur;
1744 nfs4_stateid freeme = { };
1745 int ret = 0;
1746
1747 fmode &= (FMODE_READ|FMODE_WRITE);
1748
1749 rcu_read_lock();
1750 spin_lock(&state->owner->so_lock);
1751 if (open_stateid != NULL) {
1752 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
1753 ret = 1;
1754 }
1755
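/* If the inode holds a delegation that covers this open mode, record it in the state */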
1756 deleg_cur = rcu_dereference(nfsi->delegation);
1757 if (deleg_cur == NULL)
1758 goto no_delegation;
1759
1760 spin_lock(&deleg_cur->lock);
1761 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1762 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1763 (deleg_cur->type & fmode) != fmode)
1764 goto no_delegation_unlock;
1765
1766 if (delegation == NULL)
1767 delegation = &deleg_cur->stateid;
1768 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1769 goto no_delegation_unlock;
1770
1771 nfs_mark_delegation_referenced(deleg_cur);
1772 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
1773 ret = 1;
1774no_delegation_unlock:
1775 spin_unlock(&deleg_cur->lock);
1776no_delegation:
1777 if (ret)
1778 update_open_stateflags(state, fmode);
1779 spin_unlock(&state->owner->so_lock);
1780 rcu_read_unlock();
1781
1782 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1783 nfs4_schedule_state_manager(clp);
1784 if (freeme.type != 0)
1785 nfs4_test_and_free_stateid(server, &freeme,
1786 state->owner->so_cred);
1787
1788 return ret;
1789}
1790
1791static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1792 const nfs4_stateid *stateid)
1793{
1794 struct nfs4_state *state = lsp->ls_state;
1795 bool ret = false;
1796
1797 spin_lock(&state->state_lock);
1798 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1799 goto out_noupdate;
1800 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1801 goto out_noupdate;
1802 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1803 ret = true;
1804out_noupdate:
1805 spin_unlock(&state->state_lock);
1806 return ret;
1807}
1808
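/* Return the delegation up front if it does not cover the requested open mode */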
1809static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1810{
1811 struct nfs_delegation *delegation;
1812
1813 fmode &= FMODE_READ|FMODE_WRITE;
1814 rcu_read_lock();
1815 delegation = rcu_dereference(NFS_I(inode)->delegation);
1816 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1817 rcu_read_unlock();
1818 return;
1819 }
1820 rcu_read_unlock();
1821 nfs4_inode_return_delegation(inode);
1822}
1823
1824static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1825{
1826 struct nfs4_state *state = opendata->state;
1827 struct nfs_delegation *delegation;
1828 int open_mode = opendata->o_arg.open_flags;
1829 fmode_t fmode = opendata->o_arg.fmode;
1830 enum open_claim_type4 claim = opendata->o_arg.claim;
1831 nfs4_stateid stateid;
1832 int ret = -EAGAIN;
1833
1834 for (;;) {
1835 spin_lock(&state->owner->so_lock);
1836 if (can_open_cached(state, fmode, open_mode, claim)) {
1837 update_open_stateflags(state, fmode);
1838 spin_unlock(&state->owner->so_lock);
1839 goto out_return_state;
1840 }
1841 spin_unlock(&state->owner->so_lock);
1842 rcu_read_lock();
1843 delegation = nfs4_get_valid_delegation(state->inode);
1844 if (!can_open_delegated(delegation, fmode, claim)) {
1845 rcu_read_unlock();
1846 break;
1847 }
1848 /* Save the delegation */
1849 nfs4_stateid_copy(&stateid, &delegation->stateid);
1850 rcu_read_unlock();
1851 nfs_release_seqid(opendata->o_arg.seqid);
1852 if (!opendata->is_recover) {
1853 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1854 if (ret != 0)
1855 goto out;
1856 }
1857 ret = -EAGAIN;
1858
1859 /* Try to update the stateid using the delegation */
1860 if (update_open_stateid(state, NULL, &stateid, fmode))
1861 goto out_return_state;
1862 }
1863out:
1864 return ERR_PTR(ret);
1865out_return_state:
1866 refcount_inc(&state->count);
1867 return state;
1868}
1869
1870static void
1871nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1872{
1873 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1874 struct nfs_delegation *delegation;
1875 int delegation_flags = 0;
1876
1877 rcu_read_lock();
1878 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1879 if (delegation)
1880 delegation_flags = delegation->flags;
1881 rcu_read_unlock();
1882 switch (data->o_arg.claim) {
1883 default:
1884 break;
1885 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1886 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1887 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1888 "returning a delegation for "
1889 "OPEN(CLAIM_DELEGATE_CUR)\n",
1890 clp->cl_hostname);
1891 return;
1892 }
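/* A delegation flagged NEED_RECLAIM is re-established rather than recorded as new */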
1893 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1894 nfs_inode_set_delegation(state->inode,
1895 data->owner->so_cred,
1896 data->o_res.delegation_type,
1897 &data->o_res.delegation,
1898 data->o_res.pagemod_limit);
1899 else
1900 nfs_inode_reclaim_delegation(state->inode,
1901 data->owner->so_cred,
1902 data->o_res.delegation_type,
1903 &data->o_res.delegation,
1904 data->o_res.pagemod_limit);
1905
1906 if (data->o_res.do_recall)
1907 nfs_async_inode_return_delegation(state->inode,
1908 &data->o_res.delegation);
1909}
1910
1911/*
1912 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1913 * and update the nfs4_state.
1914 */
1915static struct nfs4_state *
1916_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1917{
1918 struct inode *inode = data->state->inode;
1919 struct nfs4_state *state = data->state;
1920 int ret;
1921
1922 if (!data->rpc_done) {
1923 if (data->rpc_status)
1924 return ERR_PTR(data->rpc_status);
1925 /* cached opens have already been processed */
1926 goto update;
1927 }
1928
1929 ret = nfs_refresh_inode(inode, &data->f_attr);
1930 if (ret)
1931 return ERR_PTR(ret);
1932
1933 if (data->o_res.delegation_type != 0)
1934 nfs4_opendata_check_deleg(data, state);
1935update:
1936 if (!update_open_stateid(state, &data->o_res.stateid,
1937 NULL, data->o_arg.fmode))
1938 return ERR_PTR(-EAGAIN);
1939 refcount_inc(&state->count);
1940
1941 return state;
1942}
1943
1944static struct inode *
1945nfs4_opendata_get_inode(struct nfs4_opendata *data)
1946{
1947 struct inode *inode;
1948
1949 switch (data->o_arg.claim) {
1950 case NFS4_OPEN_CLAIM_NULL:
1951 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1952 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1953 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1954 return ERR_PTR(-EAGAIN);
1955 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
1956 &data->f_attr, data->f_label);
1957 break;
1958 default:
1959 inode = d_inode(data->dentry);
1960 ihold(inode);
1961 nfs_refresh_inode(inode, &data->f_attr);
1962 }
1963 return inode;
1964}
1965
1966static struct nfs4_state *
1967nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
1968{
1969 struct nfs4_state *state;
1970 struct inode *inode;
1971
1972 inode = nfs4_opendata_get_inode(data);
1973 if (IS_ERR(inode))
1974 return ERR_CAST(inode);
1975 if (data->state != NULL && data->state->inode == inode) {
1976 state = data->state;
1977 refcount_inc(&state->count);
1978 } else
1979 state = nfs4_get_open_state(inode, data->owner);
1980 iput(inode);
1981 if (state == NULL)
1982 state = ERR_PTR(-ENOMEM);
1983 return state;
1984}
1985
1986static struct nfs4_state *
1987_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1988{
1989 struct nfs4_state *state;
1990
1991 if (!data->rpc_done) {
1992 state = nfs4_try_open_cached(data);
1993 trace_nfs4_cached_open(data->state);
1994 goto out;
1995 }
1996
1997 state = nfs4_opendata_find_nfs4_state(data);
1998 if (IS_ERR(state))
1999 goto out;
2000
2001 if (data->o_res.delegation_type != 0)
2002 nfs4_opendata_check_deleg(data, state);
2003 if (!update_open_stateid(state, &data->o_res.stateid,
2004 NULL, data->o_arg.fmode)) {
2005 nfs4_put_open_state(state);
2006 state = ERR_PTR(-EAGAIN);
2007 }
2008out:
2009 nfs_release_seqid(data->o_arg.seqid);
2010 return state;
2011}
2012
2013static struct nfs4_state *
2014nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
2015{
2016 struct nfs4_state *ret;
2017
2018 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
2019 ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
2020 else
2021 ret = _nfs4_opendata_to_nfs4_state(data);
2022 nfs4_sequence_free_slot(&data->o_res.seq_res);
2023 return ret;
2024}
2025
2026static struct nfs_open_context *
2027nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
2028{
2029 struct nfs_inode *nfsi = NFS_I(state->inode);
2030 struct nfs_open_context *ctx;
2031
2032 rcu_read_lock();
2033 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
2034 if (ctx->state != state)
2035 continue;
2036 if ((ctx->mode & mode) != mode)
2037 continue;
2038 if (!get_nfs_open_context(ctx))
2039 continue;
2040 rcu_read_unlock();
2041 return ctx;
2042 }
2043 rcu_read_unlock();
2044 return ERR_PTR(-ENOENT);
2045}
2046
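/* Prefer a read/write open context, then write-only, then read-only */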
2047static struct nfs_open_context *
2048nfs4_state_find_open_context(struct nfs4_state *state)
2049{
2050 struct nfs_open_context *ctx;
2051
2052 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
2053 if (!IS_ERR(ctx))
2054 return ctx;
2055 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
2056 if (!IS_ERR(ctx))
2057 return ctx;
2058 return nfs4_state_find_open_context_mode(state, FMODE_READ);
2059}
2060
2061static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
2062 struct nfs4_state *state, enum open_claim_type4 claim)
2063{
2064 struct nfs4_opendata *opendata;
2065
2066 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
2067 NULL, claim, GFP_NOFS);
2068 if (opendata == NULL)
2069 return ERR_PTR(-ENOMEM);
2070 opendata->state = state;
2071 refcount_inc(&state->count);
2072 return opendata;
2073}
2074
2075static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
2076 fmode_t fmode)
2077{
2078 struct nfs4_state *newstate;
2079 int ret;
2080
2081 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
2082 return 0;
2083 opendata->o_arg.open_flags = 0;
2084 opendata->o_arg.fmode = fmode;
2085 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
2086 NFS_SB(opendata->dentry->d_sb),
2087 fmode, 0);
2088 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
2089 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
2090 nfs4_init_opendata_res(opendata);
2091 ret = _nfs4_recover_proc_open(opendata);
2092 if (ret != 0)
2093 return ret;
2094 newstate = nfs4_opendata_to_nfs4_state(opendata);
2095 if (IS_ERR(newstate))
2096 return PTR_ERR(newstate);
2097 if (newstate != opendata->state)
2098 ret = -ESTALE;
2099 nfs4_close_state(newstate, fmode);
2100 return ret;
2101}
2102
2103static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
2104{
2105 int ret;
2106
2107 /* memory barrier prior to reading state->n_* */
2108 smp_rmb();
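/* Recover each share mode currently held on this state */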
2109 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2110 if (ret != 0)
2111 return ret;
2112 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2113 if (ret != 0)
2114 return ret;
2115 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
2116 if (ret != 0)
2117 return ret;
2118 /*
2119 * We may have performed cached opens for all three recoveries.
2120 * Check if we need to update the current stateid.
2121 */
2122 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
2123 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
2124 write_seqlock(&state->seqlock);
2125 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
2126 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2127 write_sequnlock(&state->seqlock);
2128 }
2129 return 0;
2130}
2131
2132/*
2133 * OPEN_RECLAIM:
2134 * reclaim state on the server after a reboot.
2135 */
2136static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2137{
2138 struct nfs_delegation *delegation;
2139 struct nfs4_opendata *opendata;
2140 fmode_t delegation_type = 0;
2141 int status;
2142
2143 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2144 NFS4_OPEN_CLAIM_PREVIOUS);
2145 if (IS_ERR(opendata))
2146 return PTR_ERR(opendata);
2147 rcu_read_lock();
2148 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2149 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
2150 delegation_type = delegation->type;
2151 rcu_read_unlock();
2152 opendata->o_arg.u.delegation_type = delegation_type;
2153 status = nfs4_open_recover(opendata, state);
2154 nfs4_opendata_put(opendata);
2155 return status;
2156}
2157
2158static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2159{
2160 struct nfs_server *server = NFS_SERVER(state->inode);
2161 struct nfs4_exception exception = { };
2162 int err;
2163 do {
2164 err = _nfs4_do_open_reclaim(ctx, state);
2165 trace_nfs4_open_reclaim(ctx, 0, err);
2166 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2167 continue;
2168 if (err != -NFS4ERR_DELAY)
2169 break;
2170 nfs4_handle_exception(server, err, &exception);
2171 } while (exception.retry);
2172 return err;
2173}
2174
2175static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
2176{
2177 struct nfs_open_context *ctx;
2178 int ret;
2179
2180 ctx = nfs4_state_find_open_context(state);
2181 if (IS_ERR(ctx))
2182 return -EAGAIN;
2183 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2184 nfs_state_clear_open_state_flags(state);
2185 ret = nfs4_do_open_reclaim(ctx, state);
2186 put_nfs_open_context(ctx);
2187 return ret;
2188}
2189
2190static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
2191{
2192 switch (err) {
2193 default:
2194 printk(KERN_ERR "NFS: %s: unhandled error "
2195 "%d.\n", __func__, err);
2196 case 0:
2197 case -ENOENT:
2198 case -EAGAIN:
2199 case -ESTALE:
2200 case -ETIMEDOUT:
2201 break;
2202 case -NFS4ERR_BADSESSION:
2203 case -NFS4ERR_BADSLOT:
2204 case -NFS4ERR_BAD_HIGH_SLOT:
2205 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2206 case -NFS4ERR_DEADSESSION:
2207 return -EAGAIN;
2208 case -NFS4ERR_STALE_CLIENTID:
2209 case -NFS4ERR_STALE_STATEID:
2210 /* Don't recall a delegation if it was lost */
2211 nfs4_schedule_lease_recovery(server->nfs_client);
2212 return -EAGAIN;
2213 case -NFS4ERR_MOVED:
2214 nfs4_schedule_migration_recovery(server);
2215 return -EAGAIN;
2216 case -NFS4ERR_LEASE_MOVED:
2217 nfs4_schedule_lease_moved_recovery(server->nfs_client);
2218 return -EAGAIN;
2219 case -NFS4ERR_DELEG_REVOKED:
2220 case -NFS4ERR_ADMIN_REVOKED:
2221 case -NFS4ERR_EXPIRED:
2222 case -NFS4ERR_BAD_STATEID:
2223 case -NFS4ERR_OPENMODE:
2224 nfs_inode_find_state_and_recover(state->inode,
2225 stateid);
2226 nfs4_schedule_stateid_recovery(server, state);
2227 return -EAGAIN;
2228 case -NFS4ERR_DELAY:
2229 case -NFS4ERR_GRACE:
2230 ssleep(1);
2231 return -EAGAIN;
2232 case -ENOMEM:
2233 case -NFS4ERR_DENIED:
2234 if (fl) {
2235 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
2236 if (lsp)
2237 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2238 }
2239 return 0;
2240 }
2241 return err;
2242}
2243
2244int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2245 struct nfs4_state *state, const nfs4_stateid *stateid)
2246{
2247 struct nfs_server *server = NFS_SERVER(state->inode);
2248 struct nfs4_opendata *opendata;
2249 int err = 0;
2250
2251 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2252 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
2253 if (IS_ERR(opendata))
2254 return PTR_ERR(opendata);
2255 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2256 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
2257 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2258 if (err)
2259 goto out;
2260 }
2261 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
2262 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2263 if (err)
2264 goto out;
2265 }
2266 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
2267 err = nfs4_open_recover_helper(opendata, FMODE_READ);
2268 if (err)
2269 goto out;
2270 }
2271 nfs_state_clear_delegation(state);
2272out:
2273 nfs4_opendata_put(opendata);
2274 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
2275}
2276
2277static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
2278{
2279 struct nfs4_opendata *data = calldata;
2280
2281 nfs4_setup_sequence(data->o_arg.server->nfs_client,
2282 &data->c_arg.seq_args, &data->c_res.seq_res, task);
2283}
2284
2285static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
2286{
2287 struct nfs4_opendata *data = calldata;
2288
2289 nfs40_sequence_done(task, &data->c_res.seq_res);
2290
2291 data->rpc_status = task->tk_status;
2292 if (data->rpc_status == 0) {
2293 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
2294 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2295 renew_lease(data->o_res.server, data->timestamp);
2296 data->rpc_done = true;
2297 }
2298}
2299
2300static void nfs4_open_confirm_release(void *calldata)
2301{
2302 struct nfs4_opendata *data = calldata;
2303 struct nfs4_state *state = NULL;
2304
2305 /* If this request hasn't been cancelled, do nothing */
2306 if (!data->cancelled)
2307 goto out_free;
2308 /* In case of error, no cleanup! */
2309 if (!data->rpc_done)
2310 goto out_free;
2311 state = nfs4_opendata_to_nfs4_state(data);
2312 if (!IS_ERR(state))
2313 nfs4_close_state(state, data->o_arg.fmode);
2314out_free:
2315 nfs4_opendata_put(data);
2316}
2317
2318static const struct rpc_call_ops nfs4_open_confirm_ops = {
2319 .rpc_call_prepare = nfs4_open_confirm_prepare,
2320 .rpc_call_done = nfs4_open_confirm_done,
2321 .rpc_release = nfs4_open_confirm_release,
2322};
2323
2324/*
2325 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
2326 */
2327static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
2328{
2329 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
2330 struct rpc_task *task;
2331 struct rpc_message msg = {
2332 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
2333 .rpc_argp = &data->c_arg,
2334 .rpc_resp = &data->c_res,
2335 .rpc_cred = data->owner->so_cred,
2336 };
2337 struct rpc_task_setup task_setup_data = {
2338 .rpc_client = server->client,
2339 .rpc_message = &msg,
2340 .callback_ops = &nfs4_open_confirm_ops,
2341 .callback_data = data,
2342 .workqueue = nfsiod_workqueue,
2343 .flags = RPC_TASK_ASYNC,
2344 };
2345 int status;
2346
2347 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1,
2348 data->is_recover);
2349 kref_get(&data->kref);
2350 data->rpc_done = false;
2351 data->rpc_status = 0;
2352 data->timestamp = jiffies;
2353 task = rpc_run_task(&task_setup_data);
2354 if (IS_ERR(task))
2355 return PTR_ERR(task);
2356 status = rpc_wait_for_completion_task(task);
2357 if (status != 0) {
2358 data->cancelled = true;
2359 smp_wmb();
2360 } else
2361 status = data->rpc_status;
2362 rpc_put_task(task);
2363 return status;
2364}
2365
2366static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
2367{
2368 struct nfs4_opendata *data = calldata;
2369 struct nfs4_state_owner *sp = data->owner;
2370 struct nfs_client *clp = sp->so_server->nfs_client;
2371 enum open_claim_type4 claim = data->o_arg.claim;
2372
2373 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
2374 goto out_wait;
2375 /*
2376 * Check if we still need to send an OPEN call, or if we can use
2377 * a delegation instead.
2378 */
2379 if (data->state != NULL) {
2380 struct nfs_delegation *delegation;
2381
2382 if (can_open_cached(data->state, data->o_arg.fmode,
2383 data->o_arg.open_flags, claim))
2384 goto out_no_action;
2385 rcu_read_lock();
2386 delegation = nfs4_get_valid_delegation(data->state->inode);
2387 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
2388 goto unlock_no_action;
2389 rcu_read_unlock();
2390 }
2391 /* Update client id. */
2392 data->o_arg.clientid = clp->cl_clientid;
2393 switch (claim) {
2394 default:
2395 break;
2396 case NFS4_OPEN_CLAIM_PREVIOUS:
2397 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2398 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
2399 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2400 /* Fall through */
2401 case NFS4_OPEN_CLAIM_FH:
2402 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2403 }
2404 data->timestamp = jiffies;
2405 if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
2406 &data->o_arg.seq_args,
2407 &data->o_res.seq_res,
2408 task) != 0)
2409 nfs_release_seqid(data->o_arg.seqid);
2410
2411 /* Set the create mode (note dependency on the session type) */
2412 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2413 if (data->o_arg.open_flags & O_EXCL) {
2414 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2415 if (nfs4_has_persistent_session(clp))
2416 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2417 else if (clp->cl_mvops->minor_version > 0)
2418 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2419 }
2420 return;
2421unlock_no_action:
2422 trace_nfs4_cached_open(data->state);
2423 rcu_read_unlock();
2424out_no_action:
2425 task->tk_action = NULL;
2426out_wait:
2427 nfs4_sequence_done(task, &data->o_res.seq_res);
2428}
2429
2430static void nfs4_open_done(struct rpc_task *task, void *calldata)
2431{
2432 struct nfs4_opendata *data = calldata;
2433
2434 data->rpc_status = task->tk_status;
2435
2436 if (!nfs4_sequence_process(task, &data->o_res.seq_res))
2437 return;
2438
2439 if (task->tk_status == 0) {
2440 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2441 switch (data->o_res.f_attr->mode & S_IFMT) {
2442 case S_IFREG:
2443 break;
2444 case S_IFLNK:
2445 data->rpc_status = -ELOOP;
2446 break;
2447 case S_IFDIR:
2448 data->rpc_status = -EISDIR;
2449 break;
2450 default:
2451 data->rpc_status = -ENOTDIR;
2452 }
2453 }
2454 renew_lease(data->o_res.server, data->timestamp);
2455 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2456 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2457 }
2458 data->rpc_done = true;
2459}
2460
2461static void nfs4_open_release(void *calldata)
2462{
2463 struct nfs4_opendata *data = calldata;
2464 struct nfs4_state *state = NULL;
2465
2466 /* If this request hasn't been cancelled, do nothing */
2467 if (!data->cancelled)
2468 goto out_free;
2469 /* In case of error, no cleanup! */
2470 if (data->rpc_status != 0 || !data->rpc_done)
2471 goto out_free;
2472 /* In case we need an open_confirm, no cleanup! */
2473 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2474 goto out_free;
2475 state = nfs4_opendata_to_nfs4_state(data);
2476 if (!IS_ERR(state))
2477 nfs4_close_state(state, data->o_arg.fmode);
2478out_free:
2479 nfs4_opendata_put(data);
2480}
2481
2482static const struct rpc_call_ops nfs4_open_ops = {
2483 .rpc_call_prepare = nfs4_open_prepare,
2484 .rpc_call_done = nfs4_open_done,
2485 .rpc_release = nfs4_open_release,
2486};
2487
2488static int nfs4_run_open_task(struct nfs4_opendata *data,
2489 struct nfs_open_context *ctx)
2490{
2491 struct inode *dir = d_inode(data->dir);
2492 struct nfs_server *server = NFS_SERVER(dir);
2493 struct nfs_openargs *o_arg = &data->o_arg;
2494 struct nfs_openres *o_res = &data->o_res;
2495 struct rpc_task *task;
2496 struct rpc_message msg = {
2497 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2498 .rpc_argp = o_arg,
2499 .rpc_resp = o_res,
2500 .rpc_cred = data->owner->so_cred,
2501 };
2502 struct rpc_task_setup task_setup_data = {
2503 .rpc_client = server->client,
2504 .rpc_message = &msg,
2505 .callback_ops = &nfs4_open_ops,
2506 .callback_data = data,
2507 .workqueue = nfsiod_workqueue,
2508 .flags = RPC_TASK_ASYNC,
2509 };
2510 int status;
2511
2512 kref_get(&data->kref);
2513 data->rpc_done = false;
2514 data->rpc_status = 0;
2515 data->cancelled = false;
2516 data->is_recover = false;
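/* A NULL open context means this OPEN is being sent as part of state recovery */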
2517 if (!ctx) {
2518 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
2519 data->is_recover = true;
2520 task_setup_data.flags |= RPC_TASK_TIMEOUT;
2521 } else {
2522 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
2523 pnfs_lgopen_prepare(data, ctx);
2524 }
2525 task = rpc_run_task(&task_setup_data);
2526 if (IS_ERR(task))
2527 return PTR_ERR(task);
2528 status = rpc_wait_for_completion_task(task);
2529 if (status != 0) {
2530 data->cancelled = true;
2531 smp_wmb();
2532 } else
2533 status = data->rpc_status;
2534 rpc_put_task(task);
2535
2536 return status;
2537}
2538
2539static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2540{
2541 struct inode *dir = d_inode(data->dir);
2542 struct nfs_openres *o_res = &data->o_res;
2543 int status;
2544
2545 status = nfs4_run_open_task(data, NULL);
2546 if (status != 0 || !data->rpc_done)
2547 return status;
2548
2549 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2550
2551 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
2552 status = _nfs4_proc_open_confirm(data);
2553
2554 return status;
2555}
2556
2557/*
2558 * Additional permission checks in order to distinguish between an
2559 * open for read, and an open for execute. This works around the
2560 * fact that NFSv4 OPEN treats read and execute permissions as being
2561 * the same.
2562 * Note that in the non-execute case, we want to turn off permission
2563 * checking if we just created a new file (POSIX open() semantics).
2564 */
2565static int nfs4_opendata_access(const struct cred *cred,
2566 struct nfs4_opendata *opendata,
2567 struct nfs4_state *state, fmode_t fmode,
2568 int openflags)
2569{
2570 struct nfs_access_entry cache;
2571 u32 mask, flags;
2572
2573 /* access call failed or for some reason the server doesn't
2574 * support any access modes -- defer access call until later */
2575 if (opendata->o_res.access_supported == 0)
2576 return 0;
2577
2578 mask = 0;
2579 /*
2580 * Use openflags to check for exec, because fmode won't
2581 * always have FMODE_EXEC set when file open for exec.
2582 */
2583 if (openflags & __FMODE_EXEC) {
2584 /* ONLY check for exec rights */
2585 if (S_ISDIR(state->inode->i_mode))
2586 mask = NFS4_ACCESS_LOOKUP;
2587 else
2588 mask = NFS4_ACCESS_EXECUTE;
2589 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2590 mask = NFS4_ACCESS_READ;
2591
2592 cache.cred = cred;
2593 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2594 nfs_access_add_cache(state->inode, &cache);
2595
2596 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
2597 if ((mask & ~cache.mask & flags) == 0)
2598 return 0;
2599
2600 return -EACCES;
2601}
2602
2603/*
2604 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2605 */
2606static int _nfs4_proc_open(struct nfs4_opendata *data,
2607 struct nfs_open_context *ctx)
2608{
2609 struct inode *dir = d_inode(data->dir);
2610 struct nfs_server *server = NFS_SERVER(dir);
2611 struct nfs_openargs *o_arg = &data->o_arg;
2612 struct nfs_openres *o_res = &data->o_res;
2613 int status;
2614
2615 status = nfs4_run_open_task(data, ctx);
2616 if (!data->rpc_done)
2617 return status;
2618 if (status != 0) {
2619 if (status == -NFS4ERR_BADNAME &&
2620 !(o_arg->open_flags & O_CREAT))
2621 return -ENOENT;
2622 return status;
2623 }
2624
2625 nfs_fattr_map_and_free_names(server, &data->f_attr);
2626
2627 if (o_arg->open_flags & O_CREAT) {
2628 if (o_arg->open_flags & O_EXCL)
2629 data->file_created = true;
2630 else if (o_res->cinfo.before != o_res->cinfo.after)
2631 data->file_created = true;
2632 if (data->file_created ||
2633 inode_peek_iversion_raw(dir) != o_res->cinfo.after)
2634 update_changeattr(dir, &o_res->cinfo,
2635 o_res->f_attr->time_start, 0);
2636 }
2637 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2638 server->caps &= ~NFS_CAP_POSIX_LOCK;
2639 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2640 status = _nfs4_proc_open_confirm(data);
2641 if (status != 0)
2642 return status;
2643 }
2644 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
2645 nfs4_sequence_free_slot(&o_res->seq_res);
2646 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr,
2647 o_res->f_label, NULL);
2648 }
2649 return 0;
2650}
2651
2652/*
2653 * OPEN_EXPIRED:
2654 * reclaim state on the server after a network partition.
2655 * Assumes caller holds the appropriate lock
2656 */
2657static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2658{
2659 struct nfs4_opendata *opendata;
2660 int ret;
2661
2662 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2663 NFS4_OPEN_CLAIM_FH);
2664 if (IS_ERR(opendata))
2665 return PTR_ERR(opendata);
2666 ret = nfs4_open_recover(opendata, state);
2667 if (ret == -ESTALE)
2668 d_drop(ctx->dentry);
2669 nfs4_opendata_put(opendata);
2670 return ret;
2671}
2672
2673static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2674{
2675 struct nfs_server *server = NFS_SERVER(state->inode);
2676 struct nfs4_exception exception = { };
2677 int err;
2678
2679 do {
2680 err = _nfs4_open_expired(ctx, state);
2681 trace_nfs4_open_expired(ctx, 0, err);
2682 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2683 continue;
2684 switch (err) {
2685 default:
2686 goto out;
2687 case -NFS4ERR_GRACE:
2688 case -NFS4ERR_DELAY:
2689 nfs4_handle_exception(server, err, &exception);
2690 err = 0;
2691 }
2692 } while (exception.retry);
2693out:
2694 return err;
2695}
2696
2697static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2698{
2699 struct nfs_open_context *ctx;
2700 int ret;
2701
2702 ctx = nfs4_state_find_open_context(state);
2703 if (IS_ERR(ctx))
2704 return -EAGAIN;
2705 ret = nfs4_do_open_expired(ctx, state);
2706 put_nfs_open_context(ctx);
2707 return ret;
2708}
2709
2710static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
2711 const nfs4_stateid *stateid)
2712{
2713 nfs_remove_bad_delegation(state->inode, stateid);
2714 nfs_state_clear_delegation(state);
2715}
2716
2717static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2718{
2719 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2720 nfs_finish_clear_delegation_stateid(state, NULL);
2721}
2722
2723static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2724{
2725 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2726 nfs40_clear_delegation_stateid(state);
2727 nfs_state_clear_open_state_flags(state);
2728 return nfs4_open_expired(sp, state);
2729}
2730
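/*
 * NFSv4.0 has no TEST_STATEID or FREE_STATEID operations, so there is
 * nothing to probe or release here; just report the stateid as bad.
 */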
2731static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
2732 nfs4_stateid *stateid,
2733 const struct cred *cred)
2734{
2735 return -NFS4ERR_BAD_STATEID;
2736}
2737
2738#if defined(CONFIG_NFS_V4_1)
2739static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
2740 nfs4_stateid *stateid,
2741 const struct cred *cred)
2742{
2743 int status;
2744
2745 switch (stateid->type) {
2746 default:
2747 break;
2748 case NFS4_INVALID_STATEID_TYPE:
2749 case NFS4_SPECIAL_STATEID_TYPE:
2750 return -NFS4ERR_BAD_STATEID;
2751 case NFS4_REVOKED_STATEID_TYPE:
2752 goto out_free;
2753 }
2754
2755 status = nfs41_test_stateid(server, stateid, cred);
2756 switch (status) {
2757 case -NFS4ERR_EXPIRED:
2758 case -NFS4ERR_ADMIN_REVOKED:
2759 case -NFS4ERR_DELEG_REVOKED:
2760 break;
2761 default:
2762 return status;
2763 }
2764out_free:
2765 /* Ack the revoked state to the server */
2766 nfs41_free_stateid(server, stateid, cred, true);
2767 return -NFS4ERR_EXPIRED;
2768}
2769
2770static int nfs41_check_delegation_stateid(struct nfs4_state *state)
2771{
2772 struct nfs_server *server = NFS_SERVER(state->inode);
2773 nfs4_stateid stateid;
2774 struct nfs_delegation *delegation;
2775 const struct cred *cred = NULL;
2776 int status, ret = NFS_OK;
2777
2778 /* Get the delegation credential for use by test/free_stateid */
2779 rcu_read_lock();
2780 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2781 if (delegation == NULL) {
2782 rcu_read_unlock();
2783 nfs_state_clear_delegation(state);
2784 return NFS_OK;
2785 }
2786
2787 nfs4_stateid_copy(&stateid, &delegation->stateid);
2788
2789 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2790 &delegation->flags)) {
2791 rcu_read_unlock();
2792 return NFS_OK;
2793 }
2794
2795 if (delegation->cred)
2796 cred = get_cred(delegation->cred);
2797 rcu_read_unlock();
2798 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
2799 trace_nfs4_test_delegation_stateid(state, NULL, status);
2800 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
2801 nfs_finish_clear_delegation_stateid(state, &stateid);
2802 else
2803 ret = status;
2804
2805 put_cred(cred);
2806 return ret;
2807}
2808
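/*
 * Resync state->stateid with the delegation stateid while the delegation
 * is still held; otherwise fall back to the open stateid.
 */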
2809static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
2810{
2811 nfs4_stateid tmp;
2812
2813 if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
2814 nfs4_copy_delegation_stateid(state->inode, state->state,
2815 &tmp, NULL) &&
2816 nfs4_stateid_match_other(&state->stateid, &tmp))
2817 nfs_state_set_delegation(state, &tmp, state->state);
2818 else
2819 nfs_state_clear_delegation(state);
2820}
2821
2822/**
2823 * nfs41_check_expired_locks - possibly free a lock stateid
2824 *
2825 * @state: NFSv4 state for an inode
2826 *
2827 * Returns NFS_OK if recovery for this stateid is now finished.
2828 * Otherwise a negative NFS4ERR value is returned.
2829 */
2830static int nfs41_check_expired_locks(struct nfs4_state *state)
2831{
2832 int status, ret = NFS_OK;
2833 struct nfs4_lock_state *lsp, *prev = NULL;
2834 struct nfs_server *server = NFS_SERVER(state->inode);
2835
2836 if (!test_bit(LK_STATE_IN_USE, &state->flags))
2837 goto out;
2838
2839 spin_lock(&state->state_lock);
2840 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
2841 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
2842 const struct cred *cred = lsp->ls_state->owner->so_cred;
2843
2844 refcount_inc(&lsp->ls_count);
2845 spin_unlock(&state->state_lock);
2846
2847 nfs4_put_lock_state(prev);
2848 prev = lsp;
2849
2850 status = nfs41_test_and_free_expired_stateid(server,
2851 &lsp->ls_stateid,
2852 cred);
2853 trace_nfs4_test_lock_stateid(state, lsp, status);
2854 if (status == -NFS4ERR_EXPIRED ||
2855 status == -NFS4ERR_BAD_STATEID) {
2856 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
2857 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
2858 if (!recover_lost_locks)
2859 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2860 } else if (status != NFS_OK) {
2861 ret = status;
2862 nfs4_put_lock_state(prev);
2863 goto out;
2864 }
2865 spin_lock(&state->state_lock);
2866 }
2867 }
2868 spin_unlock(&state->state_lock);
2869 nfs4_put_lock_state(prev);
2870out:
2871 return ret;
2872}
2873
2874/**
2875 * nfs41_check_open_stateid - possibly free an open stateid
2876 *
2877 * @state: NFSv4 state for an inode
2878 *
2879 * Returns NFS_OK if recovery for this stateid is now finished.
2880 * Otherwise a negative NFS4ERR value is returned.
2881 */
2882static int nfs41_check_open_stateid(struct nfs4_state *state)
2883{
2884 struct nfs_server *server = NFS_SERVER(state->inode);
2885 nfs4_stateid *stateid = &state->open_stateid;
2886 const struct cred *cred = state->owner->so_cred;
2887 int status;
2888
2889 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
2890 return -NFS4ERR_BAD_STATEID;
2891 status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
2892 trace_nfs4_test_open_stateid(state, NULL, status);
2893 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
2894 nfs_state_clear_open_state_flags(state);
2895 stateid->type = NFS4_INVALID_STATEID_TYPE;
2896 return status;
2897 }
2898 if (nfs_open_stateid_recover_openmode(state))
2899 return -NFS4ERR_OPENMODE;
2900 return NFS_OK;
2901}
2902
2903static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2904{
2905 int status;
2906
2907 status = nfs41_check_delegation_stateid(state);
2908 if (status != NFS_OK)
2909 return status;
2910 nfs41_delegation_recover_stateid(state);
2911
2912 status = nfs41_check_expired_locks(state);
2913 if (status != NFS_OK)
2914 return status;
2915 status = nfs41_check_open_stateid(state);
2916 if (status != NFS_OK)
2917 status = nfs4_open_expired(sp, state);
2918 return status;
2919}
2920#endif
2921
2922/*
2923 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
2924 * fields corresponding to attributes that were used to store the verifier.
2925 * Make sure we clobber those fields in the later setattr call.
2926 */
2927static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2928 struct iattr *sattr, struct nfs4_label **label)
2929{
2930 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
2931 __u32 attrset[3];
2932 unsigned ret;
2933 unsigned i;
2934
2935 for (i = 0; i < ARRAY_SIZE(attrset); i++) {
2936 attrset[i] = opendata->o_res.attrset[i];
2937 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
2938 attrset[i] &= ~bitmask[i];
2939 }
2940
2941 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
2942 sattr->ia_valid : 0;
2943
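/* If the server used atime/mtime to store the verifier, make sure the follow-up setattr overwrites them */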
2944 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
2945 if (sattr->ia_valid & ATTR_ATIME_SET)
2946 ret |= ATTR_ATIME_SET;
2947 else
2948 ret |= ATTR_ATIME;
2949 }
2950
2951 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
2952 if (sattr->ia_valid & ATTR_MTIME_SET)
2953 ret |= ATTR_MTIME_SET;
2954 else
2955 ret |= ATTR_MTIME;
2956 }
2957
2958 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
2959 *label = NULL;
2960 return ret;
2961}
2962
2963static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2964 int flags, struct nfs_open_context *ctx)
2965{
2966 struct nfs4_state_owner *sp = opendata->owner;
2967 struct nfs_server *server = sp->so_server;
2968 struct dentry *dentry;
2969 struct nfs4_state *state;
2970 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
2971 struct inode *dir = d_inode(opendata->dir);
2972 unsigned long dir_verifier;
2973 unsigned int seq;
2974 int ret;
2975
2976 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2977 dir_verifier = nfs_save_change_attribute(dir);
2978
2979 ret = _nfs4_proc_open(opendata, ctx);
2980 if (ret != 0)
2981 goto out;
2982
2983 state = _nfs4_opendata_to_nfs4_state(opendata);
2984 ret = PTR_ERR(state);
2985 if (IS_ERR(state))
2986 goto out;
2987 ctx->state = state;
2988 if (server->caps & NFS_CAP_POSIX_LOCK)
2989 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2990 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
2991 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
2992
2993 dentry = opendata->dentry;
2994 if (d_really_is_negative(dentry)) {
2995 struct dentry *alias;
2996 d_drop(dentry);
2997 alias = d_exact_alias(dentry, state->inode);
2998 if (!alias)
2999 alias = d_splice_alias(igrab(state->inode), dentry);
3000 /* d_splice_alias() can't fail here - it's a non-directory */
3001 if (alias) {
3002 dput(ctx->dentry);
3003 ctx->dentry = dentry = alias;
3004 }
3005 }
3006
3007 switch(opendata->o_arg.claim) {
3008 default:
3009 break;
3010 case NFS4_OPEN_CLAIM_NULL:
3011 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
3012 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
3013 if (!opendata->rpc_done)
3014 break;
3015 if (opendata->o_res.delegation_type != 0)
3016 dir_verifier = nfs_save_change_attribute(dir);
3017 nfs_set_verifier(dentry, dir_verifier);
3018 }
3019
3020 /* Parse layoutget results before we check for access */
3021 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
3022
3023 ret = nfs4_opendata_access(sp->so_cred, opendata, state,
3024 acc_mode, flags);
3025 if (ret != 0)
3026 goto out;
3027
3028 if (d_inode(dentry) == state->inode) {
3029 nfs_inode_attach_open_context(ctx);
3030 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
3031 nfs4_schedule_stateid_recovery(server, state);
3032 }
3033
3034out:
3035 if (!opendata->cancelled)
3036 nfs4_sequence_free_slot(&opendata->o_res.seq_res);
3037 return ret;
3038}
3039
3040/*
3041 * Returns a referenced nfs4_state
3042 */
3043static int _nfs4_do_open(struct inode *dir,
3044 struct nfs_open_context *ctx,
3045 int flags,
3046 const struct nfs4_open_createattrs *c,
3047 int *opened)
3048{
3049 struct nfs4_state_owner *sp;
3050 struct nfs4_state *state = NULL;
3051 struct nfs_server *server = NFS_SERVER(dir);
3052 struct nfs4_opendata *opendata;
3053 struct dentry *dentry = ctx->dentry;
3054 const struct cred *cred = ctx->cred;
3055 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
3056 fmode_t fmode = _nfs4_ctx_to_openmode(ctx);
3057 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
3058 struct iattr *sattr = c->sattr;
3059 struct nfs4_label *label = c->label;
3060 struct nfs4_label *olabel = NULL;
3061 int status;
3062
3063 /* Protect against reboot recovery conflicts */
3064 status = -ENOMEM;
3065 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
3066 if (sp == NULL) {
3067 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
3068 goto out_err;
3069 }
3070 status = nfs4_client_recover_expired_lease(server->nfs_client);
3071 if (status != 0)
3072 goto err_put_state_owner;
3073 if (d_really_is_positive(dentry))
3074 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
3075 status = -ENOMEM;
3076 if (d_really_is_positive(dentry))
3077 claim = NFS4_OPEN_CLAIM_FH;
3078 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
3079 c, claim, GFP_KERNEL);
3080 if (opendata == NULL)
3081 goto err_put_state_owner;
3082
3083 if (label) {
3084 olabel = nfs4_label_alloc(server, GFP_KERNEL);
3085 if (IS_ERR(olabel)) {
3086 status = PTR_ERR(olabel);
3087 goto err_opendata_put;
3088 }
3089 }
3090
3091 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
3092 if (!opendata->f_attr.mdsthreshold) {
3093 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
3094 if (!opendata->f_attr.mdsthreshold)
3095 goto err_free_label;
3096 }
3097 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
3098 }
3099 if (d_really_is_positive(dentry))
3100 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
3101
3102 status = _nfs4_open_and_get_state(opendata, flags, ctx);
3103 if (status != 0)
3104 goto err_free_label;
3105 state = ctx->state;
3106
3107 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
3108 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
3109 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
3110 /*
3111 * send create attributes which were not set by open
3112 * with an extra setattr.
3113 */
3114 if (attrs || label) {
3115 unsigned ia_old = sattr->ia_valid;
3116
3117 sattr->ia_valid = attrs;
3118 nfs_fattr_init(opendata->o_res.f_attr);
3119 status = nfs4_do_setattr(state->inode, cred,
3120 opendata->o_res.f_attr, sattr,
3121 ctx, label, olabel);
3122 if (status == 0) {
3123 nfs_setattr_update_inode(state->inode, sattr,
3124 opendata->o_res.f_attr);
3125 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
3126 }
3127 sattr->ia_valid = ia_old;
3128 }
3129 }
3130 if (opened && opendata->file_created)
3131 *opened = 1;
3132
3133 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
3134 *ctx_th = opendata->f_attr.mdsthreshold;
3135 opendata->f_attr.mdsthreshold = NULL;
3136 }
3137
3138 nfs4_label_free(olabel);
3139
3140 nfs4_opendata_put(opendata);
3141 nfs4_put_state_owner(sp);
3142 return 0;
3143err_free_label:
3144 nfs4_label_free(olabel);
3145err_opendata_put:
3146 nfs4_opendata_put(opendata);
3147err_put_state_owner:
3148 nfs4_put_state_owner(sp);
3149out_err:
3150 return status;
3151}
3152
3153
3154static struct nfs4_state *nfs4_do_open(struct inode *dir,
3155 struct nfs_open_context *ctx,
3156 int flags,
3157 struct iattr *sattr,
3158 struct nfs4_label *label,
3159 int *opened)
3160{
3161 struct nfs_server *server = NFS_SERVER(dir);
3162 struct nfs4_exception exception = {
3163 .interruptible = true,
3164 };
3165 struct nfs4_state *res;
3166 struct nfs4_open_createattrs c = {
3167 .label = label,
3168 .sattr = sattr,
3169 .verf = {
3170 [0] = (__u32)jiffies,
3171 [1] = (__u32)current->pid,
3172 },
3173 };
3174 int status;
3175
3176 do {
3177 status = _nfs4_do_open(dir, ctx, flags, &c, opened);
3178 res = ctx->state;
3179 trace_nfs4_open_file(ctx, flags, status);
3180 if (status == 0)
3181 break;
3182 /* NOTE: BAD_SEQID means the server and client disagree about the
3183 * book-keeping w.r.t. state-changing operations
3184 * (OPEN/CLOSE/LOCK/LOCKU...)
3185 * It is actually a sign of a bug on the client or on the server.
3186 *
3187 * If we receive a BAD_SEQID error in the particular case of
3188 * doing an OPEN, we assume that nfs_increment_open_seqid() will
3189 * have unhashed the old state_owner for us, and that we can
3190 * therefore safely retry using a new one. We should still warn
3191 * the user though...
3192 */
3193 if (status == -NFS4ERR_BAD_SEQID) {
3194 pr_warn_ratelimited("NFS: v4 server %s "
3195 "returned a bad sequence-id error!\n",
3196 NFS_SERVER(dir)->nfs_client->cl_hostname);
3197 exception.retry = 1;
3198 continue;
3199 }
3200 /*
3201 * BAD_STATEID on OPEN means that the server cancelled our
3202 * state before it received the OPEN_CONFIRM.
3203 * Recover by retrying the request as per the discussion
3204 * on Page 181 of RFC3530.
3205 */
3206 if (status == -NFS4ERR_BAD_STATEID) {
3207 exception.retry = 1;
3208 continue;
3209 }
3210 if (status == -NFS4ERR_EXPIRED) {
3211 nfs4_schedule_lease_recovery(server->nfs_client);
3212 exception.retry = 1;
3213 continue;
3214 }
3215 if (status == -EAGAIN) {
3216 /* We must have found a delegation */
3217 exception.retry = 1;
3218 continue;
3219 }
3220 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
3221 continue;
3222 res = ERR_PTR(nfs4_handle_exception(server,
3223 status, &exception));
3224 } while (exception.retry);
3225 return res;
3226}
3227
3228static int _nfs4_do_setattr(struct inode *inode,
3229 struct nfs_setattrargs *arg,
3230 struct nfs_setattrres *res,
3231 const struct cred *cred,
3232 struct nfs_open_context *ctx)
3233{
3234 struct nfs_server *server = NFS_SERVER(inode);
3235 struct rpc_message msg = {
3236 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
3237 .rpc_argp = arg,
3238 .rpc_resp = res,
3239 .rpc_cred = cred,
3240 };
3241 const struct cred *delegation_cred = NULL;
3242 unsigned long timestamp = jiffies;
3243 bool truncate;
3244 int status;
3245
3246 nfs_fattr_init(res->fattr);
3247
3248 /* Servers should only apply open mode checks for file size changes */
3249 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
3250 if (!truncate) {
3251 nfs4_inode_make_writeable(inode);
3252 goto zero_stateid;
3253 }
3254
3255 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
3256 /* Use that stateid */
3257 } else if (ctx != NULL && ctx->state) {
3258 struct nfs_lock_context *l_ctx;
3259 if (!nfs4_valid_open_stateid(ctx->state))
3260 return -EBADF;
3261 l_ctx = nfs_get_lock_context(ctx);
3262 if (IS_ERR(l_ctx))
3263 return PTR_ERR(l_ctx);
3264 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
3265 &arg->stateid, &delegation_cred);
3266 nfs_put_lock_context(l_ctx);
3267 if (status == -EIO)
3268 return -EBADF;
3269 } else {
3270zero_stateid:
3271 nfs4_stateid_copy(&arg->stateid, &zero_stateid);
3272 }
3273 if (delegation_cred)
3274 msg.rpc_cred = delegation_cred;
3275
3276 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
3277
3278 put_cred(delegation_cred);
3279 if (status == 0 && ctx != NULL)
3280 renew_lease(server, timestamp);
3281 trace_nfs4_setattr(inode, &arg->stateid, status);
3282 return status;
3283}
3284
3285static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
3286 struct nfs_fattr *fattr, struct iattr *sattr,
3287 struct nfs_open_context *ctx, struct nfs4_label *ilabel,
3288 struct nfs4_label *olabel)
3289{
3290 struct nfs_server *server = NFS_SERVER(inode);
3291 __u32 bitmask[NFS4_BITMASK_SZ];
3292 struct nfs4_state *state = ctx ? ctx->state : NULL;
3293 struct nfs_setattrargs arg = {
3294 .fh = NFS_FH(inode),
3295 .iap = sattr,
3296 .server = server,
3297 .bitmask = bitmask,
3298 .label = ilabel,
3299 };
3300 struct nfs_setattrres res = {
3301 .fattr = fattr,
3302 .label = olabel,
3303 .server = server,
3304 };
3305 struct nfs4_exception exception = {
3306 .state = state,
3307 .inode = inode,
3308 .stateid = &arg.stateid,
3309 };
3310 int err;
3311
3312 do {
3313 nfs4_bitmap_copy_adjust_setattr(bitmask,
3314 nfs4_bitmask(server, olabel),
3315 inode);
3316
3317 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
3318 switch (err) {
3319 case -NFS4ERR_OPENMODE:
3320 if (!(sattr->ia_valid & ATTR_SIZE)) {
3321 pr_warn_once("NFSv4: server %s is incorrectly "
3322 "applying open mode checks to "
3323 "a SETATTR that is not "
3324 "changing file size.\n",
3325 server->nfs_client->cl_hostname);
3326 }
3327 if (state && !(state->state & FMODE_WRITE)) {
3328 err = -EBADF;
3329 if (sattr->ia_valid & ATTR_OPEN)
3330 err = -EACCES;
3331 goto out;
3332 }
3333 }
3334 err = nfs4_handle_exception(server, err, &exception);
3335 } while (exception.retry);
3336out:
3337 return err;
3338}
3339
3340static bool
3341nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
3342{
3343 if (inode == NULL || !nfs_have_layout(inode))
3344 return false;
3345
3346 return pnfs_wait_on_layoutreturn(inode, task);
3347}
3348
3349/*
3350 * Update the seqid of an open stateid
3351 */
3352static void nfs4_sync_open_stateid(nfs4_stateid *dst,
3353 struct nfs4_state *state)
3354{
3355 __be32 seqid_open;
3356 u32 dst_seqid;
3357 int seq;
3358
3359 for (;;) {
3360 if (!nfs4_valid_open_stateid(state))
3361 break;
3362 seq = read_seqbegin(&state->seqlock);
3363 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3364 nfs4_stateid_copy(dst, &state->open_stateid);
3365 if (read_seqretry(&state->seqlock, seq))
3366 continue;
3367 break;
3368 }
3369 seqid_open = state->open_stateid.seqid;
3370 if (read_seqretry(&state->seqlock, seq))
3371 continue;
3372
3373 dst_seqid = be32_to_cpu(dst->seqid);
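/* Serial-number comparison: only bump dst->seqid if it lags the open stateid */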
3374 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
3375 dst->seqid = seqid_open;
3376 break;
3377 }
3378}
3379
3380/*
3381 * Update the seqid of an open stateid after receiving
3382 * NFS4ERR_OLD_STATEID
3383 */
3384static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
3385 struct nfs4_state *state)
3386{
3387 __be32 seqid_open;
3388 u32 dst_seqid;
3389 bool ret;
3390 int seq, status = -EAGAIN;
3391 DEFINE_WAIT(wait);
3392
3393 for (;;) {
3394 ret = false;
3395 if (!nfs4_valid_open_stateid(state))
3396 break;
3397 seq = read_seqbegin(&state->seqlock);
3398 if (!nfs4_state_match_open_stateid_other(state, dst)) {
3399 if (read_seqretry(&state->seqlock, seq))
3400 continue;
3401 break;
3402 }
Olivier Deprez0e641232021-09-23 10:07:05 +02003403
3404 write_seqlock(&state->seqlock);
David Brazdil0f672f62019-12-10 10:32:29 +00003405 seqid_open = state->open_stateid.seqid;
David Brazdil0f672f62019-12-10 10:32:29 +00003406
3407 dst_seqid = be32_to_cpu(dst->seqid);
Olivier Deprez0e641232021-09-23 10:07:05 +02003408
3409 /* Did another OPEN bump the state's seqid? try again: */
3410 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) {
David Brazdil0f672f62019-12-10 10:32:29 +00003411 dst->seqid = seqid_open;
Olivier Deprez0e641232021-09-23 10:07:05 +02003412 write_sequnlock(&state->seqlock);
3413 ret = true;
3414 break;
3415 }
3416
3417 /* server says we're behind but we haven't seen the update yet */
3418 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
3419 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
3420 write_sequnlock(&state->seqlock);
3421 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
3422
3423 if (fatal_signal_pending(current))
3424 status = -EINTR;
3425 else
3426 if (schedule_timeout(5*HZ) != 0)
3427 status = 0;
3428
3429 finish_wait(&state->waitq, &wait);
3430
3431 if (!status)
3432 continue;
3433 if (status == -EINTR)
3434 break;
3435
3436 /* we slept the whole 5 seconds; we must have lost a seqid */
3437 dst->seqid = cpu_to_be32(dst_seqid + 1);
David Brazdil0f672f62019-12-10 10:32:29 +00003438 ret = true;
3439 break;
3440 }
3441
3442 return ret;
3443}
3444
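/*
 * Per-call data for an asynchronous CLOSE/OPEN_DOWNGRADE request,
 * allocated in nfs4_do_close(), carried through the rpc_call_ops
 * callbacks below and freed in nfs4_free_closedata(). The lr member
 * holds the arguments and results for a LAYOUTRETURN that may be
 * piggy-backed on the CLOSE when pNFS return-on-close is in effect.
 */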
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003445struct nfs4_closedata {
3446 struct inode *inode;
3447 struct nfs4_state *state;
3448 struct nfs_closeargs arg;
3449 struct nfs_closeres res;
3450 struct {
3451 struct nfs4_layoutreturn_args arg;
3452 struct nfs4_layoutreturn_res res;
3453 struct nfs4_xdr_opaque_data ld_private;
3454 u32 roc_barrier;
3455 bool roc;
3456 } lr;
3457 struct nfs_fattr fattr;
3458 unsigned long timestamp;
3459};
3460
3461static void nfs4_free_closedata(void *data)
3462{
3463 struct nfs4_closedata *calldata = data;
3464 struct nfs4_state_owner *sp = calldata->state->owner;
3465 struct super_block *sb = calldata->state->inode->i_sb;
3466
3467 if (calldata->lr.roc)
3468 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
3469 calldata->res.lr_ret);
3470 nfs4_put_open_state(calldata->state);
3471 nfs_free_seqid(calldata->arg.seqid);
3472 nfs4_put_state_owner(sp);
3473 nfs_sb_deactive(sb);
3474 kfree(calldata);
3475}
3476
3477static void nfs4_close_done(struct rpc_task *task, void *data)
3478{
3479 struct nfs4_closedata *calldata = data;
3480 struct nfs4_state *state = calldata->state;
3481 struct nfs_server *server = NFS_SERVER(calldata->inode);
3482 nfs4_stateid *res_stateid = NULL;
3483 struct nfs4_exception exception = {
3484 .state = state,
3485 .inode = calldata->inode,
3486 .stateid = &calldata->arg.stateid,
3487 };
3488
3489 dprintk("%s: begin!\n", __func__);
3490 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3491 return;
3492 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
3493
3494 /* Handle Layoutreturn errors */
Olivier Deprez0e641232021-09-23 10:07:05 +02003495 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
3496 &calldata->res.lr_ret) == -EAGAIN)
David Brazdil0f672f62019-12-10 10:32:29 +00003497 goto out_restart;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003498
3499 /* We are done with the inode, and in the process of freeing
 3500 * the state_owner. Keep this around so that errors can still
 3501 * be processed. */
3502 switch (task->tk_status) {
3503 case 0:
3504 res_stateid = &calldata->res.stateid;
3505 renew_lease(server, calldata->timestamp);
3506 break;
3507 case -NFS4ERR_ACCESS:
3508 if (calldata->arg.bitmask != NULL) {
3509 calldata->arg.bitmask = NULL;
3510 calldata->res.fattr = NULL;
3511 goto out_restart;
3512
3513 }
3514 break;
3515 case -NFS4ERR_OLD_STATEID:
3516 /* Did we race with OPEN? */
David Brazdil0f672f62019-12-10 10:32:29 +00003517 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003518 state))
3519 goto out_restart;
3520 goto out_release;
3521 case -NFS4ERR_ADMIN_REVOKED:
3522 case -NFS4ERR_STALE_STATEID:
3523 case -NFS4ERR_EXPIRED:
3524 nfs4_free_revoked_stateid(server,
3525 &calldata->arg.stateid,
3526 task->tk_msg.rpc_cred);
3527 /* Fallthrough */
3528 case -NFS4ERR_BAD_STATEID:
David Brazdil0f672f62019-12-10 10:32:29 +00003529 if (calldata->arg.fmode == 0)
3530 break;
3531 /* Fallthrough */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003532 default:
3533 task->tk_status = nfs4_async_handle_exception(task,
3534 server, task->tk_status, &exception);
3535 if (exception.retry)
3536 goto out_restart;
3537 }
3538 nfs_clear_open_stateid(state, &calldata->arg.stateid,
3539 res_stateid, calldata->arg.fmode);
3540out_release:
3541 task->tk_status = 0;
3542 nfs_release_seqid(calldata->arg.seqid);
3543 nfs_refresh_inode(calldata->inode, &calldata->fattr);
3544 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
3545 return;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003546out_restart:
3547 task->tk_status = 0;
3548 rpc_restart_call_prepare(task);
3549 goto out_release;
3550}
3551
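/*
 * Work out whether to send a CLOSE or an OPEN_DOWNGRADE from the open
 * counts still held on the state: arg.fmode accumulates the share modes
 * that remain in use. If nothing remains open (fmode == 0) a full CLOSE
 * is sent; if some modes remain, the stateid is downgraded to just
 * those modes; and if every mode the server knows about is still in
 * use, no RPC is sent at all.
 */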
3552static void nfs4_close_prepare(struct rpc_task *task, void *data)
3553{
3554 struct nfs4_closedata *calldata = data;
3555 struct nfs4_state *state = calldata->state;
3556 struct inode *inode = calldata->inode;
3557 struct pnfs_layout_hdr *lo;
3558 bool is_rdonly, is_wronly, is_rdwr;
3559 int call_close = 0;
3560
3561 dprintk("%s: begin!\n", __func__);
3562 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3563 goto out_wait;
3564
3565 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
3566 spin_lock(&state->owner->so_lock);
3567 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
3568 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
3569 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
3570 /* Calculate the change in open mode */
3571 calldata->arg.fmode = 0;
3572 if (state->n_rdwr == 0) {
3573 if (state->n_rdonly == 0)
3574 call_close |= is_rdonly;
3575 else if (is_rdonly)
3576 calldata->arg.fmode |= FMODE_READ;
3577 if (state->n_wronly == 0)
3578 call_close |= is_wronly;
3579 else if (is_wronly)
3580 calldata->arg.fmode |= FMODE_WRITE;
3581 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3582 call_close |= is_rdwr;
3583 } else if (is_rdwr)
3584 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3585
David Brazdil0f672f62019-12-10 10:32:29 +00003586 nfs4_sync_open_stateid(&calldata->arg.stateid, state);
3587 if (!nfs4_valid_open_stateid(state))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003588 call_close = 0;
3589 spin_unlock(&state->owner->so_lock);
3590
3591 if (!call_close) {
3592 /* Note: exit _without_ calling nfs4_close_done */
3593 goto out_no_action;
3594 }
3595
3596 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
3597 nfs_release_seqid(calldata->arg.seqid);
3598 goto out_wait;
3599 }
3600
3601 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
3602 if (lo && !pnfs_layout_is_valid(lo)) {
3603 calldata->arg.lr_args = NULL;
3604 calldata->res.lr_res = NULL;
3605 }
3606
3607 if (calldata->arg.fmode == 0)
3608 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
3609
3610 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
3611 /* Close-to-open cache consistency revalidation */
3612 if (!nfs4_have_delegation(inode, FMODE_READ))
3613 calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
3614 else
3615 calldata->arg.bitmask = NULL;
3616 }
3617
3618 calldata->arg.share_access =
3619 nfs4_map_atomic_open_share(NFS_SERVER(inode),
3620 calldata->arg.fmode, 0);
3621
3622 if (calldata->res.fattr == NULL)
3623 calldata->arg.bitmask = NULL;
3624 else if (calldata->arg.bitmask == NULL)
3625 calldata->res.fattr = NULL;
3626 calldata->timestamp = jiffies;
3627 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
3628 &calldata->arg.seq_args,
3629 &calldata->res.seq_res,
3630 task) != 0)
3631 nfs_release_seqid(calldata->arg.seqid);
3632 dprintk("%s: done!\n", __func__);
3633 return;
3634out_no_action:
3635 task->tk_action = NULL;
3636out_wait:
3637 nfs4_sequence_done(task, &calldata->res.seq_res);
3638}
3639
3640static const struct rpc_call_ops nfs4_close_ops = {
3641 .rpc_call_prepare = nfs4_close_prepare,
3642 .rpc_call_done = nfs4_close_done,
3643 .rpc_release = nfs4_free_closedata,
3644};
3645
3646/*
3647 * It is possible for data to be read/written from a mem-mapped file
3648 * after the sys_close call (which hits the vfs layer as a flush).
3649 * This means that we can't safely call nfsv4 close on a file until
3650 * the inode is cleared. This in turn means that we are not good
3651 * NFSv4 citizens - we do not tell the server to update the file's
 3652 * share state even when we are done with one of the three share
 3653 * stateids in the inode.
3654 *
3655 * NOTE: Caller must be holding the sp->so_owner semaphore!
3656 */
3657int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
3658{
3659 struct nfs_server *server = NFS_SERVER(state->inode);
3660 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
3661 struct nfs4_closedata *calldata;
3662 struct nfs4_state_owner *sp = state->owner;
3663 struct rpc_task *task;
3664 struct rpc_message msg = {
3665 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
3666 .rpc_cred = state->owner->so_cred,
3667 };
3668 struct rpc_task_setup task_setup_data = {
3669 .rpc_client = server->client,
3670 .rpc_message = &msg,
3671 .callback_ops = &nfs4_close_ops,
3672 .workqueue = nfsiod_workqueue,
3673 .flags = RPC_TASK_ASYNC,
3674 };
3675 int status = -ENOMEM;
3676
3677 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
3678 &task_setup_data.rpc_client, &msg);
3679
3680 calldata = kzalloc(sizeof(*calldata), gfp_mask);
3681 if (calldata == NULL)
3682 goto out;
3683 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0);
3684 calldata->inode = state->inode;
3685 calldata->state = state;
3686 calldata->arg.fh = NFS_FH(state->inode);
3687 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
3688 goto out_free_calldata;
3689 /* Serialization for the sequence id */
3690 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
3691 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
3692 if (IS_ERR(calldata->arg.seqid))
3693 goto out_free_calldata;
3694 nfs_fattr_init(&calldata->fattr);
3695 calldata->arg.fmode = 0;
3696 calldata->lr.arg.ld_private = &calldata->lr.ld_private;
3697 calldata->res.fattr = &calldata->fattr;
3698 calldata->res.seqid = calldata->arg.seqid;
3699 calldata->res.server = server;
3700 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
3701 calldata->lr.roc = pnfs_roc(state->inode,
3702 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
3703 if (calldata->lr.roc) {
3704 calldata->arg.lr_args = &calldata->lr.arg;
3705 calldata->res.lr_res = &calldata->lr.res;
3706 }
3707 nfs_sb_active(calldata->inode->i_sb);
3708
3709 msg.rpc_argp = &calldata->arg;
3710 msg.rpc_resp = &calldata->res;
3711 task_setup_data.callback_data = calldata;
3712 task = rpc_run_task(&task_setup_data);
3713 if (IS_ERR(task))
3714 return PTR_ERR(task);
3715 status = 0;
3716 if (wait)
3717 status = rpc_wait_for_completion_task(task);
3718 rpc_put_task(task);
3719 return status;
3720out_free_calldata:
3721 kfree(calldata);
3722out:
3723 nfs4_put_open_state(state);
3724 nfs4_put_state_owner(sp);
3725 return status;
3726}
3727
3728static struct inode *
3729nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
3730 int open_flags, struct iattr *attr, int *opened)
3731{
3732 struct nfs4_state *state;
3733 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
3734
3735 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3736
3737 /* Protect against concurrent sillydeletes */
3738 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3739
3740 nfs4_label_release_security(label);
3741
3742 if (IS_ERR(state))
3743 return ERR_CAST(state);
3744 return state->inode;
3745}
3746
3747static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3748{
3749 if (ctx->state == NULL)
3750 return;
3751 if (is_sync)
David Brazdil0f672f62019-12-10 10:32:29 +00003752 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003753 else
David Brazdil0f672f62019-12-10 10:32:29 +00003754 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003755}
3756
3757#define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3758#define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3759#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_MODE_UMASK - 1UL)
3760
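/*
 * Probe the server's capabilities. The attribute bitmasks returned by
 * the server are clamped to what the negotiated minor version may
 * legally advertise, then translated into NFS_CAP_* flags and into the
 * bitmasks used for cache-consistency revalidation and exclusive
 * create.
 */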
3761static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3762{
3763 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3764 struct nfs4_server_caps_arg args = {
3765 .fhandle = fhandle,
3766 .bitmask = bitmask,
3767 };
3768 struct nfs4_server_caps_res res = {};
3769 struct rpc_message msg = {
3770 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3771 .rpc_argp = &args,
3772 .rpc_resp = &res,
3773 };
3774 int status;
3775 int i;
3776
3777 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3778 FATTR4_WORD0_FH_EXPIRE_TYPE |
3779 FATTR4_WORD0_LINK_SUPPORT |
3780 FATTR4_WORD0_SYMLINK_SUPPORT |
3781 FATTR4_WORD0_ACLSUPPORT;
3782 if (minorversion)
3783 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3784
3785 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3786 if (status == 0) {
3787 /* Sanity check the server answers */
3788 switch (minorversion) {
3789 case 0:
3790 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3791 res.attr_bitmask[2] = 0;
3792 break;
3793 case 1:
3794 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3795 break;
3796 case 2:
3797 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3798 }
3799 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3800 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
3801 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
3802 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
3803 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
3804 NFS_CAP_CTIME|NFS_CAP_MTIME|
3805 NFS_CAP_SECURITY_LABEL);
3806 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3807 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3808 server->caps |= NFS_CAP_ACLS;
3809 if (res.has_links != 0)
3810 server->caps |= NFS_CAP_HARDLINKS;
3811 if (res.has_symlinks != 0)
3812 server->caps |= NFS_CAP_SYMLINKS;
3813 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
3814 server->caps |= NFS_CAP_FILEID;
3815 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
3816 server->caps |= NFS_CAP_MODE;
3817 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
3818 server->caps |= NFS_CAP_NLINK;
3819 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
3820 server->caps |= NFS_CAP_OWNER;
3821 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
3822 server->caps |= NFS_CAP_OWNER_GROUP;
3823 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
3824 server->caps |= NFS_CAP_ATIME;
3825 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
3826 server->caps |= NFS_CAP_CTIME;
3827 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
3828 server->caps |= NFS_CAP_MTIME;
3829#ifdef CONFIG_NFS_V4_SECURITY_LABEL
3830 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3831 server->caps |= NFS_CAP_SECURITY_LABEL;
3832#endif
3833 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3834 sizeof(server->attr_bitmask));
3835 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3836
3837 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3838 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3839 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3840 server->cache_consistency_bitmask[2] = 0;
3841
3842 /* Avoid a regression due to buggy server */
3843 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
3844 res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
3845 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3846 sizeof(server->exclcreat_bitmask));
3847
3848 server->acl_bitmask = res.acl_bitmask;
3849 server->fh_expire_type = res.fh_expire_type;
3850 }
3851
3852 return status;
3853}
3854
3855int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3856{
David Brazdil0f672f62019-12-10 10:32:29 +00003857 struct nfs4_exception exception = {
3858 .interruptible = true,
3859 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003860 int err;
3861 do {
3862 err = nfs4_handle_exception(server,
3863 _nfs4_server_capabilities(server, fhandle),
3864 &exception);
3865 } while (exception.retry);
3866 return err;
3867}
3868
3869static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3870 struct nfs_fsinfo *info)
3871{
3872 u32 bitmask[3];
3873 struct nfs4_lookup_root_arg args = {
3874 .bitmask = bitmask,
3875 };
3876 struct nfs4_lookup_res res = {
3877 .server = server,
3878 .fattr = info->fattr,
3879 .fh = fhandle,
3880 };
3881 struct rpc_message msg = {
3882 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
3883 .rpc_argp = &args,
3884 .rpc_resp = &res,
3885 };
3886
3887 bitmask[0] = nfs4_fattr_bitmap[0];
3888 bitmask[1] = nfs4_fattr_bitmap[1];
3889 /*
3890 * Process the label in the upcoming getfattr
3891 */
3892 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
3893
3894 nfs_fattr_init(info->fattr);
3895 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3896}
3897
3898static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3899 struct nfs_fsinfo *info)
3900{
David Brazdil0f672f62019-12-10 10:32:29 +00003901 struct nfs4_exception exception = {
3902 .interruptible = true,
3903 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003904 int err;
3905 do {
3906 err = _nfs4_lookup_root(server, fhandle, info);
3907 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3908 switch (err) {
3909 case 0:
3910 case -NFS4ERR_WRONGSEC:
3911 goto out;
3912 default:
3913 err = nfs4_handle_exception(server, err, &exception);
3914 }
3915 } while (exception.retry);
3916out:
3917 return err;
3918}
3919
3920static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3921 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3922{
3923 struct rpc_auth_create_args auth_args = {
3924 .pseudoflavor = flavor,
3925 };
3926 struct rpc_auth *auth;
3927
3928 auth = rpcauth_create(&auth_args, server->client);
3929 if (IS_ERR(auth))
3930 return -EACCES;
3931 return nfs4_lookup_root(server, fhandle, info);
3932}
3933
3934/*
3935 * Retry pseudoroot lookup with various security flavors. We do this when:
3936 *
3937 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3938 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3939 *
3940 * Returns zero on success, or a negative NFS4ERR value, or a
3941 * negative errno value.
3942 */
3943static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3944 struct nfs_fsinfo *info)
3945{
3946 /* Per 3530bis 15.33.5 */
3947 static const rpc_authflavor_t flav_array[] = {
3948 RPC_AUTH_GSS_KRB5P,
3949 RPC_AUTH_GSS_KRB5I,
3950 RPC_AUTH_GSS_KRB5,
3951 RPC_AUTH_UNIX, /* courtesy */
3952 RPC_AUTH_NULL,
3953 };
3954 int status = -EPERM;
3955 size_t i;
3956
3957 if (server->auth_info.flavor_len > 0) {
3958 /* try each flavor specified by user */
3959 for (i = 0; i < server->auth_info.flavor_len; i++) {
3960 status = nfs4_lookup_root_sec(server, fhandle, info,
3961 server->auth_info.flavors[i]);
3962 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3963 continue;
3964 break;
3965 }
3966 } else {
3967 /* no flavors specified by user, try default list */
3968 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3969 status = nfs4_lookup_root_sec(server, fhandle, info,
3970 flav_array[i]);
3971 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3972 continue;
3973 break;
3974 }
3975 }
3976
3977 /*
David Brazdil0f672f62019-12-10 10:32:29 +00003978 * -EACCES could mean that the user doesn't have correct permissions
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003979 * to access the mount. It could also mean that we tried to mount
3980 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3981 * existing mount programs don't handle -EACCES very well so it should
3982 * be mapped to -EPERM instead.
3983 */
3984 if (status == -EACCES)
3985 status = -EPERM;
3986 return status;
3987}
3988
3989/**
3990 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3991 * @server: initialized nfs_server handle
3992 * @fhandle: we fill in the pseudo-fs root file handle
3993 * @info: we fill in an FSINFO struct
3994 * @auth_probe: probe the auth flavours
3995 *
3996 * Returns zero on success, or a negative errno.
3997 */
3998int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3999 struct nfs_fsinfo *info,
4000 bool auth_probe)
4001{
4002 int status = 0;
4003
4004 if (!auth_probe)
4005 status = nfs4_lookup_root(server, fhandle, info);
4006
4007 if (auth_probe || status == -NFS4ERR_WRONGSEC)
4008 status = server->nfs_client->cl_mvops->find_root_sec(server,
4009 fhandle, info);
4010
4011 if (status == 0)
4012 status = nfs4_server_capabilities(server, fhandle);
4013 if (status == 0)
4014 status = nfs4_do_fsinfo(server, fhandle, info);
4015
4016 return nfs4_map_errors(status);
4017}
4018
4019static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
4020 struct nfs_fsinfo *info)
4021{
4022 int error;
4023 struct nfs_fattr *fattr = info->fattr;
4024 struct nfs4_label *label = NULL;
4025
4026 error = nfs4_server_capabilities(server, mntfh);
4027 if (error < 0) {
4028 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
4029 return error;
4030 }
4031
4032 label = nfs4_label_alloc(server, GFP_KERNEL);
4033 if (IS_ERR(label))
4034 return PTR_ERR(label);
4035
4036 error = nfs4_proc_getattr(server, mntfh, fattr, label, NULL);
4037 if (error < 0) {
4038 dprintk("nfs4_get_root: getattr error = %d\n", -error);
4039 goto err_free_label;
4040 }
4041
4042 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
4043 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
4044 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
4045
4046err_free_label:
4047 nfs4_label_free(label);
4048
4049 return error;
4050}
4051
4052/*
4053 * Get locations and (maybe) other attributes of a referral.
4054 * Note that we'll actually follow the referral later when
4055 * we detect fsid mismatch in inode revalidation
4056 */
4057static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
4058 const struct qstr *name, struct nfs_fattr *fattr,
4059 struct nfs_fh *fhandle)
4060{
4061 int status = -ENOMEM;
4062 struct page *page = NULL;
4063 struct nfs4_fs_locations *locations = NULL;
4064
4065 page = alloc_page(GFP_KERNEL);
4066 if (page == NULL)
4067 goto out;
4068 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
4069 if (locations == NULL)
4070 goto out;
4071
4072 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
4073 if (status != 0)
4074 goto out;
4075
4076 /*
4077 * If the fsid didn't change, this is a migration event, not a
4078 * referral. Cause us to drop into the exception handler, which
4079 * will kick off migration recovery.
4080 */
4081 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
4082 dprintk("%s: server did not return a different fsid for"
4083 " a referral at %s\n", __func__, name->name);
4084 status = -NFS4ERR_MOVED;
4085 goto out;
4086 }
4087 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
4088 nfs_fixup_referral_attributes(&locations->fattr);
4089
4090 /* replace the lookup nfs_fattr with the locations nfs_fattr */
4091 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
4092 memset(fhandle, 0, sizeof(struct nfs_fh));
4093out:
4094 if (page)
4095 __free_page(page);
4096 kfree(locations);
4097 return status;
4098}
4099
4100static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4101 struct nfs_fattr *fattr, struct nfs4_label *label,
4102 struct inode *inode)
4103{
4104 __u32 bitmask[NFS4_BITMASK_SZ];
4105 struct nfs4_getattr_arg args = {
4106 .fh = fhandle,
4107 .bitmask = bitmask,
4108 };
4109 struct nfs4_getattr_res res = {
4110 .fattr = fattr,
4111 .label = label,
4112 .server = server,
4113 };
4114 struct rpc_message msg = {
4115 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4116 .rpc_argp = &args,
4117 .rpc_resp = &res,
4118 };
4119
4120 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode);
4121
4122 nfs_fattr_init(fattr);
4123 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4124}
4125
4126static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
4127 struct nfs_fattr *fattr, struct nfs4_label *label,
4128 struct inode *inode)
4129{
David Brazdil0f672f62019-12-10 10:32:29 +00004130 struct nfs4_exception exception = {
4131 .interruptible = true,
4132 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004133 int err;
4134 do {
4135 err = _nfs4_proc_getattr(server, fhandle, fattr, label, inode);
4136 trace_nfs4_getattr(server, fhandle, fattr, err);
4137 err = nfs4_handle_exception(server, err,
4138 &exception);
4139 } while (exception.retry);
4140 return err;
4141}
4142
4143/*
4144 * The file is not closed if it is opened due to a request to change
4145 * the size of the file. The open call will not be needed once the
4146 * VFS layer lookup-intents are implemented.
4147 *
4148 * Close is called when the inode is destroyed.
4149 * If we haven't opened the file for O_WRONLY, we
4150 * need to do so in the size_change case to obtain a stateid.
4151 *
4152 * Got race?
4153 * Because OPEN is always done by name in NFSv4, it is
4154 * possible that we opened a different file by the same
4155 * name. We can recognize this race condition, but we
4156 * can't do anything about it besides returning an error.
4157 *
4158 * This will be fixed with VFS changes (lookup-intent).
4159 */
4160static int
4161nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
4162 struct iattr *sattr)
4163{
4164 struct inode *inode = d_inode(dentry);
David Brazdil0f672f62019-12-10 10:32:29 +00004165 const struct cred *cred = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004166 struct nfs_open_context *ctx = NULL;
4167 struct nfs4_label *label = NULL;
4168 int status;
4169
4170 if (pnfs_ld_layoutret_on_setattr(inode) &&
4171 sattr->ia_valid & ATTR_SIZE &&
4172 sattr->ia_size < i_size_read(inode))
4173 pnfs_commit_and_return_layout(inode);
4174
4175 nfs_fattr_init(fattr);
4176
4177 /* Deal with open(O_TRUNC) */
4178 if (sattr->ia_valid & ATTR_OPEN)
4179 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
4180
4181 /* Optimization: if the end result is no change, don't RPC */
4182 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
4183 return 0;
4184
4185 /* Search for an existing open(O_WRITE) file */
4186 if (sattr->ia_valid & ATTR_FILE) {
4187
4188 ctx = nfs_file_open_context(sattr->ia_file);
4189 if (ctx)
4190 cred = ctx->cred;
4191 }
4192
4193 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
4194 if (IS_ERR(label))
4195 return PTR_ERR(label);
4196
4197 /* Return any delegations if we're going to change ACLs */
4198 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
4199 nfs4_inode_make_writeable(inode);
4200
4201 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL, label);
4202 if (status == 0) {
4203 nfs_setattr_update_inode(inode, sattr, fattr);
4204 nfs_setsecurity(inode, fattr, label);
4205 }
4206 nfs4_label_free(label);
4207 return status;
4208}
4209
4210static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
4211 const struct qstr *name, struct nfs_fh *fhandle,
4212 struct nfs_fattr *fattr, struct nfs4_label *label)
4213{
4214 struct nfs_server *server = NFS_SERVER(dir);
4215 int status;
4216 struct nfs4_lookup_arg args = {
4217 .bitmask = server->attr_bitmask,
4218 .dir_fh = NFS_FH(dir),
4219 .name = name,
4220 };
4221 struct nfs4_lookup_res res = {
4222 .server = server,
4223 .fattr = fattr,
4224 .label = label,
4225 .fh = fhandle,
4226 };
4227 struct rpc_message msg = {
4228 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
4229 .rpc_argp = &args,
4230 .rpc_resp = &res,
4231 };
4232
4233 args.bitmask = nfs4_bitmask(server, label);
4234
4235 nfs_fattr_init(fattr);
4236
4237 dprintk("NFS call lookup %s\n", name->name);
4238 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
4239 dprintk("NFS reply lookup: %d\n", status);
4240 return status;
4241}
4242
4243static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
4244{
4245 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
4246 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
4247 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
4248 fattr->nlink = 2;
4249}
4250
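/*
 * LOOKUP with the usual retry loop plus three special cases:
 * NFS4ERR_BADNAME is reported as -ENOENT, NFS4ERR_MOVED triggers a
 * referral lookup, and NFS4ERR_WRONGSEC renegotiates the security
 * flavor and retries on a new rpc_clnt, which is handed back to the
 * caller through *clnt on success.
 */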
4251static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
4252 const struct qstr *name, struct nfs_fh *fhandle,
4253 struct nfs_fattr *fattr, struct nfs4_label *label)
4254{
David Brazdil0f672f62019-12-10 10:32:29 +00004255 struct nfs4_exception exception = {
4256 .interruptible = true,
4257 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004258 struct rpc_clnt *client = *clnt;
4259 int err;
4260 do {
4261 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
4262 trace_nfs4_lookup(dir, name, err);
4263 switch (err) {
4264 case -NFS4ERR_BADNAME:
4265 err = -ENOENT;
4266 goto out;
4267 case -NFS4ERR_MOVED:
4268 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
4269 if (err == -NFS4ERR_MOVED)
4270 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4271 goto out;
4272 case -NFS4ERR_WRONGSEC:
4273 err = -EPERM;
4274 if (client != *clnt)
4275 goto out;
4276 client = nfs4_negotiate_security(client, dir, name);
4277 if (IS_ERR(client))
4278 return PTR_ERR(client);
4279
4280 exception.retry = 1;
4281 break;
4282 default:
4283 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
4284 }
4285 } while (exception.retry);
4286
4287out:
4288 if (err == 0)
4289 *clnt = client;
4290 else if (client != *clnt)
4291 rpc_shutdown_client(client);
4292
4293 return err;
4294}
4295
4296static int nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
4297 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4298 struct nfs4_label *label)
4299{
4300 int status;
4301 struct rpc_clnt *client = NFS_CLIENT(dir);
4302
4303 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
4304 if (client != NFS_CLIENT(dir)) {
4305 rpc_shutdown_client(client);
4306 nfs_fixup_secinfo_attributes(fattr);
4307 }
4308 return status;
4309}
4310
4311struct rpc_clnt *
4312nfs4_proc_lookup_mountpoint(struct inode *dir, const struct qstr *name,
4313 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4314{
4315 struct rpc_clnt *client = NFS_CLIENT(dir);
4316 int status;
4317
4318 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
4319 if (status < 0)
4320 return ERR_PTR(status);
4321 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
4322}
4323
4324static int _nfs4_proc_lookupp(struct inode *inode,
4325 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4326 struct nfs4_label *label)
4327{
4328 struct rpc_clnt *clnt = NFS_CLIENT(inode);
4329 struct nfs_server *server = NFS_SERVER(inode);
4330 int status;
4331 struct nfs4_lookupp_arg args = {
4332 .bitmask = server->attr_bitmask,
4333 .fh = NFS_FH(inode),
4334 };
4335 struct nfs4_lookupp_res res = {
4336 .server = server,
4337 .fattr = fattr,
4338 .label = label,
4339 .fh = fhandle,
4340 };
4341 struct rpc_message msg = {
4342 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
4343 .rpc_argp = &args,
4344 .rpc_resp = &res,
4345 };
4346
4347 args.bitmask = nfs4_bitmask(server, label);
4348
4349 nfs_fattr_init(fattr);
4350
4351 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
4352 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
4353 &res.seq_res, 0);
4354 dprintk("NFS reply lookupp: %d\n", status);
4355 return status;
4356}
4357
4358static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
4359 struct nfs_fattr *fattr, struct nfs4_label *label)
4360{
David Brazdil0f672f62019-12-10 10:32:29 +00004361 struct nfs4_exception exception = {
4362 .interruptible = true,
4363 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004364 int err;
4365 do {
4366 err = _nfs4_proc_lookupp(inode, fhandle, fattr, label);
4367 trace_nfs4_lookupp(inode, err);
4368 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4369 &exception);
4370 } while (exception.retry);
4371 return err;
4372}
4373
4374static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4375{
4376 struct nfs_server *server = NFS_SERVER(inode);
4377 struct nfs4_accessargs args = {
4378 .fh = NFS_FH(inode),
4379 .access = entry->mask,
4380 };
4381 struct nfs4_accessres res = {
4382 .server = server,
4383 };
4384 struct rpc_message msg = {
4385 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
4386 .rpc_argp = &args,
4387 .rpc_resp = &res,
4388 .rpc_cred = entry->cred,
4389 };
4390 int status = 0;
4391
4392 if (!nfs4_have_delegation(inode, FMODE_READ)) {
4393 res.fattr = nfs_alloc_fattr();
4394 if (res.fattr == NULL)
4395 return -ENOMEM;
4396 args.bitmask = server->cache_consistency_bitmask;
4397 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004398 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4399 if (!status) {
4400 nfs_access_set_mask(entry, res.access);
4401 if (res.fattr)
4402 nfs_refresh_inode(inode, res.fattr);
4403 }
4404 nfs_free_fattr(res.fattr);
4405 return status;
4406}
4407
4408static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4409{
David Brazdil0f672f62019-12-10 10:32:29 +00004410 struct nfs4_exception exception = {
4411 .interruptible = true,
4412 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004413 int err;
4414 do {
4415 err = _nfs4_proc_access(inode, entry);
4416 trace_nfs4_access(inode, err);
4417 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4418 &exception);
4419 } while (exception.retry);
4420 return err;
4421}
4422
4423/*
4424 * TODO: For the time being, we don't try to get any attributes
4425 * along with any of the zero-copy operations READ, READDIR,
4426 * READLINK, WRITE.
4427 *
4428 * In the case of the first three, we want to put the GETATTR
4429 * after the read-type operation -- this is because it is hard
4430 * to predict the length of a GETATTR response in v4, and thus
4431 * hard to align the READ data correctly. This means that the GETATTR
4432 * may end up partially falling into the page cache, and we should
4433 * shift it into the 'tail' of the xdr_buf before processing.
4434 * To do this efficiently, we need to know the total length
4435 * of data received, which doesn't seem to be available outside
4436 * of the RPC layer.
4437 *
4438 * In the case of WRITE, we also want to put the GETATTR after
4439 * the operation -- in this case because we want to make sure
4440 * we get the post-operation mtime and size.
4441 *
4442 * Both of these changes to the XDR layer would in fact be quite
4443 * minor, but I decided to leave them for a subsequent patch.
4444 */
4445static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
4446 unsigned int pgbase, unsigned int pglen)
4447{
4448 struct nfs4_readlink args = {
4449 .fh = NFS_FH(inode),
4450 .pgbase = pgbase,
4451 .pglen = pglen,
4452 .pages = &page,
4453 };
4454 struct nfs4_readlink_res res;
4455 struct rpc_message msg = {
4456 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
4457 .rpc_argp = &args,
4458 .rpc_resp = &res,
4459 };
4460
4461 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
4462}
4463
4464static int nfs4_proc_readlink(struct inode *inode, struct page *page,
4465 unsigned int pgbase, unsigned int pglen)
4466{
David Brazdil0f672f62019-12-10 10:32:29 +00004467 struct nfs4_exception exception = {
4468 .interruptible = true,
4469 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004470 int err;
4471 do {
4472 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
4473 trace_nfs4_readlink(inode, err);
4474 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4475 &exception);
4476 } while (exception.retry);
4477 return err;
4478}
4479
4480/*
4481 * This is just for mknod. open(O_CREAT) will always do ->open_context().
4482 */
4483static int
4484nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
4485 int flags)
4486{
4487 struct nfs_server *server = NFS_SERVER(dir);
4488 struct nfs4_label l, *ilabel = NULL;
4489 struct nfs_open_context *ctx;
4490 struct nfs4_state *state;
4491 int status = 0;
4492
4493 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
4494 if (IS_ERR(ctx))
4495 return PTR_ERR(ctx);
4496
4497 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
4498
4499 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4500 sattr->ia_mode &= ~current_umask();
4501 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
4502 if (IS_ERR(state)) {
4503 status = PTR_ERR(state);
4504 goto out;
4505 }
4506out:
4507 nfs4_label_release_security(ilabel);
4508 put_nfs_open_context(ctx);
4509 return status;
4510}
4511
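/*
 * Synchronous REMOVE. On success the parent directory's change
 * attribute is updated from the returned change_info, and the parent's
 * nlink is decremented locally when the removed object was a directory.
 */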
4512static int
4513_nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
4514{
4515 struct nfs_server *server = NFS_SERVER(dir);
4516 struct nfs_removeargs args = {
4517 .fh = NFS_FH(dir),
4518 .name = *name,
4519 };
4520 struct nfs_removeres res = {
4521 .server = server,
4522 };
4523 struct rpc_message msg = {
4524 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
4525 .rpc_argp = &args,
4526 .rpc_resp = &res,
4527 };
4528 unsigned long timestamp = jiffies;
4529 int status;
4530
4531 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
4532 if (status == 0) {
4533 spin_lock(&dir->i_lock);
4534 update_changeattr_locked(dir, &res.cinfo, timestamp, 0);
4535 /* Removing a directory decrements nlink in the parent */
4536 if (ftype == NF4DIR && dir->i_nlink > 2)
4537 nfs4_dec_nlink_locked(dir);
4538 spin_unlock(&dir->i_lock);
4539 }
4540 return status;
4541}
4542
4543static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
4544{
David Brazdil0f672f62019-12-10 10:32:29 +00004545 struct nfs4_exception exception = {
4546 .interruptible = true,
4547 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004548 struct inode *inode = d_inode(dentry);
4549 int err;
4550
4551 if (inode) {
4552 if (inode->i_nlink == 1)
4553 nfs4_inode_return_delegation(inode);
4554 else
4555 nfs4_inode_make_writeable(inode);
4556 }
4557 do {
4558 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG);
4559 trace_nfs4_remove(dir, &dentry->d_name, err);
4560 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4561 &exception);
4562 } while (exception.retry);
4563 return err;
4564}
4565
4566static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
4567{
David Brazdil0f672f62019-12-10 10:32:29 +00004568 struct nfs4_exception exception = {
4569 .interruptible = true,
4570 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004571 int err;
4572
4573 do {
4574 err = _nfs4_proc_remove(dir, name, NF4DIR);
4575 trace_nfs4_remove(dir, name, err);
4576 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4577 &exception);
4578 } while (exception.retry);
4579 return err;
4580}
4581
4582static void nfs4_proc_unlink_setup(struct rpc_message *msg,
4583 struct dentry *dentry,
4584 struct inode *inode)
4585{
4586 struct nfs_removeargs *args = msg->rpc_argp;
4587 struct nfs_removeres *res = msg->rpc_resp;
4588
4589 res->server = NFS_SB(dentry->d_sb);
4590 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
4591 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0);
4592
4593 nfs_fattr_init(res->dir_attr);
4594
4595 if (inode)
4596 nfs4_inode_return_delegation(inode);
4597}
4598
4599static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
4600{
4601 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
4602 &data->args.seq_args,
4603 &data->res.seq_res,
4604 task);
4605}
4606
4607static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
4608{
4609 struct nfs_unlinkdata *data = task->tk_calldata;
4610 struct nfs_removeres *res = &data->res;
4611
4612 if (!nfs4_sequence_done(task, &res->seq_res))
4613 return 0;
4614 if (nfs4_async_handle_error(task, res->server, NULL,
4615 &data->timeout) == -EAGAIN)
4616 return 0;
4617 if (task->tk_status == 0)
4618 update_changeattr(dir, &res->cinfo,
4619 res->dir_attr->time_start, 0);
4620 return 1;
4621}
4622
4623static void nfs4_proc_rename_setup(struct rpc_message *msg,
4624 struct dentry *old_dentry,
4625 struct dentry *new_dentry)
4626{
4627 struct nfs_renameargs *arg = msg->rpc_argp;
4628 struct nfs_renameres *res = msg->rpc_resp;
4629 struct inode *old_inode = d_inode(old_dentry);
4630 struct inode *new_inode = d_inode(new_dentry);
4631
4632 if (old_inode)
4633 nfs4_inode_make_writeable(old_inode);
4634 if (new_inode)
4635 nfs4_inode_return_delegation(new_inode);
4636 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
4637 res->server = NFS_SB(old_dentry->d_sb);
4638 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0);
4639}
4640
4641static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
4642{
4643 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
4644 &data->args.seq_args,
4645 &data->res.seq_res,
4646 task);
4647}
4648
4649static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
4650 struct inode *new_dir)
4651{
4652 struct nfs_renamedata *data = task->tk_calldata;
4653 struct nfs_renameres *res = &data->res;
4654
4655 if (!nfs4_sequence_done(task, &res->seq_res))
4656 return 0;
4657 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
4658 return 0;
4659
4660 if (task->tk_status == 0) {
4661 if (new_dir != old_dir) {
4662 /* Note: If we moved a directory, nlink will change */
4663 update_changeattr(old_dir, &res->old_cinfo,
4664 res->old_fattr->time_start,
4665 NFS_INO_INVALID_OTHER);
4666 update_changeattr(new_dir, &res->new_cinfo,
4667 res->new_fattr->time_start,
4668 NFS_INO_INVALID_OTHER);
4669 } else
4670 update_changeattr(old_dir, &res->old_cinfo,
4671 res->old_fattr->time_start,
4672 0);
4673 }
4674 return 1;
4675}
4676
4677static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4678{
4679 struct nfs_server *server = NFS_SERVER(inode);
4680 __u32 bitmask[NFS4_BITMASK_SZ];
4681 struct nfs4_link_arg arg = {
4682 .fh = NFS_FH(inode),
4683 .dir_fh = NFS_FH(dir),
4684 .name = name,
4685 .bitmask = bitmask,
4686 };
4687 struct nfs4_link_res res = {
4688 .server = server,
4689 .label = NULL,
4690 };
4691 struct rpc_message msg = {
4692 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
4693 .rpc_argp = &arg,
4694 .rpc_resp = &res,
4695 };
4696 int status = -ENOMEM;
4697
4698 res.fattr = nfs_alloc_fattr();
4699 if (res.fattr == NULL)
4700 goto out;
4701
4702 res.label = nfs4_label_alloc(server, GFP_KERNEL);
4703 if (IS_ERR(res.label)) {
4704 status = PTR_ERR(res.label);
4705 goto out;
4706 }
4707
4708 nfs4_inode_make_writeable(inode);
4709 nfs4_bitmap_copy_adjust_setattr(bitmask, nfs4_bitmask(server, res.label), inode);
4710
4711 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4712 if (!status) {
4713 update_changeattr(dir, &res.cinfo, res.fattr->time_start, 0);
4714 status = nfs_post_op_update_inode(inode, res.fattr);
4715 if (!status)
4716 nfs_setsecurity(inode, res.fattr, res.label);
4717 }
4718
4719
4720 nfs4_label_free(res.label);
4721
4722out:
4723 nfs_free_fattr(res.fattr);
4724 return status;
4725}
4726
4727static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4728{
David Brazdil0f672f62019-12-10 10:32:29 +00004729 struct nfs4_exception exception = {
4730 .interruptible = true,
4731 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004732 int err;
4733 do {
4734 err = nfs4_handle_exception(NFS_SERVER(inode),
4735 _nfs4_proc_link(inode, dir, name),
4736 &exception);
4737 } while (exception.retry);
4738 return err;
4739}
4740
4741struct nfs4_createdata {
4742 struct rpc_message msg;
4743 struct nfs4_create_arg arg;
4744 struct nfs4_create_res res;
4745 struct nfs_fh fh;
4746 struct nfs_fattr fattr;
4747 struct nfs4_label *label;
4748};
4749
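/*
 * Common helper for the non-regular-file create paths (symlink, mkdir,
 * mknod): bundle the CREATE arguments, reply buffers and security label
 * into a single allocation that nfs4_do_create() and
 * nfs4_free_createdata() operate on.
 */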
4750static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
4751 const struct qstr *name, struct iattr *sattr, u32 ftype)
4752{
4753 struct nfs4_createdata *data;
4754
4755 data = kzalloc(sizeof(*data), GFP_KERNEL);
4756 if (data != NULL) {
4757 struct nfs_server *server = NFS_SERVER(dir);
4758
4759 data->label = nfs4_label_alloc(server, GFP_KERNEL);
4760 if (IS_ERR(data->label))
4761 goto out_free;
4762
4763 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
4764 data->msg.rpc_argp = &data->arg;
4765 data->msg.rpc_resp = &data->res;
4766 data->arg.dir_fh = NFS_FH(dir);
4767 data->arg.server = server;
4768 data->arg.name = name;
4769 data->arg.attrs = sattr;
4770 data->arg.ftype = ftype;
4771 data->arg.bitmask = nfs4_bitmask(server, data->label);
4772 data->arg.umask = current_umask();
4773 data->res.server = server;
4774 data->res.fh = &data->fh;
4775 data->res.fattr = &data->fattr;
4776 data->res.label = data->label;
4777 nfs_fattr_init(data->res.fattr);
4778 }
4779 return data;
4780out_free:
4781 kfree(data);
4782 return NULL;
4783}
4784
4785static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
4786{
4787 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
4788 &data->arg.seq_args, &data->res.seq_res, 1);
4789 if (status == 0) {
4790 spin_lock(&dir->i_lock);
4791 update_changeattr_locked(dir, &data->res.dir_cinfo,
4792 data->res.fattr->time_start, 0);
4793 /* Creating a directory bumps nlink in the parent */
4794 if (data->arg.ftype == NF4DIR)
4795 nfs4_inc_nlink_locked(dir);
4796 spin_unlock(&dir->i_lock);
4797 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
4798 }
4799 return status;
4800}
4801
4802static void nfs4_free_createdata(struct nfs4_createdata *data)
4803{
4804 nfs4_label_free(data->label);
4805 kfree(data);
4806}
4807
4808static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4809 struct page *page, unsigned int len, struct iattr *sattr,
4810 struct nfs4_label *label)
4811{
4812 struct nfs4_createdata *data;
4813 int status = -ENAMETOOLONG;
4814
4815 if (len > NFS4_MAXPATHLEN)
4816 goto out;
4817
4818 status = -ENOMEM;
4819 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
4820 if (data == NULL)
4821 goto out;
4822
4823 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
4824 data->arg.u.symlink.pages = &page;
4825 data->arg.u.symlink.len = len;
4826 data->arg.label = label;
4827
4828 status = nfs4_do_create(dir, dentry, data);
4829
4830 nfs4_free_createdata(data);
4831out:
4832 return status;
4833}
4834
4835static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4836 struct page *page, unsigned int len, struct iattr *sattr)
4837{
David Brazdil0f672f62019-12-10 10:32:29 +00004838 struct nfs4_exception exception = {
4839 .interruptible = true,
4840 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004841 struct nfs4_label l, *label = NULL;
4842 int err;
4843
4844 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4845
4846 do {
4847 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
4848 trace_nfs4_symlink(dir, &dentry->d_name, err);
4849 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4850 &exception);
4851 } while (exception.retry);
4852
4853 nfs4_label_release_security(label);
4854 return err;
4855}
4856
4857static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4858 struct iattr *sattr, struct nfs4_label *label)
4859{
4860 struct nfs4_createdata *data;
4861 int status = -ENOMEM;
4862
4863 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
4864 if (data == NULL)
4865 goto out;
4866
4867 data->arg.label = label;
4868 status = nfs4_do_create(dir, dentry, data);
4869
4870 nfs4_free_createdata(data);
4871out:
4872 return status;
4873}
4874
4875static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4876 struct iattr *sattr)
4877{
4878 struct nfs_server *server = NFS_SERVER(dir);
David Brazdil0f672f62019-12-10 10:32:29 +00004879 struct nfs4_exception exception = {
4880 .interruptible = true,
4881 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004882 struct nfs4_label l, *label = NULL;
4883 int err;
4884
4885 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4886
4887 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4888 sattr->ia_mode &= ~current_umask();
4889 do {
4890 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
4891 trace_nfs4_mkdir(dir, &dentry->d_name, err);
4892 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4893 &exception);
4894 } while (exception.retry);
4895 nfs4_label_release_security(label);
4896
4897 return err;
4898}
4899
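/*
 * Issue a single READDIR call. The attribute bitmask omits the security
 * label when the server does not advertise support for it, and the
 * cookie verifier from a successful reply is cached in the directory's
 * nfs_inode for use by subsequent calls.
 */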
David Brazdil0f672f62019-12-10 10:32:29 +00004900static int _nfs4_proc_readdir(struct dentry *dentry, const struct cred *cred,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004901 u64 cookie, struct page **pages, unsigned int count, bool plus)
4902{
4903 struct inode *dir = d_inode(dentry);
Olivier Deprez0e641232021-09-23 10:07:05 +02004904 struct nfs_server *server = NFS_SERVER(dir);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004905 struct nfs4_readdir_arg args = {
4906 .fh = NFS_FH(dir),
4907 .pages = pages,
4908 .pgbase = 0,
4909 .count = count,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004910 .plus = plus,
4911 };
4912 struct nfs4_readdir_res res;
4913 struct rpc_message msg = {
4914 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
4915 .rpc_argp = &args,
4916 .rpc_resp = &res,
4917 .rpc_cred = cred,
4918 };
4919 int status;
4920
4921 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
4922 dentry,
4923 (unsigned long long)cookie);
Olivier Deprez0e641232021-09-23 10:07:05 +02004924 if (!(server->caps & NFS_CAP_SECURITY_LABEL))
4925 args.bitmask = server->attr_bitmask_nl;
4926 else
4927 args.bitmask = server->attr_bitmask;
4928
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004929 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
4930 res.pgbase = args.pgbase;
Olivier Deprez0e641232021-09-23 10:07:05 +02004931 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
4932 &res.seq_res, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004933 if (status >= 0) {
4934 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
4935 status += args.pgbase;
4936 }
4937
4938 nfs_invalidate_atime(dir);
4939
4940 dprintk("%s: returns %d\n", __func__, status);
4941 return status;
4942}
4943
David Brazdil0f672f62019-12-10 10:32:29 +00004944static int nfs4_proc_readdir(struct dentry *dentry, const struct cred *cred,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004945 u64 cookie, struct page **pages, unsigned int count, bool plus)
4946{
David Brazdil0f672f62019-12-10 10:32:29 +00004947 struct nfs4_exception exception = {
4948 .interruptible = true,
4949 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004950 int err;
4951 do {
4952 err = _nfs4_proc_readdir(dentry, cred, cookie,
4953 pages, count, plus);
4954 trace_nfs4_readdir(d_inode(dentry), err);
4955 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
4956 &exception);
4957 } while (exception.retry);
4958 return err;
4959}
4960
4961static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4962 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
4963{
4964 struct nfs4_createdata *data;
4965 int mode = sattr->ia_mode;
4966 int status = -ENOMEM;
4967
4968 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
4969 if (data == NULL)
4970 goto out;
4971
4972 if (S_ISFIFO(mode))
4973 data->arg.ftype = NF4FIFO;
4974 else if (S_ISBLK(mode)) {
4975 data->arg.ftype = NF4BLK;
4976 data->arg.u.device.specdata1 = MAJOR(rdev);
4977 data->arg.u.device.specdata2 = MINOR(rdev);
4978 }
4979 else if (S_ISCHR(mode)) {
4980 data->arg.ftype = NF4CHR;
4981 data->arg.u.device.specdata1 = MAJOR(rdev);
4982 data->arg.u.device.specdata2 = MINOR(rdev);
4983 } else if (!S_ISSOCK(mode)) {
4984 status = -EINVAL;
4985 goto out_free;
4986 }
4987
4988 data->arg.label = label;
4989 status = nfs4_do_create(dir, dentry, data);
4990out_free:
4991 nfs4_free_createdata(data);
4992out:
4993 return status;
4994}
4995
4996static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4997 struct iattr *sattr, dev_t rdev)
4998{
4999 struct nfs_server *server = NFS_SERVER(dir);
David Brazdil0f672f62019-12-10 10:32:29 +00005000 struct nfs4_exception exception = {
5001 .interruptible = true,
5002 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005003 struct nfs4_label l, *label = NULL;
5004 int err;
5005
5006 label = nfs4_label_init_security(dir, dentry, sattr, &l);
5007
5008 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
5009 sattr->ia_mode &= ~current_umask();
5010 do {
5011 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
5012 trace_nfs4_mknod(dir, &dentry->d_name, err);
5013 err = nfs4_handle_exception(NFS_SERVER(dir), err,
5014 &exception);
5015 } while (exception.retry);
5016
5017 nfs4_label_release_security(label);
5018
5019 return err;
5020}
5021
5022static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
5023 struct nfs_fsstat *fsstat)
5024{
5025 struct nfs4_statfs_arg args = {
5026 .fh = fhandle,
5027 .bitmask = server->attr_bitmask,
5028 };
5029 struct nfs4_statfs_res res = {
5030 .fsstat = fsstat,
5031 };
5032 struct rpc_message msg = {
5033 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
5034 .rpc_argp = &args,
5035 .rpc_resp = &res,
5036 };
5037
5038 nfs_fattr_init(fsstat->fattr);
5039 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5040}
5041
5042static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
5043{
David Brazdil0f672f62019-12-10 10:32:29 +00005044 struct nfs4_exception exception = {
5045 .interruptible = true,
5046 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005047 int err;
5048 do {
5049 err = nfs4_handle_exception(server,
5050 _nfs4_proc_statfs(server, fhandle, fsstat),
5051 &exception);
5052 } while (exception.retry);
5053 return err;
5054}
5055
5056static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
5057 struct nfs_fsinfo *fsinfo)
5058{
5059 struct nfs4_fsinfo_arg args = {
5060 .fh = fhandle,
5061 .bitmask = server->attr_bitmask,
5062 };
5063 struct nfs4_fsinfo_res res = {
5064 .fsinfo = fsinfo,
5065 };
5066 struct rpc_message msg = {
5067 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
5068 .rpc_argp = &args,
5069 .rpc_resp = &res,
5070 };
5071
5072 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5073}
5074
5075static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5076{
David Brazdil0f672f62019-12-10 10:32:29 +00005077 struct nfs4_exception exception = {
5078 .interruptible = true,
5079 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005080 int err;
5081
5082 do {
5083 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
5084 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
5085 if (err == 0) {
Olivier Deprez0e641232021-09-23 10:07:05 +02005086 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005087 break;
5088 }
5089 err = nfs4_handle_exception(server, err, &exception);
5090 } while (exception.retry);
5091 return err;
5092}
5093
5094static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
5095{
5096 int error;
5097
5098 nfs_fattr_init(fsinfo->fattr);
5099 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
5100 if (error == 0) {
5101 /* block layout checks this! */
5102 server->pnfs_blksize = fsinfo->blksize;
5103 set_pnfs_layoutdriver(server, fhandle, fsinfo);
5104 }
5105
5106 return error;
5107}
5108
5109static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5110 struct nfs_pathconf *pathconf)
5111{
5112 struct nfs4_pathconf_arg args = {
5113 .fh = fhandle,
5114 .bitmask = server->attr_bitmask,
5115 };
5116 struct nfs4_pathconf_res res = {
5117 .pathconf = pathconf,
5118 };
5119 struct rpc_message msg = {
5120 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
5121 .rpc_argp = &args,
5122 .rpc_resp = &res,
5123 };
5124
5125 /* None of the pathconf attributes are mandatory to implement */
5126 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
5127 memset(pathconf, 0, sizeof(*pathconf));
5128 return 0;
5129 }
5130
5131 nfs_fattr_init(pathconf->fattr);
5132 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
5133}
5134
5135static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
5136 struct nfs_pathconf *pathconf)
5137{
David Brazdil0f672f62019-12-10 10:32:29 +00005138 struct nfs4_exception exception = {
5139 .interruptible = true,
5140 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005141 int err;
5142
5143 do {
5144 err = nfs4_handle_exception(server,
5145 _nfs4_proc_pathconf(server, fhandle, pathconf),
5146 &exception);
5147 } while (exception.retry);
5148 return err;
5149}
5150
5151int nfs4_set_rw_stateid(nfs4_stateid *stateid,
5152 const struct nfs_open_context *ctx,
5153 const struct nfs_lock_context *l_ctx,
5154 fmode_t fmode)
5155{
5156 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
5157}
5158EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
5159
5160static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
5161 const struct nfs_open_context *ctx,
5162 const struct nfs_lock_context *l_ctx,
5163 fmode_t fmode)
5164{
5165 nfs4_stateid current_stateid;
5166
5167 /* If the current stateid represents a lost lock, then exit */
5168 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
5169 return true;
5170 return nfs4_stateid_match(stateid, &current_stateid);
5171}
5172
5173static bool nfs4_error_stateid_expired(int err)
5174{
5175 switch (err) {
5176 case -NFS4ERR_DELEG_REVOKED:
5177 case -NFS4ERR_ADMIN_REVOKED:
5178 case -NFS4ERR_BAD_STATEID:
5179 case -NFS4ERR_STALE_STATEID:
5180 case -NFS4ERR_OLD_STATEID:
5181 case -NFS4ERR_OPENMODE:
5182 case -NFS4ERR_EXPIRED:
5183 return true;
5184 }
5185 return false;
5186}
5187
5188static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
5189{
5190 struct nfs_server *server = NFS_SERVER(hdr->inode);
5191
5192 trace_nfs4_read(hdr, task->tk_status);
5193 if (task->tk_status < 0) {
5194 struct nfs4_exception exception = {
5195 .inode = hdr->inode,
5196 .state = hdr->args.context->state,
5197 .stateid = &hdr->args.stateid,
5198 };
5199 task->tk_status = nfs4_async_handle_exception(task,
5200 server, task->tk_status, &exception);
5201 if (exception.retry) {
5202 rpc_restart_call_prepare(task);
5203 return -EAGAIN;
5204 }
5205 }
5206
5207 if (task->tk_status > 0)
5208 renew_lease(server, hdr->timestamp);
5209 return 0;
5210}
5211
5212static bool nfs4_read_stateid_changed(struct rpc_task *task,
5213 struct nfs_pgio_args *args)
5214{
5215
5216 if (!nfs4_error_stateid_expired(task->tk_status) ||
5217 nfs4_stateid_is_current(&args->stateid,
5218 args->context,
5219 args->lock_context,
5220 FMODE_READ))
5221 return false;
5222 rpc_restart_call_prepare(task);
5223 return true;
5224}
5225
5226static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5227{
5228
5229 dprintk("--> %s\n", __func__);
5230
5231 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5232 return -EAGAIN;
5233 if (nfs4_read_stateid_changed(task, &hdr->args))
5234 return -EAGAIN;
5235 if (task->tk_status > 0)
5236 nfs_invalidate_atime(hdr->inode);
5237 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5238 nfs4_read_done_cb(task, hdr);
5239}
5240
5241static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
5242 struct rpc_message *msg)
5243{
5244 hdr->timestamp = jiffies;
5245 if (!hdr->pgio_done_cb)
5246 hdr->pgio_done_cb = nfs4_read_done_cb;
5247 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
5248 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
5249}
5250
5251static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
5252 struct nfs_pgio_header *hdr)
5253{
5254 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
5255 &hdr->args.seq_args,
5256 &hdr->res.seq_res,
5257 task))
5258 return 0;
5259 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
5260 hdr->args.lock_context,
5261 hdr->rw_mode) == -EIO)
5262 return -EIO;
5263 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
5264 return -EIO;
5265 return 0;
5266}
5267
5268static int nfs4_write_done_cb(struct rpc_task *task,
5269 struct nfs_pgio_header *hdr)
5270{
5271 struct inode *inode = hdr->inode;
5272
5273 trace_nfs4_write(hdr, task->tk_status);
5274 if (task->tk_status < 0) {
5275 struct nfs4_exception exception = {
5276 .inode = hdr->inode,
5277 .state = hdr->args.context->state,
5278 .stateid = &hdr->args.stateid,
5279 };
5280 task->tk_status = nfs4_async_handle_exception(task,
5281 NFS_SERVER(inode), task->tk_status,
5282 &exception);
5283 if (exception.retry) {
5284 rpc_restart_call_prepare(task);
5285 return -EAGAIN;
5286 }
5287 }
5288 if (task->tk_status >= 0) {
5289 renew_lease(NFS_SERVER(inode), hdr->timestamp);
5290 nfs_writeback_update_inode(hdr);
5291 }
5292 return 0;
5293}
5294
5295static bool nfs4_write_stateid_changed(struct rpc_task *task,
5296 struct nfs_pgio_args *args)
5297{
5298
5299 if (!nfs4_error_stateid_expired(task->tk_status) ||
5300 nfs4_stateid_is_current(&args->stateid,
5301 args->context,
5302 args->lock_context,
5303 FMODE_WRITE))
5304 return false;
5305 rpc_restart_call_prepare(task);
5306 return true;
5307}
5308
5309static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
5310{
5311 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
5312 return -EAGAIN;
5313 if (nfs4_write_stateid_changed(task, &hdr->args))
5314 return -EAGAIN;
5315 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
5316 nfs4_write_done_cb(task, hdr);
5317}
5318
5319static
5320bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
5321{
5322 /* Don't request attributes for pNFS or O_DIRECT writes */
5323 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
5324 return false;
5325 /* Otherwise, request attributes if and only if we don't hold
5326 * a delegation
5327 */
5328 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
5329}
5330
5331static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
5332 struct rpc_message *msg,
5333 struct rpc_clnt **clnt)
5334{
5335 struct nfs_server *server = NFS_SERVER(hdr->inode);
5336
5337 if (!nfs4_write_need_cache_consistency_data(hdr)) {
5338 hdr->args.bitmask = NULL;
5339 hdr->res.fattr = NULL;
5340 } else
5341 hdr->args.bitmask = server->cache_consistency_bitmask;
5342
5343 if (!hdr->pgio_done_cb)
5344 hdr->pgio_done_cb = nfs4_write_done_cb;
5345 hdr->res.server = server;
5346 hdr->timestamp = jiffies;
5347
5348 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
Olivier Deprez0e641232021-09-23 10:07:05 +02005349 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005350 nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
5351}
5352
5353static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
5354{
5355 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
5356 &data->args.seq_args,
5357 &data->res.seq_res,
5358 task);
5359}
5360
5361static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
5362{
5363 struct inode *inode = data->inode;
5364
5365 trace_nfs4_commit(data, task->tk_status);
5366 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
5367 NULL, NULL) == -EAGAIN) {
5368 rpc_restart_call_prepare(task);
5369 return -EAGAIN;
5370 }
5371 return 0;
5372}
5373
5374static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
5375{
5376 if (!nfs4_sequence_done(task, &data->res.seq_res))
5377 return -EAGAIN;
5378 return data->commit_done_cb(task, data);
5379}
5380
5381static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg,
5382 struct rpc_clnt **clnt)
5383{
5384 struct nfs_server *server = NFS_SERVER(data->inode);
5385
5386 if (data->commit_done_cb == NULL)
5387 data->commit_done_cb = nfs4_commit_done_cb;
5388 data->res.server = server;
5389 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
5390 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
5391 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
5392}
5393
5394static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
5395 struct nfs_commitres *res)
5396{
5397 struct inode *dst_inode = file_inode(dst);
5398 struct nfs_server *server = NFS_SERVER(dst_inode);
5399 struct rpc_message msg = {
5400 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
5401 .rpc_argp = args,
5402 .rpc_resp = res,
5403 };
5404
5405 args->fh = NFS_FH(dst_inode);
5406 return nfs4_call_sync(server->client, server, &msg,
5407 &args->seq_args, &res->seq_res, 1);
5408}
5409
5410int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res)
5411{
5412 struct nfs_commitargs args = {
5413 .offset = offset,
5414 .count = count,
5415 };
5416 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
5417 struct nfs4_exception exception = { };
5418 int status;
5419
5420 do {
5421 status = _nfs4_proc_commit(dst, &args, res);
5422 status = nfs4_handle_exception(dst_server, status, &exception);
5423 } while (exception.retry);
5424
5425 return status;
5426}
5427
5428struct nfs4_renewdata {
5429 struct nfs_client *client;
5430 unsigned long timestamp;
5431};
5432
5433/*
5434 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
5435 * standalone procedure for queueing an asynchronous RENEW.
5436 */
5437static void nfs4_renew_release(void *calldata)
5438{
5439 struct nfs4_renewdata *data = calldata;
5440 struct nfs_client *clp = data->client;
5441
5442 if (refcount_read(&clp->cl_count) > 1)
5443 nfs4_schedule_state_renewal(clp);
5444 nfs_put_client(clp);
5445 kfree(data);
5446}
5447
5448static void nfs4_renew_done(struct rpc_task *task, void *calldata)
5449{
5450 struct nfs4_renewdata *data = calldata;
5451 struct nfs_client *clp = data->client;
5452 unsigned long timestamp = data->timestamp;
5453
5454 trace_nfs4_renew_async(clp, task->tk_status);
5455 switch (task->tk_status) {
5456 case 0:
5457 break;
5458 case -NFS4ERR_LEASE_MOVED:
5459 nfs4_schedule_lease_moved_recovery(clp);
5460 break;
5461 default:
5462 /* Unless we're shutting down, schedule state recovery! */
5463 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
5464 return;
5465 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
5466 nfs4_schedule_lease_recovery(clp);
5467 return;
5468 }
5469 nfs4_schedule_path_down_recovery(clp);
5470 }
5471 do_renew_lease(clp, timestamp);
5472}
5473
5474static const struct rpc_call_ops nfs4_renew_ops = {
5475 .rpc_call_done = nfs4_renew_done,
5476 .rpc_release = nfs4_renew_release,
5477};
5478
David Brazdil0f672f62019-12-10 10:32:29 +00005479static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005480{
5481 struct rpc_message msg = {
5482 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5483 .rpc_argp = clp,
5484 .rpc_cred = cred,
5485 };
5486 struct nfs4_renewdata *data;
5487
5488 if (renew_flags == 0)
5489 return 0;
5490 if (!refcount_inc_not_zero(&clp->cl_count))
5491 return -EIO;
5492 data = kmalloc(sizeof(*data), GFP_NOFS);
5493 if (data == NULL) {
5494 nfs_put_client(clp);
5495 return -ENOMEM;
5496 }
5497 data->client = clp;
5498 data->timestamp = jiffies;
5499 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
5500 &nfs4_renew_ops, data);
5501}
5502
David Brazdil0f672f62019-12-10 10:32:29 +00005503static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005504{
5505 struct rpc_message msg = {
5506 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5507 .rpc_argp = clp,
5508 .rpc_cred = cred,
5509 };
5510 unsigned long now = jiffies;
5511 int status;
5512
5513 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5514 if (status < 0)
5515 return status;
5516 do_renew_lease(clp, now);
5517 return 0;
5518}
5519
5520static inline int nfs4_server_supports_acls(struct nfs_server *server)
5521{
5522 return server->caps & NFS_CAP_ACLS;
5523}
5524
5525/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
 5526 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
5527 * the stack.
5528 */
5529#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
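/*
 * Illustrative arithmetic (assuming the common configuration of
 * XATTR_SIZE_MAX == 65536 and PAGE_SIZE == 4096): NFS4ACL_MAXPAGES works
 * out to 16, so the on-stack page-pointer arrays below hold 16 or 17 entries.
 */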
5530
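/*
 * Copy a flat caller-supplied buffer into freshly allocated pages so the
 * data can be handed to the RPC/XDR layer page by page. Returns the number
 * of pages filled, or -ENOMEM after freeing any pages already allocated.
 */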
5531static int buf_to_pages_noslab(const void *buf, size_t buflen,
5532 struct page **pages)
5533{
5534 struct page *newpage, **spages;
5535 int rc = 0;
5536 size_t len;
5537 spages = pages;
5538
5539 do {
5540 len = min_t(size_t, PAGE_SIZE, buflen);
5541 newpage = alloc_page(GFP_KERNEL);
5542
5543 if (newpage == NULL)
5544 goto unwind;
5545 memcpy(page_address(newpage), buf, len);
5546 buf += len;
5547 buflen -= len;
5548 *pages++ = newpage;
5549 rc++;
5550 } while (buflen != 0);
5551
5552 return rc;
5553
5554unwind:
5555 for(; rc > 0; rc--)
5556 __free_page(spages[rc-1]);
5557 return -ENOMEM;
5558}
5559
5560struct nfs4_cached_acl {
5561 int cached;
5562 size_t len;
 5563	char data[];
5564};
5565
5566static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
5567{
5568 struct nfs_inode *nfsi = NFS_I(inode);
5569
5570 spin_lock(&inode->i_lock);
5571 kfree(nfsi->nfs4_acl);
5572 nfsi->nfs4_acl = acl;
5573 spin_unlock(&inode->i_lock);
5574}
5575
5576static void nfs4_zap_acl_attr(struct inode *inode)
5577{
5578 nfs4_set_cached_acl(inode, NULL);
5579}
5580
5581static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
5582{
5583 struct nfs_inode *nfsi = NFS_I(inode);
5584 struct nfs4_cached_acl *acl;
5585 int ret = -ENOENT;
5586
5587 spin_lock(&inode->i_lock);
5588 acl = nfsi->nfs4_acl;
5589 if (acl == NULL)
5590 goto out;
5591 if (buf == NULL) /* user is just asking for length */
5592 goto out_len;
5593 if (acl->cached == 0)
5594 goto out;
5595 ret = -ERANGE; /* see getxattr(2) man page */
5596 if (acl->len > buflen)
5597 goto out;
5598 memcpy(buf, acl->data, acl->len);
5599out_len:
5600 ret = acl->len;
5601out:
5602 spin_unlock(&inode->i_lock);
5603 return ret;
5604}
5605
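/*
 * Cache the ACL just received from the server: if it fits within a single
 * allocation of at most PAGE_SIZE the data is copied and marked "cached";
 * otherwise only the length is remembered, so a later length-only getxattr
 * can still be answered from the cache.
 */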
5606static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
5607{
5608 struct nfs4_cached_acl *acl;
5609 size_t buflen = sizeof(*acl) + acl_len;
5610
5611 if (buflen <= PAGE_SIZE) {
5612 acl = kmalloc(buflen, GFP_KERNEL);
5613 if (acl == NULL)
5614 goto out;
5615 acl->cached = 1;
5616 _copy_from_pages(acl->data, pages, pgbase, acl_len);
5617 } else {
5618 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
5619 if (acl == NULL)
5620 goto out;
5621 acl->cached = 0;
5622 }
5623 acl->len = acl_len;
5624out:
5625 nfs4_set_cached_acl(inode, acl);
5626}
5627
5628/*
5629 * The getxattr API returns the required buffer length when called with a
5630 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
5631 * the required buf. On a NULL buf, we send a page of data to the server
5632 * guessing that the ACL request can be serviced by a page. If so, we cache
5633 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
5634 * the cache. If not so, we throw away the page, and cache the required
5635 * length. The next getxattr call will then produce another round trip to
5636 * the server, this time with the input buf of the required size.
5637 */
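/*
 * A minimal userspace sketch of that two-call pattern (illustrative only,
 * no error handling; the xattr name is the one this file serves):
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	void *buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 */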
5638static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5639{
5640 struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
5641 struct nfs_getaclargs args = {
5642 .fh = NFS_FH(inode),
5643 .acl_pages = pages,
5644 .acl_len = buflen,
5645 };
5646 struct nfs_getaclres res = {
5647 .acl_len = buflen,
5648 };
5649 struct rpc_message msg = {
5650 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
5651 .rpc_argp = &args,
5652 .rpc_resp = &res,
5653 };
5654 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
5655 int ret = -ENOMEM, i;
5656
5657 if (npages > ARRAY_SIZE(pages))
5658 return -ERANGE;
5659
5660 for (i = 0; i < npages; i++) {
5661 pages[i] = alloc_page(GFP_KERNEL);
5662 if (!pages[i])
5663 goto out_free;
5664 }
5665
5666 /* for decoding across pages */
5667 res.acl_scratch = alloc_page(GFP_KERNEL);
5668 if (!res.acl_scratch)
5669 goto out_free;
5670
5671 args.acl_len = npages * PAGE_SIZE;
5672
5673 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
5674 __func__, buf, buflen, npages, args.acl_len);
5675 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
5676 &msg, &args.seq_args, &res.seq_res, 0);
5677 if (ret)
5678 goto out_free;
5679
5680 /* Handle the case where the passed-in buffer is too short */
5681 if (res.acl_flags & NFS4_ACL_TRUNC) {
5682 /* Did the user only issue a request for the acl length? */
5683 if (buf == NULL)
5684 goto out_ok;
5685 ret = -ERANGE;
5686 goto out_free;
5687 }
5688 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
5689 if (buf) {
5690 if (res.acl_len > buflen) {
5691 ret = -ERANGE;
5692 goto out_free;
5693 }
5694 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
5695 }
5696out_ok:
5697 ret = res.acl_len;
5698out_free:
5699 for (i = 0; i < npages; i++)
5700 if (pages[i])
5701 __free_page(pages[i]);
5702 if (res.acl_scratch)
5703 __free_page(res.acl_scratch);
5704 return ret;
5705}
5706
5707static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5708{
David Brazdil0f672f62019-12-10 10:32:29 +00005709 struct nfs4_exception exception = {
5710 .interruptible = true,
5711 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005712 ssize_t ret;
5713 do {
5714 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
5715 trace_nfs4_get_acl(inode, ret);
5716 if (ret >= 0)
5717 break;
5718 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
5719 } while (exception.retry);
5720 return ret;
5721}
5722
5723static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
5724{
5725 struct nfs_server *server = NFS_SERVER(inode);
5726 int ret;
5727
5728 if (!nfs4_server_supports_acls(server))
5729 return -EOPNOTSUPP;
5730 ret = nfs_revalidate_inode(server, inode);
5731 if (ret < 0)
5732 return ret;
5733 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
5734 nfs_zap_acl_cache(inode);
5735 ret = nfs4_read_cached_acl(inode, buf, buflen);
5736 if (ret != -ENOENT)
5737 /* -ENOENT is returned if there is no ACL or if there is an ACL
5738 * but no cached acl data, just the acl length */
5739 return ret;
5740 return nfs4_get_acl_uncached(inode, buf, buflen);
5741}
5742
5743static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
5744{
5745 struct nfs_server *server = NFS_SERVER(inode);
5746 struct page *pages[NFS4ACL_MAXPAGES];
5747 struct nfs_setaclargs arg = {
5748 .fh = NFS_FH(inode),
5749 .acl_pages = pages,
5750 .acl_len = buflen,
5751 };
5752 struct nfs_setaclres res;
5753 struct rpc_message msg = {
5754 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
5755 .rpc_argp = &arg,
5756 .rpc_resp = &res,
5757 };
5758 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
5759 int ret, i;
5760
Olivier Deprez0e641232021-09-23 10:07:05 +02005761 /* You can't remove system.nfs4_acl: */
5762 if (buflen == 0)
5763 return -EINVAL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005764 if (!nfs4_server_supports_acls(server))
5765 return -EOPNOTSUPP;
5766 if (npages > ARRAY_SIZE(pages))
5767 return -ERANGE;
5768 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages);
5769 if (i < 0)
5770 return i;
5771 nfs4_inode_make_writeable(inode);
5772 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5773
5774 /*
5775 * Free each page after tx, so the only ref left is
5776 * held by the network stack
5777 */
5778 for (; i > 0; i--)
5779 put_page(pages[i-1]);
5780
5781 /*
5782 * Acl update can result in inode attribute update.
5783 * so mark the attribute cache invalid.
5784 */
5785 spin_lock(&inode->i_lock);
5786 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
5787 | NFS_INO_INVALID_CTIME
5788 | NFS_INO_REVAL_FORCED;
5789 spin_unlock(&inode->i_lock);
5790 nfs_access_zap_cache(inode);
5791 nfs_zap_acl_cache(inode);
5792 return ret;
5793}
5794
5795static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
5796{
5797 struct nfs4_exception exception = { };
5798 int err;
5799 do {
5800 err = __nfs4_proc_set_acl(inode, buf, buflen);
5801 trace_nfs4_set_acl(inode, err);
Olivier Deprez0e641232021-09-23 10:07:05 +02005802 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
5803 /*
5804 * no need to retry since the kernel
5805 * isn't involved in encoding the ACEs.
5806 */
5807 err = -EINVAL;
5808 break;
5809 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005810 err = nfs4_handle_exception(NFS_SERVER(inode), err,
5811 &exception);
5812 } while (exception.retry);
5813 return err;
5814}
5815
5816#ifdef CONFIG_NFS_V4_SECURITY_LABEL
5817static int _nfs4_get_security_label(struct inode *inode, void *buf,
5818 size_t buflen)
5819{
5820 struct nfs_server *server = NFS_SERVER(inode);
5821 struct nfs_fattr fattr;
5822 struct nfs4_label label = {0, 0, buflen, buf};
5823
5824 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
5825 struct nfs4_getattr_arg arg = {
5826 .fh = NFS_FH(inode),
5827 .bitmask = bitmask,
5828 };
5829 struct nfs4_getattr_res res = {
5830 .fattr = &fattr,
5831 .label = &label,
5832 .server = server,
5833 };
5834 struct rpc_message msg = {
5835 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
5836 .rpc_argp = &arg,
5837 .rpc_resp = &res,
5838 };
5839 int ret;
5840
5841 nfs_fattr_init(&fattr);
5842
5843 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
5844 if (ret)
5845 return ret;
5846 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
5847 return -ENOENT;
Olivier Deprez0e641232021-09-23 10:07:05 +02005848 return label.len;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005849}
5850
5851static int nfs4_get_security_label(struct inode *inode, void *buf,
5852 size_t buflen)
5853{
David Brazdil0f672f62019-12-10 10:32:29 +00005854 struct nfs4_exception exception = {
5855 .interruptible = true,
5856 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005857 int err;
5858
5859 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5860 return -EOPNOTSUPP;
5861
5862 do {
5863 err = _nfs4_get_security_label(inode, buf, buflen);
5864 trace_nfs4_get_security_label(inode, err);
5865 err = nfs4_handle_exception(NFS_SERVER(inode), err,
5866 &exception);
5867 } while (exception.retry);
5868 return err;
5869}
5870
5871static int _nfs4_do_set_security_label(struct inode *inode,
5872 struct nfs4_label *ilabel,
5873 struct nfs_fattr *fattr,
5874 struct nfs4_label *olabel)
5875{
5876
5877 struct iattr sattr = {0};
5878 struct nfs_server *server = NFS_SERVER(inode);
5879 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
5880 struct nfs_setattrargs arg = {
5881 .fh = NFS_FH(inode),
5882 .iap = &sattr,
5883 .server = server,
5884 .bitmask = bitmask,
5885 .label = ilabel,
5886 };
5887 struct nfs_setattrres res = {
5888 .fattr = fattr,
5889 .label = olabel,
5890 .server = server,
5891 };
5892 struct rpc_message msg = {
5893 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
5894 .rpc_argp = &arg,
5895 .rpc_resp = &res,
5896 };
5897 int status;
5898
5899 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
5900
5901 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5902 if (status)
5903 dprintk("%s failed: %d\n", __func__, status);
5904
5905 return status;
5906}
5907
5908static int nfs4_do_set_security_label(struct inode *inode,
5909 struct nfs4_label *ilabel,
5910 struct nfs_fattr *fattr,
5911 struct nfs4_label *olabel)
5912{
5913 struct nfs4_exception exception = { };
5914 int err;
5915
5916 do {
5917 err = _nfs4_do_set_security_label(inode, ilabel,
5918 fattr, olabel);
5919 trace_nfs4_set_security_label(inode, err);
5920 err = nfs4_handle_exception(NFS_SERVER(inode), err,
5921 &exception);
5922 } while (exception.retry);
5923 return err;
5924}
5925
5926static int
5927nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
5928{
5929 struct nfs4_label ilabel, *olabel = NULL;
5930 struct nfs_fattr fattr;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005931 int status;
5932
5933 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5934 return -EOPNOTSUPP;
5935
5936 nfs_fattr_init(&fattr);
5937
5938 ilabel.pi = 0;
5939 ilabel.lfs = 0;
5940 ilabel.label = (char *)buf;
5941 ilabel.len = buflen;
5942
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005943 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
5944 if (IS_ERR(olabel)) {
 5945		status = PTR_ERR(olabel);
5946 goto out;
5947 }
5948
5949 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
5950 if (status == 0)
5951 nfs_setsecurity(inode, &fattr, olabel);
5952
5953 nfs4_label_free(olabel);
5954out:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00005955 return status;
5956}
5957#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
5958
5959
5960static void nfs4_init_boot_verifier(const struct nfs_client *clp,
5961 nfs4_verifier *bootverf)
5962{
5963 __be32 verf[2];
5964
5965 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
5966 /* An impossible timestamp guarantees this value
5967 * will never match a generated boot time. */
5968 verf[0] = cpu_to_be32(U32_MAX);
5969 verf[1] = cpu_to_be32(U32_MAX);
5970 } else {
5971 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
5972 u64 ns = ktime_to_ns(nn->boot_time);
5973
5974 verf[0] = cpu_to_be32(ns >> 32);
5975 verf[1] = cpu_to_be32(ns);
5976 }
5977 memcpy(bootverf->data, verf, sizeof(bootverf->data));
5978}
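/*
 * The verifier built above is the nanosecond boot time split big-endian into
 * two 32-bit words; a server that sees a different verifier from a known
 * client can conclude that the client rebooted, which is the usual NFSv4
 * boot-verifier contract.
 */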
5979
5980static int
5981nfs4_init_nonuniform_client_string(struct nfs_client *clp)
5982{
5983 size_t len;
5984 char *str;
5985
5986 if (clp->cl_owner_id != NULL)
5987 return 0;
5988
5989 rcu_read_lock();
5990 len = 14 +
5991 strlen(clp->cl_rpcclient->cl_nodename) +
5992 1 +
5993 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
5994 1;
5995 rcu_read_unlock();
5996 if (nfs4_client_id_uniquifier[0] != '\0')
5997 len += strlen(nfs4_client_id_uniquifier) + 1;
5998 if (len > NFS4_OPAQUE_LIMIT + 1)
5999 return -EINVAL;
6000
6001 /*
6002 * Since this string is allocated at mount time, and held until the
6003 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6004 * about a memory-reclaim deadlock.
6005 */
6006 str = kmalloc(len, GFP_KERNEL);
6007 if (!str)
6008 return -ENOMEM;
6009
6010 rcu_read_lock();
6011 if (nfs4_client_id_uniquifier[0] != '\0')
6012 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s",
6013 clp->cl_rpcclient->cl_nodename,
6014 nfs4_client_id_uniquifier,
6015 rpc_peeraddr2str(clp->cl_rpcclient,
6016 RPC_DISPLAY_ADDR));
6017 else
6018 scnprintf(str, len, "Linux NFSv4.0 %s/%s",
6019 clp->cl_rpcclient->cl_nodename,
6020 rpc_peeraddr2str(clp->cl_rpcclient,
6021 RPC_DISPLAY_ADDR));
6022 rcu_read_unlock();
6023
6024 clp->cl_owner_id = str;
6025 return 0;
6026}
6027
6028static int
6029nfs4_init_uniquifier_client_string(struct nfs_client *clp)
6030{
6031 size_t len;
6032 char *str;
6033
6034 len = 10 + 10 + 1 + 10 + 1 +
6035 strlen(nfs4_client_id_uniquifier) + 1 +
6036 strlen(clp->cl_rpcclient->cl_nodename) + 1;
6037
6038 if (len > NFS4_OPAQUE_LIMIT + 1)
6039 return -EINVAL;
6040
6041 /*
6042 * Since this string is allocated at mount time, and held until the
6043 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6044 * about a memory-reclaim deadlock.
6045 */
6046 str = kmalloc(len, GFP_KERNEL);
6047 if (!str)
6048 return -ENOMEM;
6049
6050 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
6051 clp->rpc_ops->version, clp->cl_minorversion,
6052 nfs4_client_id_uniquifier,
6053 clp->cl_rpcclient->cl_nodename);
6054 clp->cl_owner_id = str;
6055 return 0;
6056}
6057
6058static int
6059nfs4_init_uniform_client_string(struct nfs_client *clp)
6060{
6061 size_t len;
6062 char *str;
6063
6064 if (clp->cl_owner_id != NULL)
6065 return 0;
6066
6067 if (nfs4_client_id_uniquifier[0] != '\0')
6068 return nfs4_init_uniquifier_client_string(clp);
6069
6070 len = 10 + 10 + 1 + 10 + 1 +
6071 strlen(clp->cl_rpcclient->cl_nodename) + 1;
6072
6073 if (len > NFS4_OPAQUE_LIMIT + 1)
6074 return -EINVAL;
6075
6076 /*
6077 * Since this string is allocated at mount time, and held until the
6078 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
6079 * about a memory-reclaim deadlock.
6080 */
6081 str = kmalloc(len, GFP_KERNEL);
6082 if (!str)
6083 return -ENOMEM;
6084
6085 scnprintf(str, len, "Linux NFSv%u.%u %s",
6086 clp->rpc_ops->version, clp->cl_minorversion,
6087 clp->cl_rpcclient->cl_nodename);
6088 clp->cl_owner_id = str;
6089 return 0;
6090}
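/*
 * For example (hypothetical nodename), the uniform string built above reads
 * "Linux NFSv4.1 client1.example.com", while the nonuniform variant earlier
 * in this file is of the form "Linux NFSv4.0 <nodename>/<address>".
 */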
6091
6092/*
6093 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
6094 * services. Advertise one based on the address family of the
6095 * clientaddr.
6096 */
6097static unsigned int
6098nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
6099{
6100 if (strchr(clp->cl_ipaddr, ':') != NULL)
6101 return scnprintf(buf, len, "tcp6");
6102 else
6103 return scnprintf(buf, len, "tcp");
6104}
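/*
 * e.g. a cl_ipaddr of "192.0.2.1" yields "tcp" and "2001:db8::1" yields
 * "tcp6" (documentation addresses only); the presence of a colon is how the
 * address family is inferred.
 */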
6105
6106static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
6107{
6108 struct nfs4_setclientid *sc = calldata;
6109
6110 if (task->tk_status == 0)
6111 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
6112}
6113
6114static const struct rpc_call_ops nfs4_setclientid_ops = {
6115 .rpc_call_done = nfs4_setclientid_done,
6116};
6117
6118/**
6119 * nfs4_proc_setclientid - Negotiate client ID
6120 * @clp: state data structure
6121 * @program: RPC program for NFSv4 callback service
6122 * @port: IP port number for NFS4 callback service
David Brazdil0f672f62019-12-10 10:32:29 +00006123 * @cred: credential to use for this call
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006124 * @res: where to place the result
6125 *
6126 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6127 */
6128int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
David Brazdil0f672f62019-12-10 10:32:29 +00006129 unsigned short port, const struct cred *cred,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006130 struct nfs4_setclientid_res *res)
6131{
6132 nfs4_verifier sc_verifier;
6133 struct nfs4_setclientid setclientid = {
6134 .sc_verifier = &sc_verifier,
6135 .sc_prog = program,
6136 .sc_clnt = clp,
6137 };
6138 struct rpc_message msg = {
6139 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
6140 .rpc_argp = &setclientid,
6141 .rpc_resp = res,
6142 .rpc_cred = cred,
6143 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006144 struct rpc_task_setup task_setup_data = {
6145 .rpc_client = clp->cl_rpcclient,
6146 .rpc_message = &msg,
6147 .callback_ops = &nfs4_setclientid_ops,
6148 .callback_data = &setclientid,
David Brazdil0f672f62019-12-10 10:32:29 +00006149 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006150 };
Olivier Deprez0e641232021-09-23 10:07:05 +02006151 unsigned long now = jiffies;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006152 int status;
6153
6154 /* nfs_client_id4 */
6155 nfs4_init_boot_verifier(clp, &sc_verifier);
6156
6157 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
6158 status = nfs4_init_uniform_client_string(clp);
6159 else
6160 status = nfs4_init_nonuniform_client_string(clp);
6161
6162 if (status)
6163 goto out;
6164
6165 /* cb_client4 */
6166 setclientid.sc_netid_len =
6167 nfs4_init_callback_netid(clp,
6168 setclientid.sc_netid,
6169 sizeof(setclientid.sc_netid));
6170 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
6171 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
6172 clp->cl_ipaddr, port >> 8, port & 255);
6173
6174 dprintk("NFS call setclientid auth=%s, '%s'\n",
6175 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6176 clp->cl_owner_id);
David Brazdil0f672f62019-12-10 10:32:29 +00006177
6178 status = nfs4_call_sync_custom(&task_setup_data);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006179 if (setclientid.sc_cred) {
David Brazdil0f672f62019-12-10 10:32:29 +00006180 kfree(clp->cl_acceptor);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006181 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
6182 put_rpccred(setclientid.sc_cred);
6183 }
Olivier Deprez0e641232021-09-23 10:07:05 +02006184
6185 if (status == 0)
6186 do_renew_lease(clp, now);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006187out:
6188 trace_nfs4_setclientid(clp, status);
6189 dprintk("NFS reply setclientid: %d\n", status);
6190 return status;
6191}
6192
6193/**
6194 * nfs4_proc_setclientid_confirm - Confirm client ID
6195 * @clp: state data structure
David Brazdil0f672f62019-12-10 10:32:29 +00006196 * @arg: result of a previous SETCLIENTID
6197 * @cred: credential to use for this call
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006198 *
6199 * Returns zero, a negative errno, or a negative NFS4ERR status code.
6200 */
6201int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
6202 struct nfs4_setclientid_res *arg,
David Brazdil0f672f62019-12-10 10:32:29 +00006203 const struct cred *cred)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006204{
6205 struct rpc_message msg = {
6206 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
6207 .rpc_argp = arg,
6208 .rpc_cred = cred,
6209 };
6210 int status;
6211
6212 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
6213 clp->cl_rpcclient->cl_auth->au_ops->au_name,
6214 clp->cl_clientid);
David Brazdil0f672f62019-12-10 10:32:29 +00006215 status = rpc_call_sync(clp->cl_rpcclient, &msg,
6216 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006217 trace_nfs4_setclientid_confirm(clp, status);
6218 dprintk("NFS reply setclientid_confirm: %d\n", status);
6219 return status;
6220}
6221
6222struct nfs4_delegreturndata {
6223 struct nfs4_delegreturnargs args;
6224 struct nfs4_delegreturnres res;
6225 struct nfs_fh fh;
6226 nfs4_stateid stateid;
6227 unsigned long timestamp;
6228 struct {
6229 struct nfs4_layoutreturn_args arg;
6230 struct nfs4_layoutreturn_res res;
6231 struct nfs4_xdr_opaque_data ld_private;
6232 u32 roc_barrier;
6233 bool roc;
6234 } lr;
6235 struct nfs_fattr fattr;
6236 int rpc_status;
6237 struct inode *inode;
6238};
6239
6240static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
6241{
6242 struct nfs4_delegreturndata *data = calldata;
6243 struct nfs4_exception exception = {
6244 .inode = data->inode,
6245 .stateid = &data->stateid,
Olivier Deprez0e641232021-09-23 10:07:05 +02006246 .task_is_privileged = data->args.seq_args.sa_privileged,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006247 };
6248
6249 if (!nfs4_sequence_done(task, &data->res.seq_res))
6250 return;
6251
6252 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
6253
6254 /* Handle Layoutreturn errors */
Olivier Deprez0e641232021-09-23 10:07:05 +02006255 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
6256 &data->res.lr_ret) == -EAGAIN)
David Brazdil0f672f62019-12-10 10:32:29 +00006257 goto out_restart;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006258
6259 switch (task->tk_status) {
6260 case 0:
6261 renew_lease(data->res.server, data->timestamp);
6262 break;
6263 case -NFS4ERR_ADMIN_REVOKED:
6264 case -NFS4ERR_DELEG_REVOKED:
6265 case -NFS4ERR_EXPIRED:
6266 nfs4_free_revoked_stateid(data->res.server,
6267 data->args.stateid,
6268 task->tk_msg.rpc_cred);
6269 /* Fallthrough */
6270 case -NFS4ERR_BAD_STATEID:
6271 case -NFS4ERR_STALE_STATEID:
6272 task->tk_status = 0;
6273 break;
6274 case -NFS4ERR_OLD_STATEID:
6275 if (nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
6276 goto out_restart;
6277 task->tk_status = 0;
6278 break;
6279 case -NFS4ERR_ACCESS:
6280 if (data->args.bitmask) {
6281 data->args.bitmask = NULL;
6282 data->res.fattr = NULL;
6283 goto out_restart;
6284 }
6285 /* Fallthrough */
6286 default:
6287 task->tk_status = nfs4_async_handle_exception(task,
6288 data->res.server, task->tk_status,
6289 &exception);
6290 if (exception.retry)
6291 goto out_restart;
6292 }
6293 data->rpc_status = task->tk_status;
6294 return;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006295out_restart:
6296 task->tk_status = 0;
6297 rpc_restart_call_prepare(task);
6298}
6299
6300static void nfs4_delegreturn_release(void *calldata)
6301{
6302 struct nfs4_delegreturndata *data = calldata;
6303 struct inode *inode = data->inode;
6304
Olivier Deprez0e641232021-09-23 10:07:05 +02006305 if (data->lr.roc)
6306 pnfs_roc_release(&data->lr.arg, &data->lr.res,
6307 data->res.lr_ret);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006308 if (inode) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006309 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
6310 nfs_iput_and_deactive(inode);
6311 }
6312 kfree(calldata);
6313}
6314
6315static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
6316{
6317 struct nfs4_delegreturndata *d_data;
6318 struct pnfs_layout_hdr *lo;
6319
6320 d_data = (struct nfs4_delegreturndata *)data;
6321
Olivier Deprez0e641232021-09-23 10:07:05 +02006322 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
6323 nfs4_sequence_done(task, &d_data->res.seq_res);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006324 return;
Olivier Deprez0e641232021-09-23 10:07:05 +02006325 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006326
6327 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
6328 if (lo && !pnfs_layout_is_valid(lo)) {
6329 d_data->args.lr_args = NULL;
6330 d_data->res.lr_res = NULL;
6331 }
6332
6333 nfs4_setup_sequence(d_data->res.server->nfs_client,
6334 &d_data->args.seq_args,
6335 &d_data->res.seq_res,
6336 task);
6337}
6338
6339static const struct rpc_call_ops nfs4_delegreturn_ops = {
6340 .rpc_call_prepare = nfs4_delegreturn_prepare,
6341 .rpc_call_done = nfs4_delegreturn_done,
6342 .rpc_release = nfs4_delegreturn_release,
6343};
6344
David Brazdil0f672f62019-12-10 10:32:29 +00006345static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006346{
6347 struct nfs4_delegreturndata *data;
6348 struct nfs_server *server = NFS_SERVER(inode);
6349 struct rpc_task *task;
6350 struct rpc_message msg = {
6351 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
6352 .rpc_cred = cred,
6353 };
6354 struct rpc_task_setup task_setup_data = {
6355 .rpc_client = server->client,
6356 .rpc_message = &msg,
6357 .callback_ops = &nfs4_delegreturn_ops,
6358 .flags = RPC_TASK_ASYNC,
6359 };
6360 int status = 0;
6361
6362 data = kzalloc(sizeof(*data), GFP_NOFS);
6363 if (data == NULL)
6364 return -ENOMEM;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006365
6366 nfs4_state_protect(server->nfs_client,
6367 NFS_SP4_MACH_CRED_CLEANUP,
6368 &task_setup_data.rpc_client, &msg);
6369
6370 data->args.fhandle = &data->fh;
6371 data->args.stateid = &data->stateid;
6372 data->args.bitmask = server->cache_consistency_bitmask;
6373 nfs_copy_fh(&data->fh, NFS_FH(inode));
6374 nfs4_stateid_copy(&data->stateid, stateid);
6375 data->res.fattr = &data->fattr;
6376 data->res.server = server;
6377 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
6378 data->lr.arg.ld_private = &data->lr.ld_private;
6379 nfs_fattr_init(data->res.fattr);
6380 data->timestamp = jiffies;
6381 data->rpc_status = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006382 data->inode = nfs_igrab_and_active(inode);
Olivier Deprez0e641232021-09-23 10:07:05 +02006383 if (data->inode || issync) {
6384 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
6385 cred);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006386 if (data->lr.roc) {
6387 data->args.lr_args = &data->lr.arg;
6388 data->res.lr_res = &data->lr.res;
6389 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006390 }
6391
Olivier Deprez0e641232021-09-23 10:07:05 +02006392 if (!data->inode)
6393 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6394 1);
6395 else
6396 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
6397 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006398 task_setup_data.callback_data = data;
6399 msg.rpc_argp = &data->args;
6400 msg.rpc_resp = &data->res;
6401 task = rpc_run_task(&task_setup_data);
6402 if (IS_ERR(task))
6403 return PTR_ERR(task);
6404 if (!issync)
6405 goto out;
6406 status = rpc_wait_for_completion_task(task);
6407 if (status != 0)
6408 goto out;
6409 status = data->rpc_status;
6410out:
6411 rpc_put_task(task);
6412 return status;
6413}
6414
David Brazdil0f672f62019-12-10 10:32:29 +00006415int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006416{
6417 struct nfs_server *server = NFS_SERVER(inode);
6418 struct nfs4_exception exception = { };
6419 int err;
6420 do {
6421 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
6422 trace_nfs4_delegreturn(inode, stateid, err);
6423 switch (err) {
6424 case -NFS4ERR_STALE_STATEID:
6425 case -NFS4ERR_EXPIRED:
6426 case 0:
6427 return 0;
6428 }
6429 err = nfs4_handle_exception(server, err, &exception);
6430 } while (exception.retry);
6431 return err;
6432}
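/*
 * Note that -NFS4ERR_STALE_STATEID and -NFS4ERR_EXPIRED are mapped to
 * success above: a delegation the server no longer recognises is treated as
 * having already been returned.
 */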
6433
6434static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6435{
6436 struct inode *inode = state->inode;
6437 struct nfs_server *server = NFS_SERVER(inode);
6438 struct nfs_client *clp = server->nfs_client;
6439 struct nfs_lockt_args arg = {
6440 .fh = NFS_FH(inode),
6441 .fl = request,
6442 };
6443 struct nfs_lockt_res res = {
6444 .denied = request,
6445 };
6446 struct rpc_message msg = {
6447 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
6448 .rpc_argp = &arg,
6449 .rpc_resp = &res,
6450 .rpc_cred = state->owner->so_cred,
6451 };
6452 struct nfs4_lock_state *lsp;
6453 int status;
6454
6455 arg.lock_owner.clientid = clp->cl_clientid;
6456 status = nfs4_set_lock_state(state, request);
6457 if (status != 0)
6458 goto out;
6459 lsp = request->fl_u.nfs4_fl.owner;
6460 arg.lock_owner.id = lsp->ls_seqid.owner_id;
6461 arg.lock_owner.s_dev = server->s_dev;
6462 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6463 switch (status) {
6464 case 0:
6465 request->fl_type = F_UNLCK;
6466 break;
6467 case -NFS4ERR_DENIED:
6468 status = 0;
6469 }
6470 request->fl_ops->fl_release_private(request);
6471 request->fl_ops = NULL;
6472out:
6473 return status;
6474}
6475
6476static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6477{
David Brazdil0f672f62019-12-10 10:32:29 +00006478 struct nfs4_exception exception = {
6479 .interruptible = true,
6480 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006481 int err;
6482
6483 do {
6484 err = _nfs4_proc_getlk(state, cmd, request);
6485 trace_nfs4_get_lock(request, state, cmd, err);
6486 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
6487 &exception);
6488 } while (exception.retry);
6489 return err;
6490}
6491
David Brazdil0f672f62019-12-10 10:32:29 +00006492/*
6493 * Update the seqid of a lock stateid after receiving
6494 * NFS4ERR_OLD_STATEID
6495 */
6496static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
6497 struct nfs4_lock_state *lsp)
6498{
6499 struct nfs4_state *state = lsp->ls_state;
6500 bool ret = false;
6501
6502 spin_lock(&state->state_lock);
6503 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
6504 goto out;
6505 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
6506 nfs4_stateid_seqid_inc(dst);
6507 else
6508 dst->seqid = lsp->ls_stateid.seqid;
6509 ret = true;
6510out:
6511 spin_unlock(&state->state_lock);
6512 return ret;
6513}
6514
6515static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
6516 struct nfs4_lock_state *lsp)
6517{
6518 struct nfs4_state *state = lsp->ls_state;
6519 bool ret;
6520
6521 spin_lock(&state->state_lock);
6522 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
6523 nfs4_stateid_copy(dst, &lsp->ls_stateid);
6524 spin_unlock(&state->state_lock);
6525 return ret;
6526}
6527
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006528struct nfs4_unlockdata {
6529 struct nfs_locku_args arg;
6530 struct nfs_locku_res res;
6531 struct nfs4_lock_state *lsp;
6532 struct nfs_open_context *ctx;
6533 struct nfs_lock_context *l_ctx;
6534 struct file_lock fl;
6535 struct nfs_server *server;
6536 unsigned long timestamp;
6537};
6538
6539static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
6540 struct nfs_open_context *ctx,
6541 struct nfs4_lock_state *lsp,
6542 struct nfs_seqid *seqid)
6543{
6544 struct nfs4_unlockdata *p;
David Brazdil0f672f62019-12-10 10:32:29 +00006545 struct nfs4_state *state = lsp->ls_state;
6546 struct inode *inode = state->inode;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006547
6548 p = kzalloc(sizeof(*p), GFP_NOFS);
6549 if (p == NULL)
6550 return NULL;
6551 p->arg.fh = NFS_FH(inode);
6552 p->arg.fl = &p->fl;
6553 p->arg.seqid = seqid;
6554 p->res.seqid = seqid;
6555 p->lsp = lsp;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006556 /* Ensure we don't close file until we're done freeing locks! */
6557 p->ctx = get_nfs_open_context(ctx);
6558 p->l_ctx = nfs_get_lock_context(ctx);
David Brazdil0f672f62019-12-10 10:32:29 +00006559 locks_init_lock(&p->fl);
6560 locks_copy_lock(&p->fl, fl);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006561 p->server = NFS_SERVER(inode);
David Brazdil0f672f62019-12-10 10:32:29 +00006562 spin_lock(&state->state_lock);
6563 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
6564 spin_unlock(&state->state_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006565 return p;
6566}
6567
6568static void nfs4_locku_release_calldata(void *data)
6569{
6570 struct nfs4_unlockdata *calldata = data;
6571 nfs_free_seqid(calldata->arg.seqid);
6572 nfs4_put_lock_state(calldata->lsp);
6573 nfs_put_lock_context(calldata->l_ctx);
6574 put_nfs_open_context(calldata->ctx);
6575 kfree(calldata);
6576}
6577
6578static void nfs4_locku_done(struct rpc_task *task, void *data)
6579{
6580 struct nfs4_unlockdata *calldata = data;
6581 struct nfs4_exception exception = {
6582 .inode = calldata->lsp->ls_state->inode,
6583 .stateid = &calldata->arg.stateid,
6584 };
6585
6586 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
6587 return;
6588 switch (task->tk_status) {
6589 case 0:
6590 renew_lease(calldata->server, calldata->timestamp);
6591 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
6592 if (nfs4_update_lock_stateid(calldata->lsp,
6593 &calldata->res.stateid))
6594 break;
6595 /* Fall through */
6596 case -NFS4ERR_ADMIN_REVOKED:
6597 case -NFS4ERR_EXPIRED:
6598 nfs4_free_revoked_stateid(calldata->server,
6599 &calldata->arg.stateid,
6600 task->tk_msg.rpc_cred);
6601 /* Fall through */
6602 case -NFS4ERR_BAD_STATEID:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006603 case -NFS4ERR_STALE_STATEID:
David Brazdil0f672f62019-12-10 10:32:29 +00006604 if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
6605 calldata->lsp))
6606 rpc_restart_call_prepare(task);
6607 break;
6608 case -NFS4ERR_OLD_STATEID:
6609 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
6610 calldata->lsp))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006611 rpc_restart_call_prepare(task);
6612 break;
6613 default:
6614 task->tk_status = nfs4_async_handle_exception(task,
6615 calldata->server, task->tk_status,
6616 &exception);
6617 if (exception.retry)
6618 rpc_restart_call_prepare(task);
6619 }
6620 nfs_release_seqid(calldata->arg.seqid);
6621}
6622
6623static void nfs4_locku_prepare(struct rpc_task *task, void *data)
6624{
6625 struct nfs4_unlockdata *calldata = data;
6626
6627 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
6628 nfs_async_iocounter_wait(task, calldata->l_ctx))
6629 return;
6630
6631 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
6632 goto out_wait;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006633 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
6634 /* Note: exit _without_ running nfs4_locku_done */
6635 goto out_no_action;
6636 }
6637 calldata->timestamp = jiffies;
6638 if (nfs4_setup_sequence(calldata->server->nfs_client,
6639 &calldata->arg.seq_args,
6640 &calldata->res.seq_res,
6641 task) != 0)
6642 nfs_release_seqid(calldata->arg.seqid);
6643 return;
6644out_no_action:
6645 task->tk_action = NULL;
6646out_wait:
6647 nfs4_sequence_done(task, &calldata->res.seq_res);
6648}
6649
6650static const struct rpc_call_ops nfs4_locku_ops = {
6651 .rpc_call_prepare = nfs4_locku_prepare,
6652 .rpc_call_done = nfs4_locku_done,
6653 .rpc_release = nfs4_locku_release_calldata,
6654};
6655
6656static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
6657 struct nfs_open_context *ctx,
6658 struct nfs4_lock_state *lsp,
6659 struct nfs_seqid *seqid)
6660{
6661 struct nfs4_unlockdata *data;
6662 struct rpc_message msg = {
6663 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
6664 .rpc_cred = ctx->cred,
6665 };
6666 struct rpc_task_setup task_setup_data = {
6667 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
6668 .rpc_message = &msg,
6669 .callback_ops = &nfs4_locku_ops,
6670 .workqueue = nfsiod_workqueue,
6671 .flags = RPC_TASK_ASYNC,
6672 };
6673
6674 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
6675 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
6676
6677 /* Ensure this is an unlock - when canceling a lock, the
6678 * canceled lock is passed in, and it won't be an unlock.
6679 */
6680 fl->fl_type = F_UNLCK;
6681 if (fl->fl_flags & FL_CLOSE)
6682 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
6683
6684 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
6685 if (data == NULL) {
6686 nfs_free_seqid(seqid);
6687 return ERR_PTR(-ENOMEM);
6688 }
6689
6690 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0);
6691 msg.rpc_argp = &data->arg;
6692 msg.rpc_resp = &data->res;
6693 task_setup_data.callback_data = data;
6694 return rpc_run_task(&task_setup_data);
6695}
6696
6697static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
6698{
6699 struct inode *inode = state->inode;
6700 struct nfs4_state_owner *sp = state->owner;
6701 struct nfs_inode *nfsi = NFS_I(inode);
6702 struct nfs_seqid *seqid;
6703 struct nfs4_lock_state *lsp;
6704 struct rpc_task *task;
6705 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
6706 int status = 0;
6707 unsigned char fl_flags = request->fl_flags;
6708
6709 status = nfs4_set_lock_state(state, request);
6710 /* Unlock _before_ we do the RPC call */
6711 request->fl_flags |= FL_EXISTS;
6712 /* Exclude nfs_delegation_claim_locks() */
6713 mutex_lock(&sp->so_delegreturn_mutex);
6714 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
6715 down_read(&nfsi->rwsem);
6716 if (locks_lock_inode_wait(inode, request) == -ENOENT) {
6717 up_read(&nfsi->rwsem);
6718 mutex_unlock(&sp->so_delegreturn_mutex);
6719 goto out;
6720 }
6721 up_read(&nfsi->rwsem);
6722 mutex_unlock(&sp->so_delegreturn_mutex);
6723 if (status != 0)
6724 goto out;
6725 /* Is this a delegated lock? */
6726 lsp = request->fl_u.nfs4_fl.owner;
6727 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
6728 goto out;
6729 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
6730 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
6731 status = -ENOMEM;
6732 if (IS_ERR(seqid))
6733 goto out;
6734 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
6735 status = PTR_ERR(task);
6736 if (IS_ERR(task))
6737 goto out;
6738 status = rpc_wait_for_completion_task(task);
6739 rpc_put_task(task);
6740out:
6741 request->fl_flags = fl_flags;
6742 trace_nfs4_unlock(request, state, F_SETLK, status);
6743 return status;
6744}
6745
6746struct nfs4_lockdata {
6747 struct nfs_lock_args arg;
6748 struct nfs_lock_res res;
6749 struct nfs4_lock_state *lsp;
6750 struct nfs_open_context *ctx;
6751 struct file_lock fl;
6752 unsigned long timestamp;
6753 int rpc_status;
6754 int cancelled;
6755 struct nfs_server *server;
6756};
6757
6758static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
6759 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
6760 gfp_t gfp_mask)
6761{
6762 struct nfs4_lockdata *p;
6763 struct inode *inode = lsp->ls_state->inode;
6764 struct nfs_server *server = NFS_SERVER(inode);
6765 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
6766
6767 p = kzalloc(sizeof(*p), gfp_mask);
6768 if (p == NULL)
6769 return NULL;
6770
6771 p->arg.fh = NFS_FH(inode);
6772 p->arg.fl = &p->fl;
6773 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
6774 if (IS_ERR(p->arg.open_seqid))
6775 goto out_free;
6776 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
6777 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
6778 if (IS_ERR(p->arg.lock_seqid))
6779 goto out_free_seqid;
6780 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
6781 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
6782 p->arg.lock_owner.s_dev = server->s_dev;
6783 p->res.lock_seqid = p->arg.lock_seqid;
6784 p->lsp = lsp;
6785 p->server = server;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006786 p->ctx = get_nfs_open_context(ctx);
David Brazdil0f672f62019-12-10 10:32:29 +00006787 locks_init_lock(&p->fl);
6788 locks_copy_lock(&p->fl, fl);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006789 return p;
6790out_free_seqid:
6791 nfs_free_seqid(p->arg.open_seqid);
6792out_free:
6793 kfree(p);
6794 return NULL;
6795}
6796
6797static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
6798{
6799 struct nfs4_lockdata *data = calldata;
6800 struct nfs4_state *state = data->lsp->ls_state;
6801
6802 dprintk("%s: begin!\n", __func__);
6803 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
6804 goto out_wait;
6805 /* Do we need to do an open_to_lock_owner? */
6806 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
6807 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
6808 goto out_release_lock_seqid;
6809 }
6810 nfs4_stateid_copy(&data->arg.open_stateid,
6811 &state->open_stateid);
6812 data->arg.new_lock_owner = 1;
6813 data->res.open_seqid = data->arg.open_seqid;
6814 } else {
6815 data->arg.new_lock_owner = 0;
6816 nfs4_stateid_copy(&data->arg.lock_stateid,
6817 &data->lsp->ls_stateid);
6818 }
6819 if (!nfs4_valid_open_stateid(state)) {
6820 data->rpc_status = -EBADF;
6821 task->tk_action = NULL;
6822 goto out_release_open_seqid;
6823 }
6824 data->timestamp = jiffies;
6825 if (nfs4_setup_sequence(data->server->nfs_client,
6826 &data->arg.seq_args,
6827 &data->res.seq_res,
6828 task) == 0)
6829 return;
6830out_release_open_seqid:
6831 nfs_release_seqid(data->arg.open_seqid);
6832out_release_lock_seqid:
6833 nfs_release_seqid(data->arg.lock_seqid);
6834out_wait:
6835 nfs4_sequence_done(task, &data->res.seq_res);
6836 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
6837}
6838
6839static void nfs4_lock_done(struct rpc_task *task, void *calldata)
6840{
6841 struct nfs4_lockdata *data = calldata;
6842 struct nfs4_lock_state *lsp = data->lsp;
6843
6844 dprintk("%s: begin!\n", __func__);
6845
6846 if (!nfs4_sequence_done(task, &data->res.seq_res))
6847 return;
6848
6849 data->rpc_status = task->tk_status;
6850 switch (task->tk_status) {
6851 case 0:
6852 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
6853 data->timestamp);
6854 if (data->arg.new_lock && !data->cancelled) {
6855 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
6856 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
6857 goto out_restart;
6858 }
6859 if (data->arg.new_lock_owner != 0) {
6860 nfs_confirm_seqid(&lsp->ls_seqid, 0);
6861 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
6862 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
6863 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
6864 goto out_restart;
6865 break;
6866 case -NFS4ERR_BAD_STATEID:
6867 case -NFS4ERR_OLD_STATEID:
6868 case -NFS4ERR_STALE_STATEID:
6869 case -NFS4ERR_EXPIRED:
6870 if (data->arg.new_lock_owner != 0) {
6871 if (!nfs4_stateid_match(&data->arg.open_stateid,
6872 &lsp->ls_state->open_stateid))
6873 goto out_restart;
6874 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
6875 &lsp->ls_stateid))
6876 goto out_restart;
6877 }
6878out_done:
6879 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
6880 return;
6881out_restart:
6882 if (!data->cancelled)
6883 rpc_restart_call_prepare(task);
6884 goto out_done;
6885}
6886
6887static void nfs4_lock_release(void *calldata)
6888{
6889 struct nfs4_lockdata *data = calldata;
6890
6891 dprintk("%s: begin!\n", __func__);
6892 nfs_free_seqid(data->arg.open_seqid);
6893 if (data->cancelled && data->rpc_status == 0) {
6894 struct rpc_task *task;
6895 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
6896 data->arg.lock_seqid);
6897 if (!IS_ERR(task))
6898 rpc_put_task_async(task);
6899 dprintk("%s: cancelling lock!\n", __func__);
6900 } else
6901 nfs_free_seqid(data->arg.lock_seqid);
6902 nfs4_put_lock_state(data->lsp);
6903 put_nfs_open_context(data->ctx);
6904 kfree(data);
6905 dprintk("%s: done!\n", __func__);
6906}
6907
6908static const struct rpc_call_ops nfs4_lock_ops = {
6909 .rpc_call_prepare = nfs4_lock_prepare,
6910 .rpc_call_done = nfs4_lock_done,
6911 .rpc_release = nfs4_lock_release,
6912};
6913
6914static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
6915{
6916 switch (error) {
6917 case -NFS4ERR_ADMIN_REVOKED:
6918 case -NFS4ERR_EXPIRED:
6919 case -NFS4ERR_BAD_STATEID:
6920 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
6921 if (new_lock_owner != 0 ||
6922 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
6923 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
6924 break;
6925 case -NFS4ERR_STALE_STATEID:
6926 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
6927 nfs4_schedule_lease_recovery(server->nfs_client);
6928	}
6929}
6930
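/*
 * Issue the LOCK operation as an asynchronous RPC and wait for it to
 * complete.  If the wait is interrupted, the request is marked as
 * cancelled and nfs4_lock_release() will send a LOCKU to undo any lock
 * that the server may still have granted.
 */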
6931static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
6932{
6933 struct nfs4_lockdata *data;
6934 struct rpc_task *task;
6935 struct rpc_message msg = {
6936 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
6937 .rpc_cred = state->owner->so_cred,
6938 };
6939 struct rpc_task_setup task_setup_data = {
6940 .rpc_client = NFS_CLIENT(state->inode),
6941 .rpc_message = &msg,
6942 .callback_ops = &nfs4_lock_ops,
6943 .workqueue = nfsiod_workqueue,
6944 .flags = RPC_TASK_ASYNC,
6945 };
6946 int ret;
6947
6948 dprintk("%s: begin!\n", __func__);
6949 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
6950 fl->fl_u.nfs4_fl.owner,
6951 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
6952 if (data == NULL)
6953 return -ENOMEM;
6954 if (IS_SETLKW(cmd))
6955 data->arg.block = 1;
6956 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1,
6957 recovery_type > NFS_LOCK_NEW);
6958 msg.rpc_argp = &data->arg;
6959 msg.rpc_resp = &data->res;
6960 task_setup_data.callback_data = data;
6961 if (recovery_type > NFS_LOCK_NEW) {
6962 if (recovery_type == NFS_LOCK_RECLAIM)
6963 data->arg.reclaim = NFS_LOCK_RECLAIM;
6964 } else
6965 data->arg.new_lock = 1;
6966 task = rpc_run_task(&task_setup_data);
6967 if (IS_ERR(task))
6968 return PTR_ERR(task);
6969 ret = rpc_wait_for_completion_task(task);
6970 if (ret == 0) {
6971 ret = data->rpc_status;
6972 if (ret)
6973 nfs4_handle_setlk_error(data->server, data->lsp,
6974 data->arg.new_lock_owner, ret);
6975 } else
6976 data->cancelled = true;
6977	trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
6978	rpc_put_task(task);
6979	dprintk("%s: done, ret = %d!\n", __func__, ret);
6980	return ret;
6981}
6982
6983static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
6984{
6985 struct nfs_server *server = NFS_SERVER(state->inode);
6986 struct nfs4_exception exception = {
6987 .inode = state->inode,
6988 };
6989 int err;
6990
6991 do {
6992 /* Cache the lock if possible... */
6993 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
6994 return 0;
6995 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
6996 if (err != -NFS4ERR_DELAY)
6997 break;
6998 nfs4_handle_exception(server, err, &exception);
6999 } while (exception.retry);
7000 return err;
7001}
7002
7003static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
7004{
7005 struct nfs_server *server = NFS_SERVER(state->inode);
7006 struct nfs4_exception exception = {
7007 .inode = state->inode,
7008 };
7009 int err;
7010
7011 err = nfs4_set_lock_state(state, request);
7012 if (err != 0)
7013 return err;
7014 if (!recover_lost_locks) {
7015 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
7016 return 0;
7017 }
7018 do {
7019 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
7020 return 0;
7021 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
7022 switch (err) {
7023 default:
7024 goto out;
7025 case -NFS4ERR_GRACE:
7026 case -NFS4ERR_DELAY:
7027 nfs4_handle_exception(server, err, &exception);
7028 err = 0;
7029 }
7030 } while (exception.retry);
7031out:
7032 return err;
7033}
7034
7035#if defined(CONFIG_NFS_V4_1)
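/*
 * NFSv4.1: a lock that is already initialized or marked lost needs no
 * reclaim; anything else falls through to the NFSv4.0 expired-lock path.
 */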
7036static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
7037{
7038 struct nfs4_lock_state *lsp;
7039 int status;
7040
7041 status = nfs4_set_lock_state(state, request);
7042 if (status != 0)
7043 return status;
7044 lsp = request->fl_u.nfs4_fl.owner;
7045 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
7046 test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
7047 return 0;
7048 return nfs4_lock_expired(state, request);
7049}
7050#endif
7051
7052static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7053{
7054 struct nfs_inode *nfsi = NFS_I(state->inode);
7055 struct nfs4_state_owner *sp = state->owner;
7056 unsigned char fl_flags = request->fl_flags;
7057 int status;
7058
7059 request->fl_flags |= FL_ACCESS;
7060 status = locks_lock_inode_wait(state->inode, request);
7061 if (status < 0)
7062 goto out;
7063 mutex_lock(&sp->so_delegreturn_mutex);
7064 down_read(&nfsi->rwsem);
7065 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
7066 /* Yes: cache locks! */
7067 /* ...but avoid races with delegation recall... */
7068 request->fl_flags = fl_flags & ~FL_SLEEP;
7069 status = locks_lock_inode_wait(state->inode, request);
7070 up_read(&nfsi->rwsem);
7071 mutex_unlock(&sp->so_delegreturn_mutex);
7072 goto out;
7073 }
7074 up_read(&nfsi->rwsem);
7075 mutex_unlock(&sp->so_delegreturn_mutex);
7076 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
7077out:
7078 request->fl_flags = fl_flags;
7079 return status;
7080}
7081
7082static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7083{
7084 struct nfs4_exception exception = {
7085 .state = state,
7086 .inode = state->inode,
7087	.interruptible = true,
7088	};
7089 int err;
7090
7091 do {
7092 err = _nfs4_proc_setlk(state, cmd, request);
7093 if (err == -NFS4ERR_DENIED)
7094 err = -EAGAIN;
7095 err = nfs4_handle_exception(NFS_SERVER(state->inode),
7096 err, &exception);
7097 } while (exception.retry);
7098 return err;
7099}
7100
7101#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
7102#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
7103
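/*
 * Poll for a contended lock, doubling the delay between attempts from
 * NFS4_LOCK_MINTIMEOUT up to NFS4_LOCK_MAXTIMEOUT, until the lock is
 * granted, the request turns out to be non-blocking, or a signal
 * arrives.
 */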
7104static int
7105nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
7106 struct file_lock *request)
7107{
7108 int status = -ERESTARTSYS;
7109 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
7110
7111 while(!signalled()) {
7112 status = nfs4_proc_setlk(state, cmd, request);
7113 if ((status != -EAGAIN) || IS_SETLK(cmd))
7114 break;
7115 freezable_schedule_timeout_interruptible(timeout);
7116 timeout *= 2;
7117 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
7118 status = -ERESTARTSYS;
7119 }
7120 return status;
7121}
7122
7123#ifdef CONFIG_NFS_V4_1
7124struct nfs4_lock_waiter {
7125 struct task_struct *task;
7126 struct inode *inode;
7127 struct nfs_lowner *owner;
7128	};
7129
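/*
 * Wait queue callback invoked when a CB_NOTIFY_LOCK callback arrives:
 * wake the waiter only if the lock owner and file handle in the
 * callback match this waiter (a NULL key wakes everyone).
 */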
7130static int
7131nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
7132{
7133 int ret;
7134 struct nfs4_lock_waiter *waiter = wait->private;
7135
7136 /* NULL key means to wake up everyone */
7137 if (key) {
7138 struct cb_notify_lock_args *cbnl = key;
7139 struct nfs_lowner *lowner = &cbnl->cbnl_owner,
7140 *wowner = waiter->owner;
7141
7142 /* Only wake if the callback was for the same owner. */
7143 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
7144 return 0;
7145
7146 /* Make sure it's for the right inode */
7147 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
7148 return 0;
7149	}
7150
7151 /* override "private" so we can use default_wake_function */
7152 wait->private = waiter->task;
7153	ret = woken_wake_function(wait, mode, flags, key);
7154	if (ret)
7155	list_del_init(&wait->entry);
7156	wait->private = waiter;
7157 return ret;
7158}
7159
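/*
 * NFSv4.1: when the server may send CB_NOTIFY_LOCK, park on the
 * client's lock waitqueue so the callback can wake us as soon as the
 * lock becomes available, rather than relying on timed polling alone.
 */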
7160static int
7161nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7162{
7163 int status = -ERESTARTSYS;
7164	struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
7165 struct nfs_server *server = NFS_SERVER(state->inode);
7166 struct nfs_client *clp = server->nfs_client;
7167 wait_queue_head_t *q = &clp->cl_lock_waitq;
7168 struct nfs_lowner owner = { .clientid = clp->cl_clientid,
7169 .id = lsp->ls_seqid.owner_id,
7170 .s_dev = server->s_dev };
7171 struct nfs4_lock_waiter waiter = { .task = current,
7172 .inode = state->inode,
7173	.owner = &owner};
7174	wait_queue_entry_t wait;
7175
7176 /* Don't bother with waitqueue if we don't expect a callback */
7177 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
7178 return nfs4_retry_setlk_simple(state, cmd, request);
7179
7180 init_wait(&wait);
7181 wait.private = &waiter;
7182 wait.func = nfs4_wake_lock_waiter;
7183
7184	while(!signalled()) {
7185	add_wait_queue(q, &wait);
7186	status = nfs4_proc_setlk(state, cmd, request);
7187	if ((status != -EAGAIN) || IS_SETLK(cmd)) {
7188	finish_wait(q, &wait);
7189	break;
7190	}
7191
7192	status = -ERESTARTSYS;
7193	freezer_do_not_count();
7194	wait_woken(&wait, TASK_INTERRUPTIBLE, NFS4_LOCK_MAXTIMEOUT);
7195	freezer_count();
7196	finish_wait(q, &wait);
7197	}
7198
7199	return status;
7200}
7201#else /* !CONFIG_NFS_V4_1 */
7202static inline int
7203nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7204{
7205 return nfs4_retry_setlk_simple(state, cmd, request);
7206}
7207#endif
7208
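/*
 * Entry point for lock requests on an NFSv4 file: dispatch GETLK and
 * unlock requests, check that the file was opened with a mode that
 * permits the requested lock type, then hand SETLK/SETLKW off to the
 * retry machinery above.
 */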
7209static int
7210nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
7211{
7212 struct nfs_open_context *ctx;
7213 struct nfs4_state *state;
7214 int status;
7215
7216 /* verify open state */
7217 ctx = nfs_file_open_context(filp);
7218 state = ctx->state;
7219
7220 if (IS_GETLK(cmd)) {
7221 if (state != NULL)
7222 return nfs4_proc_getlk(state, F_GETLK, request);
7223 return 0;
7224 }
7225
7226 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
7227 return -EINVAL;
7228
7229 if (request->fl_type == F_UNLCK) {
7230 if (state != NULL)
7231 return nfs4_proc_unlck(state, cmd, request);
7232 return 0;
7233 }
7234
7235 if (state == NULL)
7236 return -ENOLCK;
7237
7238 if ((request->fl_flags & FL_POSIX) &&
7239 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
7240 return -ENOLCK;
7241
7242 /*
7243 * Don't rely on the VFS having checked the file open mode,
7244 * since it won't do this for flock() locks.
7245 */
7246 switch (request->fl_type) {
7247 case F_RDLCK:
7248 if (!(filp->f_mode & FMODE_READ))
7249 return -EBADF;
7250 break;
7251 case F_WRLCK:
7252 if (!(filp->f_mode & FMODE_WRITE))
7253 return -EBADF;
7254 }
7255
7256 status = nfs4_set_lock_state(state, request);
7257 if (status != 0)
7258 return status;
7259
7260 return nfs4_retry_setlk(state, cmd, request);
7261}
7262
7263int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
7264{
7265 struct nfs_server *server = NFS_SERVER(state->inode);
7266 int err;
7267
7268 err = nfs4_set_lock_state(state, fl);
7269 if (err != 0)
7270 return err;
7271	do {
7272 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
7273 if (err != -NFS4ERR_DELAY)
7274 break;
7275 ssleep(1);
7276 } while (err == -NFS4ERR_DELAY);
7277	return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
7278}
7279
7280struct nfs_release_lockowner_data {
7281 struct nfs4_lock_state *lsp;
7282 struct nfs_server *server;
7283 struct nfs_release_lockowner_args args;
7284 struct nfs_release_lockowner_res res;
7285 unsigned long timestamp;
7286};
7287
7288static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
7289{
7290 struct nfs_release_lockowner_data *data = calldata;
7291 struct nfs_server *server = data->server;
7292 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
7293 &data->res.seq_res, task);
7294 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7295 data->timestamp = jiffies;
7296}
7297
7298static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
7299{
7300 struct nfs_release_lockowner_data *data = calldata;
7301 struct nfs_server *server = data->server;
7302
7303 nfs40_sequence_done(task, &data->res.seq_res);
7304
7305 switch (task->tk_status) {
7306 case 0:
7307 renew_lease(server, data->timestamp);
7308 break;
7309 case -NFS4ERR_STALE_CLIENTID:
7310 case -NFS4ERR_EXPIRED:
7311 nfs4_schedule_lease_recovery(server->nfs_client);
7312 break;
7313 case -NFS4ERR_LEASE_MOVED:
7314 case -NFS4ERR_DELAY:
7315 if (nfs4_async_handle_error(task, server,
7316 NULL, NULL) == -EAGAIN)
7317 rpc_restart_call_prepare(task);
7318 }
7319}
7320
7321static void nfs4_release_lockowner_release(void *calldata)
7322{
7323 struct nfs_release_lockowner_data *data = calldata;
7324 nfs4_free_lock_state(data->server, data->lsp);
7325 kfree(calldata);
7326}
7327
7328static const struct rpc_call_ops nfs4_release_lockowner_ops = {
7329 .rpc_call_prepare = nfs4_release_lockowner_prepare,
7330 .rpc_call_done = nfs4_release_lockowner_done,
7331 .rpc_release = nfs4_release_lockowner_release,
7332};
7333
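/*
 * NFSv4.0 only: ask the server to release state held for this lock
 * owner.  The RELEASE_LOCKOWNER operation does not exist in later minor
 * versions, so the call is skipped there.
 */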
7334static void
7335nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
7336{
7337 struct nfs_release_lockowner_data *data;
7338 struct rpc_message msg = {
7339 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
7340 };
7341
7342 if (server->nfs_client->cl_mvops->minor_version != 0)
7343 return;
7344
7345 data = kmalloc(sizeof(*data), GFP_NOFS);
7346 if (!data)
7347 return;
7348 data->lsp = lsp;
7349 data->server = server;
7350 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
7351 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
7352 data->args.lock_owner.s_dev = server->s_dev;
7353
7354 msg.rpc_argp = &data->args;
7355 msg.rpc_resp = &data->res;
7356 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
7357 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
7358}
7359
7360#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
7361
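/*
 * Extended attribute handlers exposing the NFSv4 ACL to userspace via
 * the "system.nfs4_acl" attribute, implemented on top of
 * nfs4_proc_get_acl() and nfs4_proc_set_acl().
 */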
7362static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
7363 struct dentry *unused, struct inode *inode,
7364 const char *key, const void *buf,
7365 size_t buflen, int flags)
7366{
7367 return nfs4_proc_set_acl(inode, buf, buflen);
7368}
7369
7370static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
7371 struct dentry *unused, struct inode *inode,
7372 const char *key, void *buf, size_t buflen)
7373{
7374 return nfs4_proc_get_acl(inode, buf, buflen);
7375}
7376
7377static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
7378{
7379 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
7380}
7381
7382#ifdef CONFIG_NFS_V4_SECURITY_LABEL
7383
7384static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
7385 struct dentry *unused, struct inode *inode,
7386 const char *key, const void *buf,
7387 size_t buflen, int flags)
7388{
7389 if (security_ismaclabel(key))
7390 return nfs4_set_security_label(inode, buf, buflen);
7391
7392 return -EOPNOTSUPP;
7393}
7394
7395static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
7396 struct dentry *unused, struct inode *inode,
7397 const char *key, void *buf, size_t buflen)
7398{
7399 if (security_ismaclabel(key))
7400 return nfs4_get_security_label(inode, buf, buflen);
7401 return -EOPNOTSUPP;
7402}
7403
7404static ssize_t
7405nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7406{
7407 int len = 0;
7408
7409 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
7410 len = security_inode_listsecurity(inode, list, list_len);
7411 if (list_len && len > list_len)
7412 return -ERANGE;
7413 }
7414 return len;
7415}
7416
7417static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
7418 .prefix = XATTR_SECURITY_PREFIX,
7419 .get = nfs4_xattr_get_nfs4_label,
7420 .set = nfs4_xattr_set_nfs4_label,
7421};
7422
7423#else
7424
7425static ssize_t
7426nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
7427{
7428 return 0;
7429}
7430
7431#endif
7432
7433/*
7434 * nfs_fhget will use either the mounted_on_fileid or the fileid
7435 */
7436static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
7437{
7438 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
7439 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
7440 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
7441 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
7442 return;
7443
7444 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
7445 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
7446 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
7447 fattr->nlink = 2;
7448}
7449
7450static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7451 const struct qstr *name,
7452 struct nfs4_fs_locations *fs_locations,
7453 struct page *page)
7454{
7455 struct nfs_server *server = NFS_SERVER(dir);
7456 u32 bitmask[3];
7457 struct nfs4_fs_locations_arg args = {
7458 .dir_fh = NFS_FH(dir),
7459 .name = name,
7460 .page = page,
7461 .bitmask = bitmask,
7462 };
7463 struct nfs4_fs_locations_res res = {
7464 .fs_locations = fs_locations,
7465 };
7466 struct rpc_message msg = {
7467 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7468 .rpc_argp = &args,
7469 .rpc_resp = &res,
7470 };
7471 int status;
7472
7473 dprintk("%s: start\n", __func__);
7474
7475 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
7476 bitmask[1] = nfs4_fattr_bitmap[1];
7477
7478 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
7479 * is not supported */
7480 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
7481 bitmask[0] &= ~FATTR4_WORD0_FILEID;
7482 else
7483 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
7484
7485 nfs_fattr_init(&fs_locations->fattr);
7486 fs_locations->server = server;
7487 fs_locations->nlocations = 0;
7488 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
7489 dprintk("%s: returned status = %d\n", __func__, status);
7490 return status;
7491}
7492
7493int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7494 const struct qstr *name,
7495 struct nfs4_fs_locations *fs_locations,
7496 struct page *page)
7497{
7498	struct nfs4_exception exception = {
7499	.interruptible = true,
7500	};
7501	int err;
7502 do {
7503 err = _nfs4_proc_fs_locations(client, dir, name,
7504 fs_locations, page);
7505 trace_nfs4_get_fs_locations(dir, name, err);
7506 err = nfs4_handle_exception(NFS_SERVER(dir), err,
7507 &exception);
7508 } while (exception.retry);
7509 return err;
7510}
7511
7512/*
7513 * This operation also signals the server that this client is
7514 * performing migration recovery. The server can stop returning
7515 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
7516 * appended to this compound to identify the client ID which is
7517 * performing recovery.
7518 */
7519static int _nfs40_proc_get_locations(struct inode *inode,
7520 struct nfs4_fs_locations *locations,
7521	struct page *page, const struct cred *cred)
7522	{
7523 struct nfs_server *server = NFS_SERVER(inode);
7524 struct rpc_clnt *clnt = server->client;
7525 u32 bitmask[2] = {
7526 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
7527 };
7528 struct nfs4_fs_locations_arg args = {
7529 .clientid = server->nfs_client->cl_clientid,
7530 .fh = NFS_FH(inode),
7531 .page = page,
7532 .bitmask = bitmask,
7533 .migration = 1, /* skip LOOKUP */
7534 .renew = 1, /* append RENEW */
7535 };
7536 struct nfs4_fs_locations_res res = {
7537 .fs_locations = locations,
7538 .migration = 1,
7539 .renew = 1,
7540 };
7541 struct rpc_message msg = {
7542 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7543 .rpc_argp = &args,
7544 .rpc_resp = &res,
7545 .rpc_cred = cred,
7546 };
7547 unsigned long now = jiffies;
7548 int status;
7549
7550 nfs_fattr_init(&locations->fattr);
7551 locations->server = server;
7552 locations->nlocations = 0;
7553
7554 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
7555 status = nfs4_call_sync_sequence(clnt, server, &msg,
7556 &args.seq_args, &res.seq_res);
7557 if (status)
7558 return status;
7559
7560 renew_lease(server, now);
7561 return 0;
7562}
7563
7564#ifdef CONFIG_NFS_V4_1
7565
7566/*
7567 * This operation also signals the server that this client is
7568 * performing migration recovery. The server can stop asserting
7569 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
7570 * performing this operation is identified in the SEQUENCE
7571 * operation in this compound.
7572 *
7573 * When the client supports GETATTR(fs_locations_info), it can
7574 * be plumbed in here.
7575 */
7576static int _nfs41_proc_get_locations(struct inode *inode,
7577 struct nfs4_fs_locations *locations,
7578	struct page *page, const struct cred *cred)
7579	{
7580 struct nfs_server *server = NFS_SERVER(inode);
7581 struct rpc_clnt *clnt = server->client;
7582 u32 bitmask[2] = {
7583 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
7584 };
7585 struct nfs4_fs_locations_arg args = {
7586 .fh = NFS_FH(inode),
7587 .page = page,
7588 .bitmask = bitmask,
7589 .migration = 1, /* skip LOOKUP */
7590 };
7591 struct nfs4_fs_locations_res res = {
7592 .fs_locations = locations,
7593 .migration = 1,
7594 };
7595 struct rpc_message msg = {
7596 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7597 .rpc_argp = &args,
7598 .rpc_resp = &res,
7599 .rpc_cred = cred,
7600 };
7601 int status;
7602
7603 nfs_fattr_init(&locations->fattr);
7604 locations->server = server;
7605 locations->nlocations = 0;
7606
7607 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
7608 status = nfs4_call_sync_sequence(clnt, server, &msg,
7609 &args.seq_args, &res.seq_res);
7610 if (status == NFS4_OK &&
7611 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
7612 status = -NFS4ERR_LEASE_MOVED;
7613 return status;
7614}
7615
7616#endif /* CONFIG_NFS_V4_1 */
7617
7618/**
7619 * nfs4_proc_get_locations - discover locations for a migrated FSID
7620 * @inode: inode on FSID that is migrating
7621 * @locations: result of query
7622 * @page: buffer
7623 * @cred: credential to use for this operation
7624 *
7625 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
7626 * operation failed, or a negative errno if a local error occurred.
7627 *
7628 * On success, "locations" is filled in, but if the server has
7629 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
7630 * asserted.
7631 *
7632 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
7633 * from this client that require migration recovery.
7634 */
7635int nfs4_proc_get_locations(struct inode *inode,
7636 struct nfs4_fs_locations *locations,
7637	struct page *page, const struct cred *cred)
7638	{
7639 struct nfs_server *server = NFS_SERVER(inode);
7640 struct nfs_client *clp = server->nfs_client;
7641 const struct nfs4_mig_recovery_ops *ops =
7642 clp->cl_mvops->mig_recovery_ops;
7643	struct nfs4_exception exception = {
7644	.interruptible = true,
7645	};
7646	int status;
7647
7648 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
7649 (unsigned long long)server->fsid.major,
7650 (unsigned long long)server->fsid.minor,
7651 clp->cl_hostname);
7652 nfs_display_fhandle(NFS_FH(inode), __func__);
7653
7654 do {
7655 status = ops->get_locations(inode, locations, page, cred);
7656 if (status != -NFS4ERR_DELAY)
7657 break;
7658 nfs4_handle_exception(server, status, &exception);
7659 } while (exception.retry);
7660 return status;
7661}
7662
7663/*
7664 * This operation also signals the server that this client is
7665 * performing "lease moved" recovery. The server can stop
7666 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
7667 * is appended to this compound to identify the client ID which is
7668 * performing recovery.
7669 */
7670	static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
7671	{
7672 struct nfs_server *server = NFS_SERVER(inode);
7673 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
7674 struct rpc_clnt *clnt = server->client;
7675 struct nfs4_fsid_present_arg args = {
7676 .fh = NFS_FH(inode),
7677 .clientid = clp->cl_clientid,
7678 .renew = 1, /* append RENEW */
7679 };
7680 struct nfs4_fsid_present_res res = {
7681 .renew = 1,
7682 };
7683 struct rpc_message msg = {
7684 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
7685 .rpc_argp = &args,
7686 .rpc_resp = &res,
7687 .rpc_cred = cred,
7688 };
7689 unsigned long now = jiffies;
7690 int status;
7691
7692 res.fh = nfs_alloc_fhandle();
7693 if (res.fh == NULL)
7694 return -ENOMEM;
7695
7696 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
7697 status = nfs4_call_sync_sequence(clnt, server, &msg,
7698 &args.seq_args, &res.seq_res);
7699 nfs_free_fhandle(res.fh);
7700 if (status)
7701 return status;
7702
7703 do_renew_lease(clp, now);
7704 return 0;
7705}
7706
7707#ifdef CONFIG_NFS_V4_1
7708
7709/*
7710 * This operation also signals the server that this client is
7711 * performing "lease moved" recovery. The server can stop asserting
7712 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
7713 * this operation is identified in the SEQUENCE operation in this
7714 * compound.
7715 */
7716	static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
7717	{
7718 struct nfs_server *server = NFS_SERVER(inode);
7719 struct rpc_clnt *clnt = server->client;
7720 struct nfs4_fsid_present_arg args = {
7721 .fh = NFS_FH(inode),
7722 };
7723 struct nfs4_fsid_present_res res = {
7724 };
7725 struct rpc_message msg = {
7726 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
7727 .rpc_argp = &args,
7728 .rpc_resp = &res,
7729 .rpc_cred = cred,
7730 };
7731 int status;
7732
7733 res.fh = nfs_alloc_fhandle();
7734 if (res.fh == NULL)
7735 return -ENOMEM;
7736
7737 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
7738 status = nfs4_call_sync_sequence(clnt, server, &msg,
7739 &args.seq_args, &res.seq_res);
7740 nfs_free_fhandle(res.fh);
7741 if (status == NFS4_OK &&
7742 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
7743 status = -NFS4ERR_LEASE_MOVED;
7744 return status;
7745}
7746
7747#endif /* CONFIG_NFS_V4_1 */
7748
7749/**
7750 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
7751 * @inode: inode on FSID to check
7752 * @cred: credential to use for this operation
7753 *
7754 * Server indicates whether the FSID is present, moved, or not
7755 * recognized. This operation is necessary to clear a LEASE_MOVED
7756 * condition for this client ID.
7757 *
7758 * Returns NFS4_OK if the FSID is present on this server,
7759 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
7760 * NFS4ERR code if some error occurred on the server, or a
7761 * negative errno if a local failure occurred.
7762 */
7763	int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
7764	{
7765 struct nfs_server *server = NFS_SERVER(inode);
7766 struct nfs_client *clp = server->nfs_client;
7767 const struct nfs4_mig_recovery_ops *ops =
7768 clp->cl_mvops->mig_recovery_ops;
7769	struct nfs4_exception exception = {
7770	.interruptible = true,
7771	};
7772	int status;
7773
7774 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
7775 (unsigned long long)server->fsid.major,
7776 (unsigned long long)server->fsid.minor,
7777 clp->cl_hostname);
7778 nfs_display_fhandle(NFS_FH(inode), __func__);
7779
7780 do {
7781 status = ops->fsid_present(inode, cred);
7782 if (status != -NFS4ERR_DELAY)
7783 break;
7784 nfs4_handle_exception(server, status, &exception);
7785 } while (exception.retry);
7786 return status;
7787}
7788
7789	/*
7790	 * If 'use_integrity' is true and the state management nfs_client
7791 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
7792 * and the machine credential as per RFC3530bis and RFC5661 Security
7793 * Considerations sections. Otherwise, just use the user cred with the
7794 * filesystem's rpc_client.
7795 */
7796static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
7797{
7798 int status;
7799	struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
7800	struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
7801	struct nfs4_secinfo_arg args = {
7802 .dir_fh = NFS_FH(dir),
7803 .name = name,
7804 };
7805 struct nfs4_secinfo_res res = {
7806 .flavors = flavors,
7807 };
7808 struct rpc_message msg = {
7809 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
7810 .rpc_argp = &args,
7811 .rpc_resp = &res,
7812 };
7813	struct nfs4_call_sync_data data = {
7814 .seq_server = NFS_SERVER(dir),
7815 .seq_args = &args.seq_args,
7816 .seq_res = &res.seq_res,
7817 };
7818 struct rpc_task_setup task_setup = {
7819 .rpc_client = clnt,
7820 .rpc_message = &msg,
7821 .callback_ops = clp->cl_mvops->call_sync_ops,
7822 .callback_data = &data,
7823 .flags = RPC_TASK_NO_ROUND_ROBIN,
7824 };
7825 const struct cred *cred = NULL;
7826
7827	if (use_integrity) {
7828	clnt = clp->cl_rpcclient;
7829 task_setup.rpc_client = clnt;
7830
7831 cred = nfs4_get_clid_cred(clp);
7832	msg.rpc_cred = cred;
7833 }
7834
7835 dprintk("NFS call secinfo %s\n", name->name);
7836
7837	nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
7838 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
7839 status = nfs4_call_sync_custom(&task_setup);
7840
7841	dprintk("NFS reply secinfo: %d\n", status);
7842
7843	put_cred(cred);
7844	return status;
7845}
7846
7847int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
7848 struct nfs4_secinfo_flavors *flavors)
7849{
7850	struct nfs4_exception exception = {
7851	.interruptible = true,
7852	};
7853	int err;
7854 do {
7855 err = -NFS4ERR_WRONGSEC;
7856
7857 /* try to use integrity protection with machine cred */
7858 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
7859 err = _nfs4_proc_secinfo(dir, name, flavors, true);
7860
7861 /*
7862 * if unable to use integrity protection, or SECINFO with
7863 * integrity protection returns NFS4ERR_WRONGSEC (which is
7864 * disallowed by spec, but exists in deployed servers) use
7865 * the current filesystem's rpc_client and the user cred.
7866 */
7867 if (err == -NFS4ERR_WRONGSEC)
7868 err = _nfs4_proc_secinfo(dir, name, flavors, false);
7869
7870 trace_nfs4_secinfo(dir, name, err);
7871 err = nfs4_handle_exception(NFS_SERVER(dir), err,
7872 &exception);
7873 } while (exception.retry);
7874 return err;
7875}
7876
7877#ifdef CONFIG_NFS_V4_1
7878/*
7879 * Check the exchange flags returned by the server for invalid flags, having
7880 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
7881 * DS flags set.
7882 */
7883	static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
7884	{
7885	if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
7886	goto out_inval;
7887	else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
7888	goto out_inval;
7889 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
7890 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
7891 goto out_inval;
7892 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
7893 goto out_inval;
7894 return NFS_OK;
7895out_inval:
7896 return -NFS4ERR_INVAL;
7897}
7898
7899static bool
7900nfs41_same_server_scope(struct nfs41_server_scope *a,
7901 struct nfs41_server_scope *b)
7902{
7903 if (a->server_scope_sz != b->server_scope_sz)
7904 return false;
7905 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
7906}
7907
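/*
 * Completion handler for BIND_CONN_TO_SESSION: schedule session
 * recovery on a bad or dead session and, if both channel directions
 * were requested but the server bound only one, close the connection
 * and retry a limited number of times.
 */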
7908static void
7909nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
7910{
7911	struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
7912 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
7913 struct nfs_client *clp = args->client;
7914
7915 switch (task->tk_status) {
7916 case -NFS4ERR_BADSESSION:
7917 case -NFS4ERR_DEADSESSION:
7918 nfs4_schedule_session_recovery(clp->cl_session,
7919 task->tk_status);
7920 }
7921 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
7922 res->dir != NFS4_CDFS4_BOTH) {
7923 rpc_task_close_connection(task);
7924 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
7925 rpc_restart_call(task);
7926 }
7927	}
7928
7929static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
7930	.rpc_call_done = nfs4_bind_one_conn_to_session_done,
7931	};
7932
7933/*
7934 * nfs4_proc_bind_one_conn_to_session()
7935 *
7936 * The 4.1 client currently uses the same TCP connection for the
7937 * fore and backchannel.
7938 */
7939static
7940int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
7941 struct rpc_xprt *xprt,
7942 struct nfs_client *clp,
7943	const struct cred *cred)
7944	{
7945 int status;
7946 struct nfs41_bind_conn_to_session_args args = {
7947 .client = clp,
7948 .dir = NFS4_CDFC4_FORE_OR_BOTH,
7949	.retries = 0,
7950	};
7951 struct nfs41_bind_conn_to_session_res res;
7952 struct rpc_message msg = {
7953 .rpc_proc =
7954 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
7955 .rpc_argp = &args,
7956 .rpc_resp = &res,
7957 .rpc_cred = cred,
7958 };
7959 struct rpc_task_setup task_setup_data = {
7960 .rpc_client = clnt,
7961 .rpc_xprt = xprt,
7962 .callback_ops = &nfs4_bind_one_conn_to_session_ops,
7963 .rpc_message = &msg,
7964 .flags = RPC_TASK_TIMEOUT,
7965 };
7966 struct rpc_task *task;
7967
7968 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
7969 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
7970 args.dir = NFS4_CDFC4_FORE;
7971
7972 /* Do not set the backchannel flag unless this is clnt->cl_xprt */
7973 if (xprt != rcu_access_pointer(clnt->cl_xprt))
7974 args.dir = NFS4_CDFC4_FORE;
7975
7976 task = rpc_run_task(&task_setup_data);
7977 if (!IS_ERR(task)) {
7978 status = task->tk_status;
7979 rpc_put_task(task);
7980 } else
7981 status = PTR_ERR(task);
7982 trace_nfs4_bind_conn_to_session(clp, status);
7983 if (status == 0) {
7984 if (memcmp(res.sessionid.data,
7985 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
7986 dprintk("NFS: %s: Session ID mismatch\n", __func__);
7987 return -EIO;
7988 }
7989 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
7990 dprintk("NFS: %s: Unexpected direction from server\n",
7991 __func__);
7992 return -EIO;
7993 }
7994 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
7995 dprintk("NFS: %s: Server returned RDMA mode = true\n",
7996 __func__);
7997 return -EIO;
7998 }
7999 }
8000
8001 return status;
8002}
8003
8004struct rpc_bind_conn_calldata {
8005 struct nfs_client *clp;
8006	const struct cred *cred;
8007	};
8008
8009static int
8010nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
8011 struct rpc_xprt *xprt,
8012 void *calldata)
8013{
8014 struct rpc_bind_conn_calldata *p = calldata;
8015
8016 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
8017}
8018
8019	int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred)
8020	{
8021 struct rpc_bind_conn_calldata data = {
8022 .clp = clp,
8023 .cred = cred,
8024 };
8025 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
8026 nfs4_proc_bind_conn_to_session_callback, &data);
8027}
8028
8029/*
8030 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map
8031 * and operations we'd like to see to enable certain features in the allow map
8032 */
8033static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
8034 .how = SP4_MACH_CRED,
8035 .enforce.u.words = {
8036 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8037 1 << (OP_EXCHANGE_ID - 32) |
8038 1 << (OP_CREATE_SESSION - 32) |
8039 1 << (OP_DESTROY_SESSION - 32) |
8040 1 << (OP_DESTROY_CLIENTID - 32)
8041 },
8042 .allow.u.words = {
8043 [0] = 1 << (OP_CLOSE) |
8044 1 << (OP_OPEN_DOWNGRADE) |
8045 1 << (OP_LOCKU) |
8046 1 << (OP_DELEGRETURN) |
8047 1 << (OP_COMMIT),
8048 [1] = 1 << (OP_SECINFO - 32) |
8049 1 << (OP_SECINFO_NO_NAME - 32) |
8050 1 << (OP_LAYOUTRETURN - 32) |
8051 1 << (OP_TEST_STATEID - 32) |
8052 1 << (OP_FREE_STATEID - 32) |
8053 1 << (OP_WRITE - 32)
8054 }
8055};
8056
8057/*
8058 * Select the state protection mode for client `clp' given the server results
8059 * from exchange_id in `sp'.
8060 *
8061 * Returns 0 on success, negative errno otherwise.
8062 */
8063static int nfs4_sp4_select_mode(struct nfs_client *clp,
8064 struct nfs41_state_protection *sp)
8065{
8066 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
8067 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
8068 1 << (OP_EXCHANGE_ID - 32) |
8069 1 << (OP_CREATE_SESSION - 32) |
8070 1 << (OP_DESTROY_SESSION - 32) |
8071 1 << (OP_DESTROY_CLIENTID - 32)
8072 };
8073 unsigned long flags = 0;
8074 unsigned int i;
8075 int ret = 0;
8076
8077 if (sp->how == SP4_MACH_CRED) {
8078 /* Print state protect result */
8079 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
8080 for (i = 0; i <= LAST_NFS4_OP; i++) {
8081 if (test_bit(i, sp->enforce.u.longs))
8082 dfprintk(MOUNT, " enforce op %d\n", i);
8083 if (test_bit(i, sp->allow.u.longs))
8084 dfprintk(MOUNT, " allow op %d\n", i);
8085 }
8086
8087 /* make sure nothing is on enforce list that isn't supported */
8088 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
8089 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
8090 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8091 ret = -EINVAL;
8092 goto out;
8093 }
8094 }
8095
8096 /*
8097 * Minimal mode - state operations are allowed to use machine
8098 * credential. Note this already happens by default, so the
8099 * client doesn't have to do anything more than the negotiation.
8100 *
8101 * NOTE: we don't care if EXCHANGE_ID is in the list -
8102 * we're already using the machine cred for exchange_id
8103 * and will never use a different cred.
8104 */
8105 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
8106 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
8107 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
8108 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
8109 dfprintk(MOUNT, "sp4_mach_cred:\n");
8110 dfprintk(MOUNT, " minimal mode enabled\n");
8111 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
8112 } else {
8113 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
8114 ret = -EINVAL;
8115 goto out;
8116 }
8117
8118 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
8119 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
8120 test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
8121 test_bit(OP_LOCKU, sp->allow.u.longs)) {
8122 dfprintk(MOUNT, " cleanup mode enabled\n");
8123 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
8124 }
8125
8126 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
8127 dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
8128 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
8129 }
8130
8131 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
8132 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
8133 dfprintk(MOUNT, " secinfo mode enabled\n");
8134 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
8135 }
8136
8137 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
8138 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
8139 dfprintk(MOUNT, " stateid mode enabled\n");
8140 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
8141 }
8142
8143 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
8144 dfprintk(MOUNT, " write mode enabled\n");
8145 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
8146 }
8147
8148 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
8149 dfprintk(MOUNT, " commit mode enabled\n");
8150 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
8151 }
8152 }
8153out:
8154 clp->cl_sp4_flags = flags;
8155 return ret;
8156}
8157
8158struct nfs41_exchange_id_data {
8159 struct nfs41_exchange_id_res res;
8160 struct nfs41_exchange_id_args args;
8161};
8162
8163static void nfs4_exchange_id_release(void *data)
8164{
8165 struct nfs41_exchange_id_data *cdata =
8166 (struct nfs41_exchange_id_data *)data;
8167
8168 nfs_put_client(cdata->args.client);
8169 kfree(cdata->res.impl_id);
8170 kfree(cdata->res.server_scope);
8171 kfree(cdata->res.server_owner);
8172 kfree(cdata);
8173}
8174
8175static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
8176 .rpc_release = nfs4_exchange_id_release,
8177};
8178
8179/*
8180 * _nfs4_proc_exchange_id()
8181 *
8182 * Wrapper for EXCHANGE_ID operation.
8183 */
8184static struct rpc_task *
8185	nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
8186	u32 sp4_how, struct rpc_xprt *xprt)
8187{
8188 struct rpc_message msg = {
8189 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
8190 .rpc_cred = cred,
8191 };
8192 struct rpc_task_setup task_setup_data = {
8193 .rpc_client = clp->cl_rpcclient,
8194 .callback_ops = &nfs4_exchange_id_call_ops,
8195 .rpc_message = &msg,
8196	.flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
8197	};
8198 struct nfs41_exchange_id_data *calldata;
8199 int status;
8200
8201 if (!refcount_inc_not_zero(&clp->cl_count))
8202 return ERR_PTR(-EIO);
8203
8204 status = -ENOMEM;
8205 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8206 if (!calldata)
8207 goto out;
8208
8209 nfs4_init_boot_verifier(clp, &calldata->args.verifier);
8210
8211 status = nfs4_init_uniform_client_string(clp);
8212 if (status)
8213 goto out_calldata;
8214
8215 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
8216 GFP_NOFS);
8217 status = -ENOMEM;
8218 if (unlikely(calldata->res.server_owner == NULL))
8219 goto out_calldata;
8220
8221 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
8222 GFP_NOFS);
8223 if (unlikely(calldata->res.server_scope == NULL))
8224 goto out_server_owner;
8225
8226 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
8227 if (unlikely(calldata->res.impl_id == NULL))
8228 goto out_server_scope;
8229
8230 switch (sp4_how) {
8231 case SP4_NONE:
8232 calldata->args.state_protect.how = SP4_NONE;
8233 break;
8234
8235 case SP4_MACH_CRED:
8236 calldata->args.state_protect = nfs4_sp4_mach_cred_request;
8237 break;
8238
8239 default:
8240 /* unsupported! */
8241 WARN_ON_ONCE(1);
8242 status = -EINVAL;
8243 goto out_impl_id;
8244 }
8245 if (xprt) {
8246 task_setup_data.rpc_xprt = xprt;
8247 task_setup_data.flags |= RPC_TASK_SOFTCONN;
8248 memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
8249 sizeof(calldata->args.verifier.data));
8250 }
8251 calldata->args.client = clp;
8252 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
8253 EXCHGID4_FLAG_BIND_PRINC_STATEID;
8254#ifdef CONFIG_NFS_V4_1_MIGRATION
8255 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
8256#endif
8257 msg.rpc_argp = &calldata->args;
8258 msg.rpc_resp = &calldata->res;
8259 task_setup_data.callback_data = calldata;
8260
8261 return rpc_run_task(&task_setup_data);
8262
8263out_impl_id:
8264 kfree(calldata->res.impl_id);
8265out_server_scope:
8266 kfree(calldata->res.server_scope);
8267out_server_owner:
8268 kfree(calldata->res.server_owner);
8269out_calldata:
8270 kfree(calldata);
8271out:
8272 nfs_put_client(clp);
8273 return ERR_PTR(status);
8274}
8275
8276/*
8277 * _nfs4_proc_exchange_id()
8278 *
8279 * Wrapper for EXCHANGE_ID operation.
8280 */
8281	static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred,
8282	u32 sp4_how)
8283{
8284 struct rpc_task *task;
8285 struct nfs41_exchange_id_args *argp;
8286 struct nfs41_exchange_id_res *resp;
8287	unsigned long now = jiffies;
8288	int status;
8289
8290 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
8291 if (IS_ERR(task))
8292 return PTR_ERR(task);
8293
8294 argp = task->tk_msg.rpc_argp;
8295 resp = task->tk_msg.rpc_resp;
8296 status = task->tk_status;
8297 if (status != 0)
8298 goto out;
8299
8300	status = nfs4_check_cl_exchange_flags(resp->flags,
8301	clp->cl_mvops->minor_version);
8302	if (status != 0)
8303 goto out;
8304
8305 status = nfs4_sp4_select_mode(clp, &resp->state_protect);
8306 if (status != 0)
8307 goto out;
8308
8309	do_renew_lease(clp, now);
8310
8311	clp->cl_clientid = resp->clientid;
8312 clp->cl_exchange_flags = resp->flags;
8313 clp->cl_seqid = resp->seqid;
8314 /* Client ID is not confirmed */
8315 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
8316 clear_bit(NFS4_SESSION_ESTABLISHED,
8317 &clp->cl_session->session_state);
8318
8319 if (clp->cl_serverscope != NULL &&
8320 !nfs41_same_server_scope(clp->cl_serverscope,
8321 resp->server_scope)) {
8322 dprintk("%s: server_scope mismatch detected\n",
8323 __func__);
8324 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
8325 }
8326
8327 swap(clp->cl_serverowner, resp->server_owner);
8328 swap(clp->cl_serverscope, resp->server_scope);
8329 swap(clp->cl_implid, resp->impl_id);
8330
8331 /* Save the EXCHANGE_ID verifier session trunk tests */
8332 memcpy(clp->cl_confirm.data, argp->verifier.data,
8333 sizeof(clp->cl_confirm.data));
8334out:
8335 trace_nfs4_exchange_id(clp, status);
8336 rpc_put_task(task);
8337 return status;
8338}
8339
8340/*
8341 * nfs4_proc_exchange_id()
8342 *
8343 * Returns zero, a negative errno, or a negative NFS4ERR status code.
8344 *
8345 * Since the clientid has expired, all compounds using sessions
8346 * associated with the stale clientid will be returning
8347 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
8348 * be in some phase of session reset.
8349 *
8350 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
8351 */
8352	int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
8353	{
8354 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
8355 int status;
8356
8357 /* try SP4_MACH_CRED if krb5i/p */
8358 if (authflavor == RPC_AUTH_GSS_KRB5I ||
8359 authflavor == RPC_AUTH_GSS_KRB5P) {
8360 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
8361 if (!status)
8362 return 0;
8363 }
8364
8365 /* try SP4_NONE */
8366 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
8367}
8368
8369/**
8370 * nfs4_test_session_trunk
8371 *
8372 * This is an add_xprt_test() test function called from
8373 * rpc_clnt_setup_test_and_add_xprt.
8374 *
8375	 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt
8376	 * and is dereferenced in nfs4_exchange_id_release
8377 *
8378 * Upon success, add the new transport to the rpc_clnt
8379 *
8380 * @clnt: struct rpc_clnt to get new transport
8381 * @xprt: the rpc_xprt to test
8382 * @data: call data for _nfs4_proc_exchange_id.
8383 */
8384	void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
8385	void *data)
8386{
8387 struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data;
8388 struct rpc_task *task;
8389 int status;
8390
8391 u32 sp4_how;
8392
8393 dprintk("--> %s try %s\n", __func__,
8394 xprt->address_strings[RPC_DISPLAY_ADDR]);
8395
8396 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
8397
8398 /* Test connection for session trunking. Async exchange_id call */
8399 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
8400 if (IS_ERR(task))
8401	return;
8402
8403 status = task->tk_status;
8404 if (status == 0)
8405 status = nfs4_detect_session_trunking(adata->clp,
8406 task->tk_msg.rpc_resp, xprt);
8407
8408	if (status == 0)
8409 rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
8410
8411	rpc_put_task(task);
8412	}
8413EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
8414
8415static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
8416	const struct cred *cred)
8417	{
8418 struct rpc_message msg = {
8419 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
8420 .rpc_argp = clp,
8421 .rpc_cred = cred,
8422 };
8423 int status;
8424
8425	status = rpc_call_sync(clp->cl_rpcclient, &msg,
8426	RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008427 trace_nfs4_destroy_clientid(clp, status);
8428 if (status)
8429 dprintk("NFS: Got error %d from the server %s on "
8430 "DESTROY_CLIENTID.", status, clp->cl_hostname);
8431 return status;
8432}
8433
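/*
 * Retry DESTROY_CLIENTID up to NFS4_MAX_LOOP_ON_RECOVER times, sleeping
 * for a second whenever the server replies NFS4ERR_DELAY or
 * NFS4ERR_CLIENTID_BUSY.
 */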
8434static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
8435	const struct cred *cred)
8436	{
8437 unsigned int loop;
8438 int ret;
8439
8440 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
8441 ret = _nfs4_proc_destroy_clientid(clp, cred);
8442 switch (ret) {
8443 case -NFS4ERR_DELAY:
8444 case -NFS4ERR_CLIENTID_BUSY:
8445 ssleep(1);
8446 break;
8447 default:
8448 return ret;
8449 }
8450 }
8451 return 0;
8452}
8453
8454int nfs4_destroy_clientid(struct nfs_client *clp)
8455{
8456	const struct cred *cred;
8457	int ret = 0;
8458
8459 if (clp->cl_mvops->minor_version < 1)
8460 goto out;
8461 if (clp->cl_exchange_flags == 0)
8462 goto out;
8463 if (clp->cl_preserve_clid)
8464 goto out;
8465 cred = nfs4_get_clid_cred(clp);
8466 ret = nfs4_proc_destroy_clientid(clp, cred);
8467	put_cred(cred);
8468	switch (ret) {
8469 case 0:
8470 case -NFS4ERR_STALE_CLIENTID:
8471 clp->cl_exchange_flags = 0;
8472 }
8473out:
8474 return ret;
8475}
8476
8477	#endif /* CONFIG_NFS_V4_1 */
8478
8479	struct nfs4_get_lease_time_data {
8480 struct nfs4_get_lease_time_args *args;
8481 struct nfs4_get_lease_time_res *res;
8482 struct nfs_client *clp;
8483};
8484
8485static void nfs4_get_lease_time_prepare(struct rpc_task *task,
8486 void *calldata)
8487{
8488 struct nfs4_get_lease_time_data *data =
8489 (struct nfs4_get_lease_time_data *)calldata;
8490
8491 dprintk("--> %s\n", __func__);
8492 /* just setup sequence, do not trigger session recovery
8493 since we're invoked within one */
8494 nfs4_setup_sequence(data->clp,
8495 &data->args->la_seq_args,
8496 &data->res->lr_seq_res,
8497 task);
8498 dprintk("<-- %s\n", __func__);
8499}
8500
8501/*
8502 * Called from nfs4_state_manager thread for session setup, so don't recover
8503 * from sequence operation or clientid errors.
8504 */
8505static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
8506{
8507 struct nfs4_get_lease_time_data *data =
8508 (struct nfs4_get_lease_time_data *)calldata;
8509
8510 dprintk("--> %s\n", __func__);
8511	if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
8512	return;
8513 switch (task->tk_status) {
8514 case -NFS4ERR_DELAY:
8515 case -NFS4ERR_GRACE:
8516 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
8517 rpc_delay(task, NFS4_POLL_RETRY_MIN);
8518 task->tk_status = 0;
8519 /* fall through */
8520 case -NFS4ERR_RETRY_UNCACHED_REP:
8521 rpc_restart_call_prepare(task);
8522 return;
8523 }
8524 dprintk("<-- %s\n", __func__);
8525}
8526
8527static const struct rpc_call_ops nfs4_get_lease_time_ops = {
8528 .rpc_call_prepare = nfs4_get_lease_time_prepare,
8529 .rpc_call_done = nfs4_get_lease_time_done,
8530};
8531
8532int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
8533{
8534	struct nfs4_get_lease_time_args args;
8535 struct nfs4_get_lease_time_res res = {
8536 .lr_fsinfo = fsinfo,
8537 };
8538 struct nfs4_get_lease_time_data data = {
8539 .args = &args,
8540 .res = &res,
8541 .clp = clp,
8542 };
8543 struct rpc_message msg = {
8544 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
8545 .rpc_argp = &args,
8546 .rpc_resp = &res,
8547 };
8548 struct rpc_task_setup task_setup = {
8549 .rpc_client = clp->cl_rpcclient,
8550 .rpc_message = &msg,
8551 .callback_ops = &nfs4_get_lease_time_ops,
8552 .callback_data = &data,
8553 .flags = RPC_TASK_TIMEOUT,
8554 };
8555
8556	nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
8557	return nfs4_call_sync_custom(&task_setup);
8558	}
8559
8560	#ifdef CONFIG_NFS_V4_1
8561
8562	/*
8563 * Initialize the values to be used by the client in CREATE_SESSION
8564 * If nfs4_init_session set the fore channel request and response sizes,
8565 * use them.
8566 *
8567 * Set the back channel max_resp_sz_cached to zero to force the client to
8568 * always set csa_cachethis to FALSE because the current implementation
8569 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
8570 */
8571static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
8572 struct rpc_clnt *clnt)
8573{
8574 unsigned int max_rqst_sz, max_resp_sz;
8575 unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
David Brazdil0f672f62019-12-10 10:32:29 +00008576 unsigned int max_bc_slots = rpc_num_bc_slots(clnt);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008577
8578 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
8579 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
8580
8581 /* Fore channel attributes */
8582 args->fc_attrs.max_rqst_sz = max_rqst_sz;
8583 args->fc_attrs.max_resp_sz = max_resp_sz;
8584 args->fc_attrs.max_ops = NFS4_MAX_OPS;
8585 args->fc_attrs.max_reqs = max_session_slots;
8586
8587 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
8588 "max_ops=%u max_reqs=%u\n",
8589 __func__,
8590 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
8591 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
8592
8593 /* Back channel attributes */
8594 args->bc_attrs.max_rqst_sz = max_bc_payload;
8595 args->bc_attrs.max_resp_sz = max_bc_payload;
8596 args->bc_attrs.max_resp_sz_cached = 0;
8597 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
8598 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
David Brazdil0f672f62019-12-10 10:32:29 +00008599 if (args->bc_attrs.max_reqs > max_bc_slots)
8600 args->bc_attrs.max_reqs = max_bc_slots;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008601
8602 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
8603 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
8604 __func__,
8605 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
8606 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
8607 args->bc_attrs.max_reqs);
8608}
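
/*
 * Rough sizing example for the attributes chosen above (illustrative only;
 * exact values depend on compile-time constants): assuming
 * NFS_MAX_FILE_IO_SIZE is 1 MiB and the nfs41_maxwrite/maxread overheads
 * amount to a few hundred bytes of COMPOUND header and op encoding, the
 * fore channel ends up advertising
 *
 *	max_rqst_sz ~= 1048576 + nfs41_maxwrite_overhead
 *	max_resp_sz ~= 1048576 + nfs41_maxread_overhead
 *
 * while the back channel payload is capped by rpc_max_bc_payload(clnt) and
 * its slot count by rpc_num_bc_slots(clnt).
 */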
8609
8610static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
8611 struct nfs41_create_session_res *res)
8612{
8613 struct nfs4_channel_attrs *sent = &args->fc_attrs;
8614 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
8615
8616 if (rcvd->max_resp_sz > sent->max_resp_sz)
8617 return -EINVAL;
8618 /*
8619 * Our requested max_ops is the minimum we need; we're not
8620 * prepared to break up compounds into smaller pieces than that.
8621 * So, no point even trying to continue if the server won't
8622 * cooperate:
8623 */
8624 if (rcvd->max_ops < sent->max_ops)
8625 return -EINVAL;
8626 if (rcvd->max_reqs == 0)
8627 return -EINVAL;
8628 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
8629 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
8630 return 0;
8631}
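
/*
 * Example of the fore channel checks above (values are illustrative):
 * if we requested max_ops = 8 and the server replies with max_ops = 4,
 * session creation fails with -EINVAL because we will not split compounds;
 * a reply advertising max_reqs = 2048 is simply clamped to
 * NFS4_MAX_SLOT_TABLE.
 */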
8632
8633static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
8634 struct nfs41_create_session_res *res)
8635{
8636 struct nfs4_channel_attrs *sent = &args->bc_attrs;
8637 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
8638
8639 if (!(res->flags & SESSION4_BACK_CHAN))
8640 goto out;
8641 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
8642 return -EINVAL;
8643 if (rcvd->max_resp_sz < sent->max_resp_sz)
8644 return -EINVAL;
8645 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
8646 return -EINVAL;
8647 if (rcvd->max_ops > sent->max_ops)
8648 return -EINVAL;
8649 if (rcvd->max_reqs > sent->max_reqs)
8650 return -EINVAL;
8651out:
8652 return 0;
8653}
8654
8655static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
8656 struct nfs41_create_session_res *res)
8657{
8658 int ret;
8659
8660 ret = nfs4_verify_fore_channel_attrs(args, res);
8661 if (ret)
8662 return ret;
8663 return nfs4_verify_back_channel_attrs(args, res);
8664}
8665
8666static void nfs4_update_session(struct nfs4_session *session,
8667 struct nfs41_create_session_res *res)
8668{
8669 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
8670 /* Mark client id and session as being confirmed */
8671 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
8672 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
8673 session->flags = res->flags;
8674 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
8675 if (res->flags & SESSION4_BACK_CHAN)
8676 memcpy(&session->bc_attrs, &res->bc_attrs,
8677 sizeof(session->bc_attrs));
8678}
8679
8680static int _nfs4_proc_create_session(struct nfs_client *clp,
David Brazdil0f672f62019-12-10 10:32:29 +00008681 const struct cred *cred)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008682{
8683 struct nfs4_session *session = clp->cl_session;
8684 struct nfs41_create_session_args args = {
8685 .client = clp,
8686 .clientid = clp->cl_clientid,
8687 .seqid = clp->cl_seqid,
8688 .cb_program = NFS4_CALLBACK,
8689 };
8690 struct nfs41_create_session_res res;
8691
8692 struct rpc_message msg = {
8693 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
8694 .rpc_argp = &args,
8695 .rpc_resp = &res,
8696 .rpc_cred = cred,
8697 };
8698 int status;
8699
8700 nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
8701 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
8702
David Brazdil0f672f62019-12-10 10:32:29 +00008703 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
8704 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008705 trace_nfs4_create_session(clp, status);
8706
8707 switch (status) {
8708 case -NFS4ERR_STALE_CLIENTID:
8709 case -NFS4ERR_DELAY:
8710 case -ETIMEDOUT:
8711 case -EACCES:
8712 case -EAGAIN:
8713 goto out;
8714	}
8715
8716 clp->cl_seqid++;
8717 if (!status) {
8718 /* Verify the session's negotiated channel_attrs values */
8719 status = nfs4_verify_channel_attrs(&args, &res);
8720 /* Increment the clientid slot sequence id */
8721 if (status)
8722 goto out;
8723 nfs4_update_session(session, &res);
8724 }
8725out:
8726 return status;
8727}
8728
8729/*
8730 * Issues a CREATE_SESSION operation to the server.
8731 * It is the responsibility of the caller to verify the session is
8732 * expired before calling this routine.
8733 */
David Brazdil0f672f62019-12-10 10:32:29 +00008734int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008735{
8736 int status;
8737 unsigned *ptr;
8738 struct nfs4_session *session = clp->cl_session;
8739
8740 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
8741
8742 status = _nfs4_proc_create_session(clp, cred);
8743 if (status)
8744 goto out;
8745
8746 /* Init or reset the session slot tables */
8747 status = nfs4_setup_session_slot_tables(session);
8748 dprintk("slot table setup returned %d\n", status);
8749 if (status)
8750 goto out;
8751
8752 ptr = (unsigned *)&session->sess_id.data[0];
8753	dprintk("%s clp->cl_seqid %d sessionid %u:%u:%u:%u\n", __func__,
8754 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
8755out:
8756 dprintk("<-- %s\n", __func__);
8757 return status;
8758}
8759
8760/*
8761 * Issue the over-the-wire RPC DESTROY_SESSION.
8762 * The caller must serialize access to this routine.
8763 */
8764int nfs4_proc_destroy_session(struct nfs4_session *session,
David Brazdil0f672f62019-12-10 10:32:29 +00008765 const struct cred *cred)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008766{
8767 struct rpc_message msg = {
8768 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
8769 .rpc_argp = session,
8770 .rpc_cred = cred,
8771 };
8772 int status = 0;
8773
8774 dprintk("--> nfs4_proc_destroy_session\n");
8775
8776 /* session is still being setup */
8777 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
8778 return 0;
8779
David Brazdil0f672f62019-12-10 10:32:29 +00008780 status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
8781 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008782 trace_nfs4_destroy_session(session->clp, status);
8783
8784 if (status)
8785 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
8786 "Session has been destroyed regardless...\n", status);
8787
8788 dprintk("<-- nfs4_proc_destroy_session\n");
8789 return status;
8790}
8791
8792/*
8793 * Renew the cl_session lease.
8794 */
8795struct nfs4_sequence_data {
8796 struct nfs_client *clp;
8797 struct nfs4_sequence_args args;
8798 struct nfs4_sequence_res res;
8799};
8800
8801static void nfs41_sequence_release(void *data)
8802{
8803 struct nfs4_sequence_data *calldata = data;
8804 struct nfs_client *clp = calldata->clp;
8805
8806 if (refcount_read(&clp->cl_count) > 1)
8807 nfs4_schedule_state_renewal(clp);
8808 nfs_put_client(clp);
8809 kfree(calldata);
8810}
8811
8812static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
8813{
8814	switch (task->tk_status) {
8815 case -NFS4ERR_DELAY:
8816 rpc_delay(task, NFS4_POLL_RETRY_MAX);
8817 return -EAGAIN;
8818 default:
8819 nfs4_schedule_lease_recovery(clp);
8820 }
8821 return 0;
8822}
8823
8824static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
8825{
8826 struct nfs4_sequence_data *calldata = data;
8827 struct nfs_client *clp = calldata->clp;
8828
8829 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
8830 return;
8831
8832 trace_nfs4_sequence(clp, task->tk_status);
8833 if (task->tk_status < 0) {
8834 dprintk("%s ERROR %d\n", __func__, task->tk_status);
8835 if (refcount_read(&clp->cl_count) == 1)
8836 goto out;
8837
8838 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
8839 rpc_restart_call_prepare(task);
8840 return;
8841 }
8842 }
8843 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
8844out:
8845 dprintk("<-- %s\n", __func__);
8846}
8847
8848static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
8849{
8850 struct nfs4_sequence_data *calldata = data;
8851 struct nfs_client *clp = calldata->clp;
8852 struct nfs4_sequence_args *args;
8853 struct nfs4_sequence_res *res;
8854
8855 args = task->tk_msg.rpc_argp;
8856 res = task->tk_msg.rpc_resp;
8857
8858 nfs4_setup_sequence(clp, args, res, task);
8859}
8860
8861static const struct rpc_call_ops nfs41_sequence_ops = {
8862 .rpc_call_done = nfs41_sequence_call_done,
8863 .rpc_call_prepare = nfs41_sequence_prepare,
8864 .rpc_release = nfs41_sequence_release,
8865};
8866
8867static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
David Brazdil0f672f62019-12-10 10:32:29 +00008868 const struct cred *cred,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008869 struct nfs4_slot *slot,
8870 bool is_privileged)
8871{
8872 struct nfs4_sequence_data *calldata;
8873 struct rpc_message msg = {
8874 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
8875 .rpc_cred = cred,
8876 };
8877 struct rpc_task_setup task_setup_data = {
8878 .rpc_client = clp->cl_rpcclient,
8879 .rpc_message = &msg,
8880 .callback_ops = &nfs41_sequence_ops,
8881 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
8882 };
8883 struct rpc_task *ret;
8884
8885 ret = ERR_PTR(-EIO);
8886 if (!refcount_inc_not_zero(&clp->cl_count))
8887 goto out_err;
8888
8889 ret = ERR_PTR(-ENOMEM);
8890 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8891 if (calldata == NULL)
8892 goto out_put_clp;
8893 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged);
8894 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
8895 msg.rpc_argp = &calldata->args;
8896 msg.rpc_resp = &calldata->res;
8897 calldata->clp = clp;
8898 task_setup_data.callback_data = calldata;
8899
8900 ret = rpc_run_task(&task_setup_data);
8901 if (IS_ERR(ret))
8902 goto out_err;
8903 return ret;
8904out_put_clp:
8905 nfs_put_client(clp);
8906out_err:
8907 nfs41_release_slot(slot);
8908 return ret;
8909}
8910
David Brazdil0f672f62019-12-10 10:32:29 +00008911static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008912{
8913 struct rpc_task *task;
8914 int ret = 0;
8915
8916 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
8917 return -EAGAIN;
8918 task = _nfs41_proc_sequence(clp, cred, NULL, false);
8919 if (IS_ERR(task))
8920 ret = PTR_ERR(task);
8921 else
8922 rpc_put_task_async(task);
8923 dprintk("<-- %s status=%d\n", __func__, ret);
8924 return ret;
8925}
8926
David Brazdil0f672f62019-12-10 10:32:29 +00008927static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008928{
8929 struct rpc_task *task;
8930 int ret;
8931
8932 task = _nfs41_proc_sequence(clp, cred, NULL, true);
8933 if (IS_ERR(task)) {
8934 ret = PTR_ERR(task);
8935 goto out;
8936 }
8937 ret = rpc_wait_for_completion_task(task);
8938 if (!ret)
8939 ret = task->tk_status;
8940 rpc_put_task(task);
8941out:
8942 dprintk("<-- %s status=%d\n", __func__, ret);
8943 return ret;
8944}
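
/*
 * Note: these two entry points drive NFSv4.1 lease renewal --
 * nfs41_proc_async_sequence is wired up as ->sched_state_renewal and
 * nfs4_proc_sequence as ->renew_lease in nfs41_state_renewal_ops below,
 * so a lone SEQUENCE operation is what keeps the lease alive.
 */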
8945
8946struct nfs4_reclaim_complete_data {
8947 struct nfs_client *clp;
8948 struct nfs41_reclaim_complete_args arg;
8949 struct nfs41_reclaim_complete_res res;
8950};
8951
8952static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
8953{
8954 struct nfs4_reclaim_complete_data *calldata = data;
8955
8956 nfs4_setup_sequence(calldata->clp,
8957 &calldata->arg.seq_args,
8958 &calldata->res.seq_res,
8959 task);
8960}
8961
8962static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
8963{
8964	switch (task->tk_status) {
8965 case 0:
8966 wake_up_all(&clp->cl_lock_waitq);
8967 /* Fallthrough */
8968 case -NFS4ERR_COMPLETE_ALREADY:
8969 case -NFS4ERR_WRONG_CRED: /* What to do here? */
8970 break;
8971 case -NFS4ERR_DELAY:
8972 rpc_delay(task, NFS4_POLL_RETRY_MAX);
8973 /* fall through */
8974 case -NFS4ERR_RETRY_UNCACHED_REP:
8975 return -EAGAIN;
8976 case -NFS4ERR_BADSESSION:
8977 case -NFS4ERR_DEADSESSION:
8978 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00008979 break;
8980 default:
8981 nfs4_schedule_lease_recovery(clp);
8982 }
8983 return 0;
8984}
8985
8986static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
8987{
8988 struct nfs4_reclaim_complete_data *calldata = data;
8989 struct nfs_client *clp = calldata->clp;
8990 struct nfs4_sequence_res *res = &calldata->res.seq_res;
8991
8992 dprintk("--> %s\n", __func__);
8993 if (!nfs41_sequence_done(task, res))
8994 return;
8995
8996 trace_nfs4_reclaim_complete(clp, task->tk_status);
8997 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
8998 rpc_restart_call_prepare(task);
8999 return;
9000 }
9001 dprintk("<-- %s\n", __func__);
9002}
9003
9004static void nfs4_free_reclaim_complete_data(void *data)
9005{
9006 struct nfs4_reclaim_complete_data *calldata = data;
9007
9008 kfree(calldata);
9009}
9010
9011static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
9012 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
9013 .rpc_call_done = nfs4_reclaim_complete_done,
9014 .rpc_release = nfs4_free_reclaim_complete_data,
9015};
9016
9017/*
9018 * Issue a global reclaim complete.
9019 */
9020static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
David Brazdil0f672f62019-12-10 10:32:29 +00009021 const struct cred *cred)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009022{
9023 struct nfs4_reclaim_complete_data *calldata;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009024 struct rpc_message msg = {
9025 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
9026 .rpc_cred = cred,
9027 };
9028 struct rpc_task_setup task_setup_data = {
9029 .rpc_client = clp->cl_rpcclient,
9030 .rpc_message = &msg,
9031 .callback_ops = &nfs4_reclaim_complete_call_ops,
David Brazdil0f672f62019-12-10 10:32:29 +00009032 .flags = RPC_TASK_NO_ROUND_ROBIN,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009033 };
9034 int status = -ENOMEM;
9035
9036 dprintk("--> %s\n", __func__);
9037 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
9038 if (calldata == NULL)
9039 goto out;
9040 calldata->clp = clp;
9041 calldata->arg.one_fs = 0;
9042
9043 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1);
9044 msg.rpc_argp = &calldata->arg;
9045 msg.rpc_resp = &calldata->res;
9046 task_setup_data.callback_data = calldata;
David Brazdil0f672f62019-12-10 10:32:29 +00009047 status = nfs4_call_sync_custom(&task_setup_data);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009048out:
9049 dprintk("<-- %s status=%d\n", __func__, status);
9050 return status;
9051}
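
/*
 * Note: this is the ->reclaim_complete hook of nfs41_reboot_recovery_ops;
 * the state manager calls it once reboot recovery of opens and locks has
 * finished. arg.one_fs == 0 requests a global RECLAIM_COMPLETE rather than
 * a per-filesystem one.
 */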
9052
9053static void
9054nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
9055{
9056 struct nfs4_layoutget *lgp = calldata;
9057 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
9058
9059 dprintk("--> %s\n", __func__);
9060 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
9061 &lgp->res.seq_res, task);
9062 dprintk("<-- %s\n", __func__);
9063}
9064
9065static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
9066{
9067 struct nfs4_layoutget *lgp = calldata;
9068
9069 dprintk("--> %s\n", __func__);
9070 nfs41_sequence_process(task, &lgp->res.seq_res);
9071 dprintk("<-- %s\n", __func__);
9072}
9073
9074static int
9075nfs4_layoutget_handle_exception(struct rpc_task *task,
9076 struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
9077{
9078 struct inode *inode = lgp->args.inode;
9079 struct nfs_server *server = NFS_SERVER(inode);
9080 struct pnfs_layout_hdr *lo;
9081 int nfs4err = task->tk_status;
9082 int err, status = 0;
9083 LIST_HEAD(head);
9084
9085 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
9086
9087 nfs4_sequence_free_slot(&lgp->res.seq_res);
9088
9089 switch (nfs4err) {
9090 case 0:
9091 goto out;
9092
9093 /*
9094 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
9095	 * on the file. Set status to -ENODATA to tell the upper layer to
9096	 * fall back to doing the I/O through the MDS (go inband).
9097 */
9098 case -NFS4ERR_LAYOUTUNAVAILABLE:
9099 status = -ENODATA;
9100 goto out;
9101 /*
9102 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
9103 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
9104 */
9105 case -NFS4ERR_BADLAYOUT:
9106 status = -EOVERFLOW;
9107 goto out;
9108 /*
9109 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
9110 * (or clients) writing to the same RAID stripe except when
9111 * the minlength argument is 0 (see RFC5661 section 18.43.3).
9112 *
9113 * Treat it like we would RECALLCONFLICT -- we retry for a little
9114 * while, and then eventually give up.
9115 */
9116 case -NFS4ERR_LAYOUTTRYLATER:
9117 if (lgp->args.minlength == 0) {
9118 status = -EOVERFLOW;
9119 goto out;
9120 }
9121 status = -EBUSY;
9122 break;
9123 case -NFS4ERR_RECALLCONFLICT:
9124 status = -ERECALLCONFLICT;
9125 break;
9126 case -NFS4ERR_DELEG_REVOKED:
9127 case -NFS4ERR_ADMIN_REVOKED:
9128 case -NFS4ERR_EXPIRED:
9129 case -NFS4ERR_BAD_STATEID:
9130 exception->timeout = 0;
9131 spin_lock(&inode->i_lock);
9132 lo = NFS_I(inode)->layout;
9133 /* If the open stateid was bad, then recover it. */
9134 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
9135 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
9136 spin_unlock(&inode->i_lock);
9137 exception->state = lgp->args.ctx->state;
9138 exception->stateid = &lgp->args.stateid;
9139 break;
9140 }
9141
9142 /*
9143 * Mark the bad layout state as invalid, then retry
9144 */
9145 pnfs_mark_layout_stateid_invalid(lo, &head);
9146 spin_unlock(&inode->i_lock);
9147 nfs_commit_inode(inode, 0);
9148 pnfs_free_lseg_list(&head);
9149 status = -EAGAIN;
9150 goto out;
9151 }
9152
9153 err = nfs4_handle_exception(server, nfs4err, exception);
9154 if (!status) {
9155 if (exception->retry)
9156 status = -EAGAIN;
9157 else
9158 status = err;
9159 }
9160out:
9161 dprintk("<-- %s\n", __func__);
9162 return status;
9163}
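
/*
 * Summary of the error mapping above:
 *	NFS4ERR_LAYOUTUNAVAILABLE	-> -ENODATA (fall back to MDS I/O)
 *	NFS4ERR_BADLAYOUT		-> -EOVERFLOW
 *	NFS4ERR_LAYOUTTRYLATER		-> -EBUSY, or -EOVERFLOW if minlength == 0
 *	NFS4ERR_RECALLCONFLICT		-> -ERECALLCONFLICT
 *	revoked/expired/bad stateid	-> -EAGAIN after invalidating the layout
 * Anything else is passed to nfs4_handle_exception() and may retry.
 */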
9164
9165size_t max_response_pages(struct nfs_server *server)
9166{
9167 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
9168 return nfs_page_array_len(0, max_resp_sz);
9169}
9170
9171static void nfs4_layoutget_release(void *calldata)
9172{
9173 struct nfs4_layoutget *lgp = calldata;
9174
9175 dprintk("--> %s\n", __func__);
9176 nfs4_sequence_free_slot(&lgp->res.seq_res);
9177 pnfs_layoutget_free(lgp);
9178 dprintk("<-- %s\n", __func__);
9179}
9180
9181static const struct rpc_call_ops nfs4_layoutget_call_ops = {
9182 .rpc_call_prepare = nfs4_layoutget_prepare,
9183 .rpc_call_done = nfs4_layoutget_done,
9184 .rpc_release = nfs4_layoutget_release,
9185};
9186
9187struct pnfs_layout_segment *
9188nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
9189{
9190 struct inode *inode = lgp->args.inode;
9191 struct nfs_server *server = NFS_SERVER(inode);
9192 struct rpc_task *task;
9193 struct rpc_message msg = {
9194 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
9195 .rpc_argp = &lgp->args,
9196 .rpc_resp = &lgp->res,
9197 .rpc_cred = lgp->cred,
9198 };
9199 struct rpc_task_setup task_setup_data = {
9200 .rpc_client = server->client,
9201 .rpc_message = &msg,
9202 .callback_ops = &nfs4_layoutget_call_ops,
9203 .callback_data = lgp,
9204 .flags = RPC_TASK_ASYNC,
9205 };
9206 struct pnfs_layout_segment *lseg = NULL;
9207 struct nfs4_exception exception = {
9208 .inode = inode,
9209 .timeout = *timeout,
9210 };
9211 int status = 0;
9212
9213 dprintk("--> %s\n", __func__);
9214
9215 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
9216 pnfs_get_layout_hdr(NFS_I(inode)->layout);
9217
9218 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
9219
9220 task = rpc_run_task(&task_setup_data);
9221 if (IS_ERR(task))
9222 return ERR_CAST(task);
9223 status = rpc_wait_for_completion_task(task);
9224 if (status != 0)
9225 goto out;
9226
David Brazdil0f672f62019-12-10 10:32:29 +00009227 if (task->tk_status < 0) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009228 status = nfs4_layoutget_handle_exception(task, lgp, &exception);
9229 *timeout = exception.timeout;
David Brazdil0f672f62019-12-10 10:32:29 +00009230 } else if (lgp->res.layoutp->len == 0) {
9231 status = -EAGAIN;
9232 *timeout = nfs4_update_delay(&exception.timeout);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009233 } else
9234 lseg = pnfs_layout_process(lgp);
9235out:
9236 trace_nfs4_layoutget(lgp->args.ctx,
9237 &lgp->args.range,
9238 &lgp->res.range,
9239 &lgp->res.stateid,
9240 status);
9241
9242 rpc_put_task(task);
9243 dprintk("<-- %s status=%d\n", __func__, status);
9244 if (status)
9245 return ERR_PTR(status);
9246 return lseg;
9247}
9248
9249static void
9250nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
9251{
9252 struct nfs4_layoutreturn *lrp = calldata;
9253
9254 dprintk("--> %s\n", __func__);
9255 nfs4_setup_sequence(lrp->clp,
9256 &lrp->args.seq_args,
9257 &lrp->res.seq_res,
9258 task);
9259 if (!pnfs_layout_is_valid(lrp->args.layout))
9260 rpc_exit(task, 0);
9261}
9262
9263static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
9264{
9265 struct nfs4_layoutreturn *lrp = calldata;
9266 struct nfs_server *server;
9267
9268 dprintk("--> %s\n", __func__);
9269
9270 if (!nfs41_sequence_process(task, &lrp->res.seq_res))
9271 return;
9272
David Brazdil0f672f62019-12-10 10:32:29 +00009273 /*
9274 * Was there an RPC level error? Assume the call succeeded,
9275 * and that we need to release the layout
9276 */
9277 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
9278 lrp->res.lrs_present = 0;
9279 return;
9280 }
9281
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009282 server = NFS_SERVER(lrp->args.inode);
9283 switch (task->tk_status) {
9284 case -NFS4ERR_OLD_STATEID:
David Brazdil0f672f62019-12-10 10:32:29 +00009285 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009286 &lrp->args.range,
9287 lrp->args.inode))
9288 goto out_restart;
9289 /* Fallthrough */
9290 default:
9291 task->tk_status = 0;
9292 /* Fallthrough */
9293 case 0:
9294 break;
9295 case -NFS4ERR_DELAY:
9296 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
9297 break;
9298 goto out_restart;
9299 }
9300 dprintk("<-- %s\n", __func__);
9301 return;
9302out_restart:
9303 task->tk_status = 0;
9304 nfs4_sequence_free_slot(&lrp->res.seq_res);
9305 rpc_restart_call_prepare(task);
9306}
9307
9308static void nfs4_layoutreturn_release(void *calldata)
9309{
9310 struct nfs4_layoutreturn *lrp = calldata;
9311 struct pnfs_layout_hdr *lo = lrp->args.layout;
9312
9313 dprintk("--> %s\n", __func__);
9314 pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
9315 lrp->res.lrs_present ? &lrp->res.stateid : NULL);
9316 nfs4_sequence_free_slot(&lrp->res.seq_res);
9317 if (lrp->ld_private.ops && lrp->ld_private.ops->free)
9318 lrp->ld_private.ops->free(&lrp->ld_private);
9319 pnfs_put_layout_hdr(lrp->args.layout);
9320 nfs_iput_and_deactive(lrp->inode);
9321 kfree(calldata);
9322 dprintk("<-- %s\n", __func__);
9323}
9324
9325static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
9326 .rpc_call_prepare = nfs4_layoutreturn_prepare,
9327 .rpc_call_done = nfs4_layoutreturn_done,
9328 .rpc_release = nfs4_layoutreturn_release,
9329};
9330
9331int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
9332{
9333 struct rpc_task *task;
9334 struct rpc_message msg = {
9335 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
9336 .rpc_argp = &lrp->args,
9337 .rpc_resp = &lrp->res,
9338 .rpc_cred = lrp->cred,
9339 };
9340 struct rpc_task_setup task_setup_data = {
9341 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
9342 .rpc_message = &msg,
9343 .callback_ops = &nfs4_layoutreturn_call_ops,
9344 .callback_data = lrp,
9345 };
9346 int status = 0;
9347
9348 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
9349 NFS_SP4_MACH_CRED_PNFS_CLEANUP,
9350 &task_setup_data.rpc_client, &msg);
9351
9352 dprintk("--> %s\n", __func__);
Olivier Deprez0e641232021-09-23 10:07:05 +02009353 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009354 if (!sync) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009355 if (!lrp->inode) {
9356 nfs4_layoutreturn_release(lrp);
9357 return -EAGAIN;
9358 }
9359 task_setup_data.flags |= RPC_TASK_ASYNC;
9360 }
Olivier Deprez0e641232021-09-23 10:07:05 +02009361 if (!lrp->inode)
9362 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
9363 1);
9364 else
9365 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
9366 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009367 task = rpc_run_task(&task_setup_data);
9368 if (IS_ERR(task))
9369 return PTR_ERR(task);
9370 if (sync)
9371 status = task->tk_status;
9372 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
9373 dprintk("<-- %s status=%d\n", __func__, status);
9374 rpc_put_task(task);
9375 return status;
9376}
9377
9378static int
9379_nfs4_proc_getdeviceinfo(struct nfs_server *server,
9380 struct pnfs_device *pdev,
David Brazdil0f672f62019-12-10 10:32:29 +00009381 const struct cred *cred)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009382{
9383 struct nfs4_getdeviceinfo_args args = {
9384 .pdev = pdev,
9385 .notify_types = NOTIFY_DEVICEID4_CHANGE |
9386 NOTIFY_DEVICEID4_DELETE,
9387 };
9388 struct nfs4_getdeviceinfo_res res = {
9389 .pdev = pdev,
9390 };
9391 struct rpc_message msg = {
9392 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
9393 .rpc_argp = &args,
9394 .rpc_resp = &res,
9395 .rpc_cred = cred,
9396 };
9397 int status;
9398
9399 dprintk("--> %s\n", __func__);
9400 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
9401 if (res.notification & ~args.notify_types)
9402 dprintk("%s: unsupported notification\n", __func__);
9403 if (res.notification != args.notify_types)
9404 pdev->nocache = 1;
9405
9406 dprintk("<-- %s status=%d\n", __func__, status);
9407
9408 return status;
9409}
9410
9411int nfs4_proc_getdeviceinfo(struct nfs_server *server,
9412 struct pnfs_device *pdev,
David Brazdil0f672f62019-12-10 10:32:29 +00009413 const struct cred *cred)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009414{
9415 struct nfs4_exception exception = { };
9416 int err;
9417
9418 do {
9419 err = nfs4_handle_exception(server,
9420 _nfs4_proc_getdeviceinfo(server, pdev, cred),
9421 &exception);
9422 } while (exception.retry);
9423 return err;
9424}
9425EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
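
/*
 * nfs4_proc_getdeviceinfo() is exported so that pNFS layout driver modules
 * can fetch device information on a deviceid cache miss; the wrapper above
 * simply retries the raw GETDEVICEINFO call under nfs4_handle_exception().
 */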
9426
9427static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
9428{
9429 struct nfs4_layoutcommit_data *data = calldata;
9430 struct nfs_server *server = NFS_SERVER(data->args.inode);
9431
9432 nfs4_setup_sequence(server->nfs_client,
9433 &data->args.seq_args,
9434 &data->res.seq_res,
9435 task);
9436}
9437
9438static void
9439nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
9440{
9441 struct nfs4_layoutcommit_data *data = calldata;
9442 struct nfs_server *server = NFS_SERVER(data->args.inode);
9443
9444 if (!nfs41_sequence_done(task, &data->res.seq_res))
9445 return;
9446
9447 switch (task->tk_status) { /* Just ignore these failures */
9448 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
9449 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
9450 case -NFS4ERR_BADLAYOUT: /* no layout */
9451	case -NFS4ERR_GRACE:		/* loca_reclaim always false */
9452 task->tk_status = 0;
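		/* Fall through */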
9453 case 0:
9454 break;
9455 default:
9456 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
9457 rpc_restart_call_prepare(task);
9458 return;
9459 }
9460 }
9461}
9462
9463static void nfs4_layoutcommit_release(void *calldata)
9464{
9465 struct nfs4_layoutcommit_data *data = calldata;
9466
9467 pnfs_cleanup_layoutcommit(data);
9468 nfs_post_op_update_inode_force_wcc(data->args.inode,
9469 data->res.fattr);
David Brazdil0f672f62019-12-10 10:32:29 +00009470 put_cred(data->cred);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009471 nfs_iput_and_deactive(data->inode);
9472 kfree(data);
9473}
9474
9475static const struct rpc_call_ops nfs4_layoutcommit_ops = {
9476 .rpc_call_prepare = nfs4_layoutcommit_prepare,
9477 .rpc_call_done = nfs4_layoutcommit_done,
9478 .rpc_release = nfs4_layoutcommit_release,
9479};
9480
9481int
9482nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
9483{
9484 struct rpc_message msg = {
9485 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
9486 .rpc_argp = &data->args,
9487 .rpc_resp = &data->res,
9488 .rpc_cred = data->cred,
9489 };
9490 struct rpc_task_setup task_setup_data = {
9491 .task = &data->task,
9492 .rpc_client = NFS_CLIENT(data->args.inode),
9493 .rpc_message = &msg,
9494 .callback_ops = &nfs4_layoutcommit_ops,
9495 .callback_data = data,
9496 };
9497 struct rpc_task *task;
9498 int status = 0;
9499
9500 dprintk("NFS: initiating layoutcommit call. sync %d "
9501 "lbw: %llu inode %lu\n", sync,
9502 data->args.lastbytewritten,
9503 data->args.inode->i_ino);
9504
9505 if (!sync) {
9506 data->inode = nfs_igrab_and_active(data->args.inode);
9507 if (data->inode == NULL) {
9508 nfs4_layoutcommit_release(data);
9509 return -EAGAIN;
9510 }
9511 task_setup_data.flags = RPC_TASK_ASYNC;
9512 }
9513 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
9514 task = rpc_run_task(&task_setup_data);
9515 if (IS_ERR(task))
9516 return PTR_ERR(task);
9517 if (sync)
9518 status = task->tk_status;
9519 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
9520 dprintk("%s: status %d\n", __func__, status);
9521 rpc_put_task(task);
9522 return status;
9523}
9524
David Brazdil0f672f62019-12-10 10:32:29 +00009525/*
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009526 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
9527 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
9528 */
9529static int
9530_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
9531 struct nfs_fsinfo *info,
9532 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
9533{
9534 struct nfs41_secinfo_no_name_args args = {
9535 .style = SECINFO_STYLE_CURRENT_FH,
9536 };
9537 struct nfs4_secinfo_res res = {
9538 .flavors = flavors,
9539 };
9540 struct rpc_message msg = {
9541 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
9542 .rpc_argp = &args,
9543 .rpc_resp = &res,
9544 };
9545 struct rpc_clnt *clnt = server->client;
David Brazdil0f672f62019-12-10 10:32:29 +00009546 struct nfs4_call_sync_data data = {
9547 .seq_server = server,
9548 .seq_args = &args.seq_args,
9549 .seq_res = &res.seq_res,
9550 };
9551 struct rpc_task_setup task_setup = {
9552 .rpc_client = server->client,
9553 .rpc_message = &msg,
9554 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
9555 .callback_data = &data,
9556 .flags = RPC_TASK_NO_ROUND_ROBIN,
9557 };
9558 const struct cred *cred = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009559 int status;
9560
9561 if (use_integrity) {
9562 clnt = server->nfs_client->cl_rpcclient;
David Brazdil0f672f62019-12-10 10:32:29 +00009563 task_setup.rpc_client = clnt;
9564
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009565 cred = nfs4_get_clid_cred(server->nfs_client);
9566 msg.rpc_cred = cred;
9567 }
9568
9569 dprintk("--> %s\n", __func__);
David Brazdil0f672f62019-12-10 10:32:29 +00009570 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
9571 status = nfs4_call_sync_custom(&task_setup);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009572 dprintk("<-- %s status=%d\n", __func__, status);
9573
David Brazdil0f672f62019-12-10 10:32:29 +00009574 put_cred(cred);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009575
9576 return status;
9577}
9578
9579static int
9580nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
9581 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
9582{
David Brazdil0f672f62019-12-10 10:32:29 +00009583 struct nfs4_exception exception = {
9584 .interruptible = true,
9585 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009586 int err;
9587 do {
9588 /* first try using integrity protection */
9589 err = -NFS4ERR_WRONGSEC;
9590
9591 /* try to use integrity protection with machine cred */
9592 if (_nfs4_is_integrity_protected(server->nfs_client))
9593 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
9594 flavors, true);
9595
9596 /*
9597 * if unable to use integrity protection, or SECINFO with
9598 * integrity protection returns NFS4ERR_WRONGSEC (which is
9599 * disallowed by spec, but exists in deployed servers) use
9600 * the current filesystem's rpc_client and the user cred.
9601 */
9602 if (err == -NFS4ERR_WRONGSEC)
9603 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
9604 flavors, false);
9605
9606 switch (err) {
9607 case 0:
9608 case -NFS4ERR_WRONGSEC:
9609 case -ENOTSUPP:
9610 goto out;
9611 default:
9612 err = nfs4_handle_exception(server, err, &exception);
9613 }
9614 } while (exception.retry);
9615out:
9616 return err;
9617}
9618
9619static int
9620nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
9621 struct nfs_fsinfo *info)
9622{
9623 int err;
9624 struct page *page;
9625 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
9626 struct nfs4_secinfo_flavors *flavors;
9627 struct nfs4_secinfo4 *secinfo;
9628 int i;
9629
9630 page = alloc_page(GFP_KERNEL);
9631 if (!page) {
9632 err = -ENOMEM;
9633 goto out;
9634 }
9635
9636 flavors = page_address(page);
9637 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
9638
9639 /*
9640 * Fall back on "guess and check" method if
9641 * the server doesn't support SECINFO_NO_NAME
9642 */
9643 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
9644 err = nfs4_find_root_sec(server, fhandle, info);
9645 goto out_freepage;
9646 }
9647 if (err)
9648 goto out_freepage;
9649
9650 for (i = 0; i < flavors->num_flavors; i++) {
9651 secinfo = &flavors->flavors[i];
9652
9653 switch (secinfo->flavor) {
9654 case RPC_AUTH_NULL:
9655 case RPC_AUTH_UNIX:
9656 case RPC_AUTH_GSS:
9657 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
9658 &secinfo->flavor_info);
9659 break;
9660 default:
9661 flavor = RPC_AUTH_MAXFLAVOR;
9662 break;
9663 }
9664
9665 if (!nfs_auth_info_match(&server->auth_info, flavor))
9666 flavor = RPC_AUTH_MAXFLAVOR;
9667
9668 if (flavor != RPC_AUTH_MAXFLAVOR) {
9669 err = nfs4_lookup_root_sec(server, fhandle,
9670 info, flavor);
9671 if (!err)
9672 break;
9673 }
9674 }
9675
9676 if (flavor == RPC_AUTH_MAXFLAVOR)
9677 err = -EPERM;
9678
9679out_freepage:
9680 put_page(page);
9681 if (err == -EACCES)
9682 return -EPERM;
9683out:
9684 return err;
9685}
9686
9687static int _nfs41_test_stateid(struct nfs_server *server,
9688 nfs4_stateid *stateid,
David Brazdil0f672f62019-12-10 10:32:29 +00009689 const struct cred *cred)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009690{
9691 int status;
9692 struct nfs41_test_stateid_args args = {
9693 .stateid = stateid,
9694 };
9695 struct nfs41_test_stateid_res res;
9696 struct rpc_message msg = {
9697 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
9698 .rpc_argp = &args,
9699 .rpc_resp = &res,
9700 .rpc_cred = cred,
9701 };
9702 struct rpc_clnt *rpc_client = server->client;
9703
9704 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
9705 &rpc_client, &msg);
9706
9707 dprintk("NFS call test_stateid %p\n", stateid);
9708 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
9709 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
9710 &args.seq_args, &res.seq_res);
9711 if (status != NFS_OK) {
9712 dprintk("NFS reply test_stateid: failed, %d\n", status);
9713 return status;
9714 }
9715 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
9716 return -res.status;
9717}
9718
9719static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
9720 int err, struct nfs4_exception *exception)
9721{
9722 exception->retry = 0;
9723	switch (err) {
9724 case -NFS4ERR_DELAY:
9725 case -NFS4ERR_RETRY_UNCACHED_REP:
9726 nfs4_handle_exception(server, err, exception);
9727 break;
9728 case -NFS4ERR_BADSESSION:
9729 case -NFS4ERR_BADSLOT:
9730 case -NFS4ERR_BAD_HIGH_SLOT:
9731 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9732 case -NFS4ERR_DEADSESSION:
9733 nfs4_do_handle_exception(server, err, exception);
9734 }
9735}
9736
9737/**
9738 * nfs41_test_stateid - perform a TEST_STATEID operation
9739 *
9740 * @server: server / transport on which to perform the operation
9741 * @stateid: state ID to test
9742 * @cred: credential
9743 *
9744 * Returns NFS_OK if the server recognizes that "stateid" is valid.
9745 * Otherwise a negative NFS4ERR value is returned if the operation
9746 * failed or the state ID is not currently valid.
9747 */
9748static int nfs41_test_stateid(struct nfs_server *server,
9749 nfs4_stateid *stateid,
David Brazdil0f672f62019-12-10 10:32:29 +00009750 const struct cred *cred)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009751{
David Brazdil0f672f62019-12-10 10:32:29 +00009752 struct nfs4_exception exception = {
9753 .interruptible = true,
9754 };
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009755 int err;
9756 do {
9757 err = _nfs41_test_stateid(server, stateid, cred);
9758 nfs4_handle_delay_or_session_error(server, err, &exception);
9759 } while (exception.retry);
9760 return err;
9761}
9762
9763struct nfs_free_stateid_data {
9764 struct nfs_server *server;
9765 struct nfs41_free_stateid_args args;
9766 struct nfs41_free_stateid_res res;
9767};
9768
9769static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
9770{
9771 struct nfs_free_stateid_data *data = calldata;
9772 nfs4_setup_sequence(data->server->nfs_client,
9773 &data->args.seq_args,
9774 &data->res.seq_res,
9775 task);
9776}
9777
9778static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
9779{
9780 struct nfs_free_stateid_data *data = calldata;
9781
9782 nfs41_sequence_done(task, &data->res.seq_res);
9783
9784 switch (task->tk_status) {
9785 case -NFS4ERR_DELAY:
9786 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
9787 rpc_restart_call_prepare(task);
9788 }
9789}
9790
9791static void nfs41_free_stateid_release(void *calldata)
9792{
9793 kfree(calldata);
9794}
9795
9796static const struct rpc_call_ops nfs41_free_stateid_ops = {
9797 .rpc_call_prepare = nfs41_free_stateid_prepare,
9798 .rpc_call_done = nfs41_free_stateid_done,
9799 .rpc_release = nfs41_free_stateid_release,
9800};
9801
9802/**
9803 * nfs41_free_stateid - perform a FREE_STATEID operation
9804 *
9805 * @server: server / transport on which to perform the operation
9806 * @stateid: state ID to release
9807 * @cred: credential
David Brazdil0f672f62019-12-10 10:32:29 +00009808 * @privileged: set to true if this call needs to be privileged
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009809 *
9810 * Note: this function is always asynchronous.
9811 */
9812static int nfs41_free_stateid(struct nfs_server *server,
9813 const nfs4_stateid *stateid,
David Brazdil0f672f62019-12-10 10:32:29 +00009814 const struct cred *cred,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009815 bool privileged)
9816{
9817 struct rpc_message msg = {
9818 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
9819 .rpc_cred = cred,
9820 };
9821 struct rpc_task_setup task_setup = {
9822 .rpc_client = server->client,
9823 .rpc_message = &msg,
9824 .callback_ops = &nfs41_free_stateid_ops,
9825 .flags = RPC_TASK_ASYNC,
9826 };
9827 struct nfs_free_stateid_data *data;
9828 struct rpc_task *task;
9829
9830 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
9831 &task_setup.rpc_client, &msg);
9832
9833 dprintk("NFS call free_stateid %p\n", stateid);
9834 data = kmalloc(sizeof(*data), GFP_NOFS);
9835 if (!data)
9836 return -ENOMEM;
9837 data->server = server;
9838 nfs4_stateid_copy(&data->args.stateid, stateid);
9839
9840 task_setup.callback_data = data;
9841
9842 msg.rpc_argp = &data->args;
9843 msg.rpc_resp = &data->res;
9844 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged);
9845 task = rpc_run_task(&task_setup);
9846 if (IS_ERR(task))
9847 return PTR_ERR(task);
9848 rpc_put_task(task);
9849 return 0;
9850}
9851
9852static void
9853nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
9854{
David Brazdil0f672f62019-12-10 10:32:29 +00009855 const struct cred *cred = lsp->ls_state->owner->so_cred;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009856
9857 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
9858 nfs4_free_lock_state(server, lsp);
9859}
9860
9861static bool nfs41_match_stateid(const nfs4_stateid *s1,
9862 const nfs4_stateid *s2)
9863{
9864 if (s1->type != s2->type)
9865 return false;
9866
9867 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
9868 return false;
9869
9870 if (s1->seqid == s2->seqid)
9871 return true;
9872
9873 return s1->seqid == 0 || s2->seqid == 0;
9874}
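
/*
 * Example of the NFSv4.1 match semantics above: two stateids with the same
 * "other" field compare equal when their seqids match or when either seqid
 * is zero (a zero seqid acts as a wildcard for the current stateid version).
 */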
9875
9876#endif /* CONFIG_NFS_V4_1 */
9877
9878static bool nfs4_match_stateid(const nfs4_stateid *s1,
9879 const nfs4_stateid *s2)
9880{
9881 return nfs4_stateid_match(s1, s2);
9882}
9883
9884
9885static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
9886 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
9887 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
9888 .recover_open = nfs4_open_reclaim,
9889 .recover_lock = nfs4_lock_reclaim,
9890 .establish_clid = nfs4_init_clientid,
9891 .detect_trunking = nfs40_discover_server_trunking,
9892};
9893
9894#if defined(CONFIG_NFS_V4_1)
9895static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
9896 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
9897 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
9898 .recover_open = nfs4_open_reclaim,
9899 .recover_lock = nfs4_lock_reclaim,
9900 .establish_clid = nfs41_init_clientid,
9901 .reclaim_complete = nfs41_proc_reclaim_complete,
9902 .detect_trunking = nfs41_discover_server_trunking,
9903};
9904#endif /* CONFIG_NFS_V4_1 */
9905
9906static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
9907 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
9908 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
9909 .recover_open = nfs40_open_expired,
9910 .recover_lock = nfs4_lock_expired,
9911 .establish_clid = nfs4_init_clientid,
9912};
9913
9914#if defined(CONFIG_NFS_V4_1)
9915static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
9916 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
9917 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
9918 .recover_open = nfs41_open_expired,
9919 .recover_lock = nfs41_lock_expired,
9920 .establish_clid = nfs41_init_clientid,
9921};
9922#endif /* CONFIG_NFS_V4_1 */
9923
9924static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
9925 .sched_state_renewal = nfs4_proc_async_renew,
David Brazdil0f672f62019-12-10 10:32:29 +00009926 .get_state_renewal_cred = nfs4_get_renew_cred,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009927 .renew_lease = nfs4_proc_renew,
9928};
9929
9930#if defined(CONFIG_NFS_V4_1)
9931static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
9932 .sched_state_renewal = nfs41_proc_async_sequence,
David Brazdil0f672f62019-12-10 10:32:29 +00009933 .get_state_renewal_cred = nfs4_get_machine_cred,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00009934 .renew_lease = nfs4_proc_sequence,
9935};
9936#endif
9937
9938static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
9939 .get_locations = _nfs40_proc_get_locations,
9940 .fsid_present = _nfs40_proc_fsid_present,
9941};
9942
9943#if defined(CONFIG_NFS_V4_1)
9944static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
9945 .get_locations = _nfs41_proc_get_locations,
9946 .fsid_present = _nfs41_proc_fsid_present,
9947};
9948#endif /* CONFIG_NFS_V4_1 */
9949
9950static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
9951 .minor_version = 0,
9952 .init_caps = NFS_CAP_READDIRPLUS
9953 | NFS_CAP_ATOMIC_OPEN
9954 | NFS_CAP_POSIX_LOCK,
9955 .init_client = nfs40_init_client,
9956 .shutdown_client = nfs40_shutdown_client,
9957 .match_stateid = nfs4_match_stateid,
9958 .find_root_sec = nfs4_find_root_sec,
9959 .free_lock_state = nfs4_release_lockowner,
9960 .test_and_free_expired = nfs40_test_and_free_expired_stateid,
9961 .alloc_seqid = nfs_alloc_seqid,
9962 .call_sync_ops = &nfs40_call_sync_ops,
9963 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
9964 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
9965 .state_renewal_ops = &nfs40_state_renewal_ops,
9966 .mig_recovery_ops = &nfs40_mig_recovery_ops,
9967};
9968
9969#if defined(CONFIG_NFS_V4_1)
9970static struct nfs_seqid *
9971nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
9972{
9973 return NULL;
9974}
9975
9976static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
9977 .minor_version = 1,
9978 .init_caps = NFS_CAP_READDIRPLUS
9979 | NFS_CAP_ATOMIC_OPEN
9980 | NFS_CAP_POSIX_LOCK
9981 | NFS_CAP_STATEID_NFSV41
9982 | NFS_CAP_ATOMIC_OPEN_V1
9983 | NFS_CAP_LGOPEN,
9984 .init_client = nfs41_init_client,
9985 .shutdown_client = nfs41_shutdown_client,
9986 .match_stateid = nfs41_match_stateid,
9987 .find_root_sec = nfs41_find_root_sec,
9988 .free_lock_state = nfs41_free_lock_state,
9989 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
9990 .alloc_seqid = nfs_alloc_no_seqid,
9991 .session_trunk = nfs4_test_session_trunk,
9992 .call_sync_ops = &nfs41_call_sync_ops,
9993 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
9994 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
9995 .state_renewal_ops = &nfs41_state_renewal_ops,
9996 .mig_recovery_ops = &nfs41_mig_recovery_ops,
9997};
9998#endif
9999
10000#if defined(CONFIG_NFS_V4_2)
10001static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
10002 .minor_version = 2,
10003 .init_caps = NFS_CAP_READDIRPLUS
10004 | NFS_CAP_ATOMIC_OPEN
10005 | NFS_CAP_POSIX_LOCK
10006 | NFS_CAP_STATEID_NFSV41
10007 | NFS_CAP_ATOMIC_OPEN_V1
10008 | NFS_CAP_LGOPEN
10009 | NFS_CAP_ALLOCATE
10010 | NFS_CAP_COPY
10011 | NFS_CAP_OFFLOAD_CANCEL
10012 | NFS_CAP_DEALLOCATE
10013 | NFS_CAP_SEEK
10014 | NFS_CAP_LAYOUTSTATS
David Brazdil0f672f62019-12-10 10:32:29 +000010015 | NFS_CAP_CLONE
10016 | NFS_CAP_LAYOUTERROR,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000010017 .init_client = nfs41_init_client,
10018 .shutdown_client = nfs41_shutdown_client,
10019 .match_stateid = nfs41_match_stateid,
10020 .find_root_sec = nfs41_find_root_sec,
10021 .free_lock_state = nfs41_free_lock_state,
10022 .call_sync_ops = &nfs41_call_sync_ops,
10023 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
10024 .alloc_seqid = nfs_alloc_no_seqid,
10025 .session_trunk = nfs4_test_session_trunk,
10026 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
10027 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
10028 .state_renewal_ops = &nfs41_state_renewal_ops,
10029 .mig_recovery_ops = &nfs41_mig_recovery_ops,
10030};
10031#endif
10032
10033const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
10034 [0] = &nfs_v4_0_minor_ops,
10035#if defined(CONFIG_NFS_V4_1)
10036 [1] = &nfs_v4_1_minor_ops,
10037#endif
10038#if defined(CONFIG_NFS_V4_2)
10039 [2] = &nfs_v4_2_minor_ops,
10040#endif
10041};
10042
10043static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
10044{
10045 ssize_t error, error2;
10046
10047 error = generic_listxattr(dentry, list, size);
10048 if (error < 0)
10049 return error;
10050 if (list) {
10051 list += error;
10052 size -= error;
10053 }
10054
10055 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
10056 if (error2 < 0)
10057 return error2;
10058 return error + error2;
10059}
10060
10061static const struct inode_operations nfs4_dir_inode_operations = {
10062 .create = nfs_create,
10063 .lookup = nfs_lookup,
10064 .atomic_open = nfs_atomic_open,
10065 .link = nfs_link,
10066 .unlink = nfs_unlink,
10067 .symlink = nfs_symlink,
10068 .mkdir = nfs_mkdir,
10069 .rmdir = nfs_rmdir,
10070 .mknod = nfs_mknod,
10071 .rename = nfs_rename,
10072 .permission = nfs_permission,
10073 .getattr = nfs_getattr,
10074 .setattr = nfs_setattr,
10075 .listxattr = nfs4_listxattr,
10076};
10077
10078static const struct inode_operations nfs4_file_inode_operations = {
10079 .permission = nfs_permission,
10080 .getattr = nfs_getattr,
10081 .setattr = nfs_setattr,
10082 .listxattr = nfs4_listxattr,
10083};
10084
10085const struct nfs_rpc_ops nfs_v4_clientops = {
10086 .version = 4, /* protocol version */
10087 .dentry_ops = &nfs4_dentry_operations,
10088 .dir_inode_ops = &nfs4_dir_inode_operations,
10089 .file_inode_ops = &nfs4_file_inode_operations,
10090 .file_ops = &nfs4_file_operations,
10091 .getroot = nfs4_proc_get_root,
10092 .submount = nfs4_submount,
10093 .try_mount = nfs4_try_mount,
10094 .getattr = nfs4_proc_getattr,
10095 .setattr = nfs4_proc_setattr,
10096 .lookup = nfs4_proc_lookup,
10097 .lookupp = nfs4_proc_lookupp,
10098 .access = nfs4_proc_access,
10099 .readlink = nfs4_proc_readlink,
10100 .create = nfs4_proc_create,
10101 .remove = nfs4_proc_remove,
10102 .unlink_setup = nfs4_proc_unlink_setup,
10103 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
10104 .unlink_done = nfs4_proc_unlink_done,
10105 .rename_setup = nfs4_proc_rename_setup,
10106 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
10107 .rename_done = nfs4_proc_rename_done,
10108 .link = nfs4_proc_link,
10109 .symlink = nfs4_proc_symlink,
10110 .mkdir = nfs4_proc_mkdir,
10111 .rmdir = nfs4_proc_rmdir,
10112 .readdir = nfs4_proc_readdir,
10113 .mknod = nfs4_proc_mknod,
10114 .statfs = nfs4_proc_statfs,
10115 .fsinfo = nfs4_proc_fsinfo,
10116 .pathconf = nfs4_proc_pathconf,
10117 .set_capabilities = nfs4_server_capabilities,
10118 .decode_dirent = nfs4_decode_dirent,
10119 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
10120 .read_setup = nfs4_proc_read_setup,
10121 .read_done = nfs4_read_done,
10122 .write_setup = nfs4_proc_write_setup,
10123 .write_done = nfs4_write_done,
10124 .commit_setup = nfs4_proc_commit_setup,
10125 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
10126 .commit_done = nfs4_commit_done,
10127 .lock = nfs4_proc_lock,
10128 .clear_acl_cache = nfs4_zap_acl_attr,
10129 .close_context = nfs4_close_context,
10130 .open_context = nfs4_atomic_open,
10131 .have_delegation = nfs4_have_delegation,
10132 .alloc_client = nfs4_alloc_client,
10133 .init_client = nfs4_init_client,
10134 .free_client = nfs4_free_client,
10135 .create_server = nfs4_create_server,
10136 .clone_server = nfs_clone_server,
10137};
10138
10139static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
10140 .name = XATTR_NAME_NFSV4_ACL,
10141 .list = nfs4_xattr_list_nfs4_acl,
10142 .get = nfs4_xattr_get_nfs4_acl,
10143 .set = nfs4_xattr_set_nfs4_acl,
10144};
10145
10146const struct xattr_handler *nfs4_xattr_handlers[] = {
10147 &nfs4_xattr_nfs4_acl_handler,
10148#ifdef CONFIG_NFS_V4_SECURITY_LABEL
10149 &nfs4_xattr_nfs4_label_handler,
10150#endif
10151 NULL
10152};
10153
10154/*
10155 * Local variables:
10156 * c-basic-offset: 8
10157 * End:
10158 */