blob: 1aac8d38f887d51f7e05cf73f03d7038b9a69865 [file] [log] [blame]
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
5 *
6 * Copyright (C) International Business Machines Corp., 2002,2010
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
9 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
25#include <linux/backing-dev.h>
26#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
30#include <linux/writeback.h>
31#include <linux/task_io_accounting_ops.h>
32#include <linux/delay.h>
33#include <linux/mount.h>
34#include <linux/slab.h>
35#include <linux/swap.h>
David Brazdil0f672f62019-12-10 10:32:29 +000036#include <linux/mm.h>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000037#include <asm/div64.h>
38#include "cifsfs.h"
39#include "cifspdu.h"
40#include "cifsglob.h"
41#include "cifsproto.h"
42#include "cifs_unicode.h"
43#include "cifs_debug.h"
44#include "cifs_fs_sb.h"
45#include "fscache.h"
46#include "smbdirect.h"
47
48static inline int cifs_convert_flags(unsigned int flags)
49{
50 if ((flags & O_ACCMODE) == O_RDONLY)
51 return GENERIC_READ;
52 else if ((flags & O_ACCMODE) == O_WRONLY)
53 return GENERIC_WRITE;
54 else if ((flags & O_ACCMODE) == O_RDWR) {
55 /* GENERIC_ALL is too much permission to request
56 can cause unnecessary access denied on create */
57 /* return GENERIC_ALL; */
58 return (GENERIC_READ | GENERIC_WRITE);
59 }
60
61 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
62 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
63 FILE_READ_DATA);
64}
65
66static u32 cifs_posix_convert_flags(unsigned int flags)
67{
68 u32 posix_flags = 0;
69
70 if ((flags & O_ACCMODE) == O_RDONLY)
71 posix_flags = SMB_O_RDONLY;
72 else if ((flags & O_ACCMODE) == O_WRONLY)
73 posix_flags = SMB_O_WRONLY;
74 else if ((flags & O_ACCMODE) == O_RDWR)
75 posix_flags = SMB_O_RDWR;
76
77 if (flags & O_CREAT) {
78 posix_flags |= SMB_O_CREAT;
79 if (flags & O_EXCL)
80 posix_flags |= SMB_O_EXCL;
81 } else if (flags & O_EXCL)
82 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
83 current->comm, current->tgid);
84
85 if (flags & O_TRUNC)
86 posix_flags |= SMB_O_TRUNC;
87 /* be safe and imply O_SYNC for O_DSYNC */
88 if (flags & O_DSYNC)
89 posix_flags |= SMB_O_SYNC;
90 if (flags & O_DIRECTORY)
91 posix_flags |= SMB_O_DIRECTORY;
92 if (flags & O_NOFOLLOW)
93 posix_flags |= SMB_O_NOFOLLOW;
94 if (flags & O_DIRECT)
95 posix_flags |= SMB_O_DIRECT;
96
97 return posix_flags;
98}
99
100static inline int cifs_get_disposition(unsigned int flags)
101{
102 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
103 return FILE_CREATE;
104 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
105 return FILE_OVERWRITE_IF;
106 else if ((flags & O_CREAT) == O_CREAT)
107 return FILE_OPEN_IF;
108 else if ((flags & O_TRUNC) == O_TRUNC)
109 return FILE_OVERWRITE;
110 else
111 return FILE_OPEN;
112}
113
/*
 * Open @full_path on the server using the CIFS POSIX extensions
 * (CIFSPOSIXCreate) and, when requested, instantiate or refresh the
 * corresponding inode from the server's reply.
 *
 * @pinode:  if non-NULL, on success *pinode is either filled with a new
 *           inode (when *pinode was NULL) or the existing inode's
 *           attributes are refreshed from the reply.
 * @poplock: receives the oplock level granted by the server.
 * @pnetfid: receives the network file id of the opened handle.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* apply the caller's umask before sending the mode to the server */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type == -1 means the reply carried no usable file metadata */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* existing inode: drop stale pages, then update attributes */
		cifs_revalidate_mapping(*pinode);
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
175
/*
 * Open @full_path with a regular (non-POSIX-extensions) SMB open via
 * server->ops->open and refresh the inode metadata from the reply.
 * If the post-open inode query fails, the just-opened handle is closed
 * again and -ESTALE is converted to -EOPENSTALE for the VFS.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* scratch buffer for the server's FILE_ALL_INFO reply */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		/* don't leak the server handle when the inode query fails */
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}
267
268static bool
269cifs_has_mand_locks(struct cifsInodeInfo *cinode)
270{
271 struct cifs_fid_locks *cur;
272 bool has_locks = false;
273
274 down_read(&cinode->lock_sem);
275 list_for_each_entry(cur, &cinode->llist, llist) {
276 if (!list_empty(&cur->locks)) {
277 has_locks = true;
278 break;
279 }
280 }
281 up_read(&cinode->lock_sem);
282 return has_locks;
283}
284
David Brazdil0f672f62019-12-10 10:32:29 +0000285void
286cifs_down_write(struct rw_semaphore *sem)
287{
288 while (!down_write_trylock(sem))
289 msleep(10);
290}
291
Olivier Deprez0e641232021-09-23 10:07:05 +0200292static void cifsFileInfo_put_work(struct work_struct *work);
293
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000294struct cifsFileInfo *
295cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
296 struct tcon_link *tlink, __u32 oplock)
297{
298 struct dentry *dentry = file_dentry(file);
299 struct inode *inode = d_inode(dentry);
300 struct cifsInodeInfo *cinode = CIFS_I(inode);
301 struct cifsFileInfo *cfile;
302 struct cifs_fid_locks *fdlocks;
303 struct cifs_tcon *tcon = tlink_tcon(tlink);
304 struct TCP_Server_Info *server = tcon->ses->server;
305
306 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
307 if (cfile == NULL)
308 return cfile;
309
310 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
311 if (!fdlocks) {
312 kfree(cfile);
313 return NULL;
314 }
315
316 INIT_LIST_HEAD(&fdlocks->locks);
317 fdlocks->cfile = cfile;
318 cfile->llist = fdlocks;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000319
320 cfile->count = 1;
321 cfile->pid = current->tgid;
322 cfile->uid = current_fsuid();
323 cfile->dentry = dget(dentry);
324 cfile->f_flags = file->f_flags;
325 cfile->invalidHandle = false;
326 cfile->tlink = cifs_get_tlink(tlink);
327 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
Olivier Deprez0e641232021-09-23 10:07:05 +0200328 INIT_WORK(&cfile->put, cifsFileInfo_put_work);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000329 mutex_init(&cfile->fh_mutex);
330 spin_lock_init(&cfile->file_info_lock);
331
332 cifs_sb_active(inode->i_sb);
333
334 /*
335 * If the server returned a read oplock and we have mandatory brlocks,
336 * set oplock level to None.
337 */
338 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
339 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
340 oplock = 0;
341 }
342
Olivier Deprez0e641232021-09-23 10:07:05 +0200343 cifs_down_write(&cinode->lock_sem);
344 list_add(&fdlocks->llist, &cinode->llist);
345 up_write(&cinode->lock_sem);
346
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000347 spin_lock(&tcon->open_file_lock);
348 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
349 oplock = fid->pending_open->oplock;
350 list_del(&fid->pending_open->olist);
351
352 fid->purge_cache = false;
353 server->ops->set_fid(cfile, fid, oplock);
354
355 list_add(&cfile->tlist, &tcon->openFileList);
David Brazdil0f672f62019-12-10 10:32:29 +0000356 atomic_inc(&tcon->num_local_opens);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000357
358 /* if readable file instance put first in list*/
David Brazdil0f672f62019-12-10 10:32:29 +0000359 spin_lock(&cinode->open_file_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000360 if (file->f_mode & FMODE_READ)
361 list_add(&cfile->flist, &cinode->openFileList);
362 else
363 list_add_tail(&cfile->flist, &cinode->openFileList);
David Brazdil0f672f62019-12-10 10:32:29 +0000364 spin_unlock(&cinode->open_file_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000365 spin_unlock(&tcon->open_file_lock);
366
367 if (fid->purge_cache)
368 cifs_zap_mapping(inode);
369
370 file->private_data = cfile;
371 return cfile;
372}
373
/*
 * Take an extra reference on @cifs_file (under file_info_lock) and
 * return it.  Paired with cifsFileInfo_put().
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
382
Olivier Deprez0e641232021-09-23 10:07:05 +0200383static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
384{
385 struct inode *inode = d_inode(cifs_file->dentry);
386 struct cifsInodeInfo *cifsi = CIFS_I(inode);
387 struct cifsLockInfo *li, *tmp;
388 struct super_block *sb = inode->i_sb;
389
390 /*
391 * Delete any outstanding lock records. We'll lose them when the file
392 * is closed anyway.
393 */
394 cifs_down_write(&cifsi->lock_sem);
395 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
396 list_del(&li->llist);
397 cifs_del_lock_waiters(li);
398 kfree(li);
399 }
400 list_del(&cifs_file->llist->llist);
401 kfree(cifs_file->llist);
402 up_write(&cifsi->lock_sem);
403
404 cifs_put_tlink(cifs_file->tlink);
405 dput(cifs_file->dentry);
406 cifs_sb_deactive(sb);
407 kfree(cifs_file);
408}
409
/* Workqueue wrapper so the final put can run from fileinfo_put_wq. */
static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}
417
David Brazdil0f672f62019-12-10 10:32:29 +0000418/**
419 * cifsFileInfo_put - release a reference of file priv data
420 *
421 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000422 */
423void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
424{
Olivier Deprez0e641232021-09-23 10:07:05 +0200425 _cifsFileInfo_put(cifs_file, true, true);
David Brazdil0f672f62019-12-10 10:32:29 +0000426}
427
428/**
429 * _cifsFileInfo_put - release a reference of file priv data
430 *
431 * This may involve closing the filehandle @cifs_file out on the
Olivier Deprez0e641232021-09-23 10:07:05 +0200432 * server. Must be called without holding tcon->open_file_lock,
433 * cinode->open_file_lock and cifs_file->file_info_lock.
David Brazdil0f672f62019-12-10 10:32:29 +0000434 *
435 * If @wait_for_oplock_handler is true and we are releasing the last
436 * reference, wait for any running oplock break handler of the file
437 * and cancel any pending one. If calling this function from the
438 * oplock break handler, you need to pass false.
439 *
440 */
Olivier Deprez0e641232021-09-23 10:07:05 +0200441void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
442 bool wait_oplock_handler, bool offload)
David Brazdil0f672f62019-12-10 10:32:29 +0000443{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000444 struct inode *inode = d_inode(cifs_file->dentry);
445 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
446 struct TCP_Server_Info *server = tcon->ses->server;
447 struct cifsInodeInfo *cifsi = CIFS_I(inode);
448 struct super_block *sb = inode->i_sb;
449 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000450 struct cifs_fid fid;
451 struct cifs_pending_open open;
452 bool oplock_break_cancelled;
453
454 spin_lock(&tcon->open_file_lock);
David Brazdil0f672f62019-12-10 10:32:29 +0000455 spin_lock(&cifsi->open_file_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000456 spin_lock(&cifs_file->file_info_lock);
457 if (--cifs_file->count > 0) {
458 spin_unlock(&cifs_file->file_info_lock);
David Brazdil0f672f62019-12-10 10:32:29 +0000459 spin_unlock(&cifsi->open_file_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000460 spin_unlock(&tcon->open_file_lock);
461 return;
462 }
463 spin_unlock(&cifs_file->file_info_lock);
464
465 if (server->ops->get_lease_key)
466 server->ops->get_lease_key(inode, &fid);
467
468 /* store open in pending opens to make sure we don't miss lease break */
469 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
470
471 /* remove it from the lists */
472 list_del(&cifs_file->flist);
473 list_del(&cifs_file->tlist);
David Brazdil0f672f62019-12-10 10:32:29 +0000474 atomic_dec(&tcon->num_local_opens);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000475
476 if (list_empty(&cifsi->openFileList)) {
477 cifs_dbg(FYI, "closing last open instance for inode %p\n",
478 d_inode(cifs_file->dentry));
479 /*
480 * In strict cache mode we need invalidate mapping on the last
481 * close because it may cause a error when we open this file
482 * again and get at least level II oplock.
483 */
484 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
485 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
486 cifs_set_oplock_level(cifsi, 0);
487 }
488
David Brazdil0f672f62019-12-10 10:32:29 +0000489 spin_unlock(&cifsi->open_file_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000490 spin_unlock(&tcon->open_file_lock);
491
David Brazdil0f672f62019-12-10 10:32:29 +0000492 oplock_break_cancelled = wait_oplock_handler ?
493 cancel_work_sync(&cifs_file->oplock_break) : false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000494
495 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
496 struct TCP_Server_Info *server = tcon->ses->server;
497 unsigned int xid;
498
499 xid = get_xid();
500 if (server->ops->close)
501 server->ops->close(xid, tcon, &cifs_file->fid);
502 _free_xid(xid);
503 }
504
505 if (oplock_break_cancelled)
506 cifs_done_oplock_break(cifsi);
507
508 cifs_del_pending_open(&open);
509
Olivier Deprez0e641232021-09-23 10:07:05 +0200510 if (offload)
511 queue_work(fileinfo_put_wq, &cifs_file->put);
512 else
513 cifsFileInfo_put_final(cifs_file);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000514}
515
/*
 * VFS ->open for regular files: try a POSIX-extensions open when the
 * tcon supports it, otherwise fall back to a regular SMB open, then
 * create the cifsFileInfo and hook up fscache.  Registers a pending
 * open around the server round-trip so a lease break is not missed.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* strict O_DIRECT: switch to the uncached file operations */
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server advertises POSIX open but rejects it */
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* record the pending open so a concurrent lease break is seen */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server open and the pending-open record */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
642
643static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
644
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.  Chooses the POSIX or mandatory push path based
 * on the tcon's capabilities and mount flags.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* nested annotation: lock_sem may already be held by an outer path */
	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
674
/*
 * Reopen @cfile after its handle was invalidated (e.g. by reconnect).
 * Tries a POSIX-extensions reopen first when supported, then a regular
 * SMB open with the original flags (minus O_CREAT/O_EXCL/O_TRUNC).
 * When @can_flush is true, dirty pages are written back and the inode
 * metadata refreshed from the server afterwards.  Byte-range locks are
 * re-pushed if this was a reconnect reopen.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else already reopened the handle */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
840
841int cifs_close(struct inode *inode, struct file *file)
842{
843 if (file->private_data != NULL) {
Olivier Deprez0e641232021-09-23 10:07:05 +0200844 _cifsFileInfo_put(file->private_data, true, false);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000845 file->private_data = NULL;
846 }
847
848 /* return code from the ->release op is always ignored */
849 return 0;
850}
851
852void
853cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
854{
855 struct cifsFileInfo *open_file;
856 struct list_head *tmp;
857 struct list_head *tmp1;
858 struct list_head tmp_list;
859
860 if (!tcon->use_persistent || !tcon->need_reopen_files)
861 return;
862
863 tcon->need_reopen_files = false;
864
865 cifs_dbg(FYI, "Reopen persistent handles");
866 INIT_LIST_HEAD(&tmp_list);
867
868 /* list all files open on tree connection, reopen resilient handles */
869 spin_lock(&tcon->open_file_lock);
870 list_for_each(tmp, &tcon->openFileList) {
871 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
872 if (!open_file->invalidHandle)
873 continue;
874 cifsFileInfo_get(open_file);
875 list_add_tail(&open_file->rlist, &tmp_list);
876 }
877 spin_unlock(&tcon->open_file_lock);
878
879 list_for_each_safe(tmp, tmp1, &tmp_list) {
880 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
881 if (cifs_reopen_file(open_file, false /* do not flush */))
882 tcon->need_reopen_files = true;
883 list_del_init(&open_file->rlist);
884 cifsFileInfo_put(open_file);
885 }
886}
887
/*
 * VFS ->release for directories: close the search handle on the server
 * if the readdir was left uncompleted, free any buffered search
 * results, and release the private data.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	/* free the buffered portion of the search results, if any */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
938
939static struct cifsLockInfo *
David Brazdil0f672f62019-12-10 10:32:29 +0000940cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000941{
942 struct cifsLockInfo *lock =
943 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
944 if (!lock)
945 return lock;
946 lock->offset = offset;
947 lock->length = length;
948 lock->type = type;
949 lock->pid = current->tgid;
David Brazdil0f672f62019-12-10 10:32:29 +0000950 lock->flags = flags;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000951 INIT_LIST_HEAD(&lock->blist);
952 init_waitqueue_head(&lock->block_q);
953 return lock;
954}
955
956void
957cifs_del_lock_waiters(struct cifsLockInfo *lock)
958{
959 struct cifsLockInfo *li, *tmp;
960 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
961 list_del_init(&li->blist);
962 wake_up(&li->block_q);
963 }
964}
965
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
/*
 * Scan one fid's lock list for a lock that overlaps [offset,
 * offset+length) and conflicts with the proposed lock/IO described by
 * @type/@flags/@rw_check.  On conflict, optionally report the blocking
 * lock via @conf_lock and return true.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* non-overlapping ranges can never conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* two shared locks by the same owner (or same type) coexist */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		/* OFD locks through the same fid do not conflict */
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
1006
1007bool
1008cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
David Brazdil0f672f62019-12-10 10:32:29 +00001009 __u8 type, __u16 flags,
1010 struct cifsLockInfo **conf_lock, int rw_check)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001011{
1012 bool rc = false;
1013 struct cifs_fid_locks *cur;
1014 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1015
1016 list_for_each_entry(cur, &cinode->llist, llist) {
1017 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
David Brazdil0f672f62019-12-10 10:32:29 +00001018 flags, cfile, conf_lock,
1019 rw_check);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001020 if (rc)
1021 break;
1022 }
1023
1024 return rc;
1025}
1026
/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	/* read access suffices - we only inspect the per-inode lock lists */
	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->fl_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock's range/owner/type via @flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		/* no local conflict but the cache is not authoritative -
		   the caller must ask the server */
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
1065
/* Append @lock to @cfile's lock list under the inode's lock_sem. */
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
1074
1075/*
1076 * Set the byte-range lock (mandatory style). Returns:
1077 * 1) 0, if we set the lock and don't need to request to the server;
1078 * 2) 1, if no locks prevent us but we need to request to the server;
David Brazdil0f672f62019-12-10 10:32:29 +00001079 * 3) -EACCES, if there is a lock that prevents us and wait is false.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001080 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and brlocks are cacheable - set it locally
		   without talking to the server */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's waiter list and
		 * sleep until cifs_del_lock_waiters() empties our blist
		 * (the wait condition below checks exactly that).
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted - unlink ourselves from the waiter list */
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1122
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	/* posix_test_lock() may overwrite fl_type; remember the request */
	unsigned char saved_type = flock->fl_type;

	/* non-POSIX requests always go to the server */
	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		/* local state is not authoritative - restore the requested
		   type and tell the caller to query the server */
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
1151
1152/*
1153 * Set the byte-range lock (posix style). Returns:
1154 * 1) 0, if we set the lock and don't need to request to the server;
1155 * 2) 1, if we need to request to the server;
1156 * 3) <0, if the error occurs while setting the lock.
1157 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* cache is cold - the server must be asked (rc stays 1) */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/* blocked on a conflicting lock - wait until we are no
		   longer queued behind it, then retry the whole sequence */
		rc = wait_event_interruptible(flock->fl_wait,
					list_empty(&flock->fl_blocked_member));
		if (!rc)
			goto try_again;
		/* interrupted - remove ourselves from the blocked list */
		locks_delete_block(flock);
	}
	return rc;
}
1185
/*
 * Send all cached mandatory byte-range locks of @cfile to the server,
 * batching ranges of the same lock type into as few LOCKING_ANDX requests
 * as the server's buffer size allows. Returns 0 on success or the last
 * error returned by cifs_lockv().
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	/* one pass per lock type: exclusive first, then shared */
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* number of ranges that fit into one request buffer */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* buffer full - flush this batch */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			/* send the final, partially filled batch */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1262
1263static __u32
1264hash_lockowner(fl_owner_t owner)
1265{
1266 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1267}
1268
/* Snapshot of one POSIX lock, queued for pushing to the server. */
struct lock_to_push {
	struct list_head llist;	/* entry in the local locks_to_send list */
	__u64 offset;		/* start of the locked range */
	__u64 length;		/* length of the locked range */
	__u32 pid;		/* hashed lock owner (see hash_lockowner) */
	__u16 netfid;		/* SMB file handle the lock is sent on */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1277
1278static int
1279cifs_push_posix_locks(struct cifsFileInfo *cfile)
1280{
1281 struct inode *inode = d_inode(cfile->dentry);
1282 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1283 struct file_lock *flock;
1284 struct file_lock_context *flctx = inode->i_flctx;
1285 unsigned int count = 0, i;
1286 int rc = 0, xid, type;
1287 struct list_head locks_to_send, *el;
1288 struct lock_to_push *lck, *tmp;
1289 __u64 length;
1290
1291 xid = get_xid();
1292
1293 if (!flctx)
1294 goto out;
1295
1296 spin_lock(&flctx->flc_lock);
1297 list_for_each(el, &flctx->flc_posix) {
1298 count++;
1299 }
1300 spin_unlock(&flctx->flc_lock);
1301
1302 INIT_LIST_HEAD(&locks_to_send);
1303
1304 /*
1305 * Allocating count locks is enough because no FL_POSIX locks can be
1306 * added to the list while we are holding cinode->lock_sem that
1307 * protects locking operations of this inode.
1308 */
1309 for (i = 0; i < count; i++) {
1310 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1311 if (!lck) {
1312 rc = -ENOMEM;
1313 goto err_out;
1314 }
1315 list_add_tail(&lck->llist, &locks_to_send);
1316 }
1317
1318 el = locks_to_send.next;
1319 spin_lock(&flctx->flc_lock);
1320 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
1321 if (el == &locks_to_send) {
1322 /*
1323 * The list ended. We don't have enough allocated
1324 * structures - something is really wrong.
1325 */
1326 cifs_dbg(VFS, "Can't push all brlocks!\n");
1327 break;
1328 }
1329 length = 1 + flock->fl_end - flock->fl_start;
1330 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1331 type = CIFS_RDLCK;
1332 else
1333 type = CIFS_WRLCK;
1334 lck = list_entry(el, struct lock_to_push, llist);
1335 lck->pid = hash_lockowner(flock->fl_owner);
1336 lck->netfid = cfile->fid.netfid;
1337 lck->length = length;
1338 lck->type = type;
1339 lck->offset = flock->fl_start;
1340 }
1341 spin_unlock(&flctx->flc_lock);
1342
1343 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1344 int stored_rc;
1345
1346 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
1347 lck->offset, lck->length, NULL,
1348 lck->type, 0);
1349 if (stored_rc)
1350 rc = stored_rc;
1351 list_del(&lck->llist);
1352 kfree(lck);
1353 }
1354
1355out:
1356 free_xid(xid);
1357 return rc;
1358err_out:
1359 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1360 list_del(&lck->llist);
1361 kfree(lck);
1362 }
1363 goto out;
1364}
1365
/*
 * Flush all cached byte-range locks for @cfile to the server - POSIX style
 * when the server advertises Unix fcntl support (and it isn't disabled by
 * mount option), mandatory style otherwise - and mark the inode's brlock
 * cache invalid so subsequent locks go to the server directly.
 */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* nothing cached - nothing to push */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
1392
1393static void
1394cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1395 bool *wait_flag, struct TCP_Server_Info *server)
1396{
1397 if (flock->fl_flags & FL_POSIX)
1398 cifs_dbg(FYI, "Posix\n");
1399 if (flock->fl_flags & FL_FLOCK)
1400 cifs_dbg(FYI, "Flock\n");
1401 if (flock->fl_flags & FL_SLEEP) {
1402 cifs_dbg(FYI, "Blocking lock\n");
1403 *wait_flag = true;
1404 }
1405 if (flock->fl_flags & FL_ACCESS)
1406 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
1407 if (flock->fl_flags & FL_LEASE)
1408 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
1409 if (flock->fl_flags &
1410 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
David Brazdil0f672f62019-12-10 10:32:29 +00001411 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001412 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
1413
1414 *type = server->vals->large_lock_type;
1415 if (flock->fl_type == F_WRLCK) {
1416 cifs_dbg(FYI, "F_WRLCK\n");
1417 *type |= server->vals->exclusive_lock_type;
1418 *lock = 1;
1419 } else if (flock->fl_type == F_UNLCK) {
1420 cifs_dbg(FYI, "F_UNLCK\n");
1421 *type |= server->vals->unlock_lock_type;
1422 *unlock = 1;
1423 /* Check if unlock includes more than one lock range */
1424 } else if (flock->fl_type == F_RDLCK) {
1425 cifs_dbg(FYI, "F_RDLCK\n");
1426 *type |= server->vals->shared_lock_type;
1427 *lock = 1;
1428 } else if (flock->fl_type == F_EXLCK) {
1429 cifs_dbg(FYI, "F_EXLCK\n");
1430 *type |= server->vals->exclusive_lock_type;
1431 *lock = 1;
1432 } else if (flock->fl_type == F_SHLCK) {
1433 cifs_dbg(FYI, "F_SHLCK\n");
1434 *type |= server->vals->shared_lock_type;
1435 *lock = 1;
1436 } else
1437 cifs_dbg(FYI, "Unknown type of lock\n");
1438}
1439
/*
 * Handle F_GETLK: report whether the requested range could be locked.
 * For mandatory locks on servers without POSIX support this probes by
 * actually taking and immediately releasing the lock (and, for an
 * exclusive request, retrying as shared to distinguish read conflicts),
 * updating flock->fl_type with the result.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* local test is conclusive unless it returns nonzero */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* lock succeeded - range is free; undo the probe lock */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* shared probe failed - an exclusive lock is in the way */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive probe failed - retry as shared to see if the holder
	   is a reader or a writer */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1508
1509void
1510cifs_move_llist(struct list_head *source, struct list_head *dest)
1511{
1512 struct list_head *li, *tmp;
1513 list_for_each_safe(li, tmp, source)
1514 list_move(li, dest);
1515}
1516
1517void
1518cifs_free_llist(struct list_head *llist)
1519{
1520 struct cifsLockInfo *li, *tmp;
1521 list_for_each_entry_safe(li, tmp, llist, llist) {
1522 cifs_del_lock_waiters(li);
1523 list_del(&li->llist);
1524 kfree(li);
1525 }
1526}
1527
/*
 * Remove all of @cfile's cached mandatory locks that fall entirely inside
 * the range described by @flock, sending batched LOCKING_ANDX unlock
 * requests to the server for locks that are not merely cached. Locks being
 * unlocked are parked on a temporary list so they can be restored to the
 * file's list if the server rejects the unlock.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	/* one pass per lock type: exclusive first, then shared */
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* only locks fully contained in the range qualify */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* request buffer full - flush this batch */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			/* send the final, partially filled batch */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1640
/*
 * Handle F_SETLK/F_SETLKW: apply or remove a byte-range lock, POSIX style
 * when the server supports it, mandatory style otherwise. For FL_POSIX
 * requests the lock is mirrored into the local VFS lock table at the end
 * via locks_lock_file_wait().
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		/* rc == 0: cached locally; rc < 0: error - done either way */
		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		/* NB: shadows the 'int lock' parameter from here on */
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->fl_flags);
		if (!lock)
			return -ENOMEM;

		/* rc == 0: cached locally; rc < 0: conflict - done */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* server accepted the lock - record it in the fid's list */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->fl_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
1734
/*
 * VFS ->lock entry point: decode the request via cifs_read_flock(), then
 * dispatch to cifs_getlk() for F_GETLK or cifs_setlk() for set/unset.
 * POSIX-style locking is used when the server advertises Unix fcntl
 * support and it is not disabled by mount option.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
1788
1789/*
1790 * update the file size (if needed) after a write. Should be called with
1791 * the inode->i_lock held
1792 */
1793void
1794cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1795 unsigned int bytes_written)
1796{
1797 loff_t end_of_write = offset + bytes_written;
1798
1799 if (end_of_write > cifsi->server_eof)
1800 cifsi->server_eof = end_of_write;
1801}
1802
/*
 * Synchronously write @write_size bytes from @write_data at *@offset
 * through @open_file, chunking to the server's retry size, reopening the
 * handle and retrying on -EAGAIN, and advancing *@offset plus the cached
 * EOF / i_size as data goes out. Returns the number of bytes written, or
 * a negative error if nothing was written.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* error after partial progress: report the bytes
			   already written; error up front: report the error */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
1884
/*
 * Find an open, valid, readable handle for the inode (optionally limited
 * to the current fsuid on multiuser mounts). On success returns the
 * cifsFileInfo with an extra reference held - the caller must drop it via
 * cifsFileInfo_put(). Returns NULL if no usable handle exists.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return NULL;
}
1918
/*
 * Find a cached file handle open for writing on @cifs_inode, preferring
 * handles owned by the calling process, then any available handle, and
 * finally attempting to reopen an invalidated handle (at most
 * MAX_REOPEN_ATT times).  @flags: FIND_WR_FSUID_ONLY restricts the match
 * to the current fsuid (only honoured on multiuser mounts);
 * FIND_WR_WITH_DELETE additionally requires DELETE access on the handle.
 * On success *ret_file holds a referenced handle the caller must put.
 * Return -EBADF if no handle is found and general rc otherwise.
 */
int
cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc = -EBADF;
	unsigned int refind = 0;
	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
	bool with_delete = flags & FIND_WR_WITH_DELETE;
	*ret_file = NULL;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen but we had reports of on oops (due
	 * to it being zero) during stress testcases so we need to check for it
	 */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return rc;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	/* give up after MAX_REOPEN_ATT failed reopen attempts */
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return rc;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (with_delete && !(open_file->fid.access & DELETE))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				*ret_file = open_file;
				return 0;
			} else {
				/* remember first invalidated candidate */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	/* only invalidated handles left: take a ref so we can reopen one */
	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc) {
			*ret_file = inv_file;
			return 0;
		}

		/* reopen failed: rotate it to the tail and retry the scan */
		spin_lock(&cifs_inode->open_file_lock);
		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
		spin_unlock(&cifs_inode->open_file_lock);
		cifsFileInfo_put(inv_file);
		++refind;
		inv_file = NULL;
		spin_lock(&cifs_inode->open_file_lock);
		goto refind_writable;
	}

	return rc;
}
2009
2010struct cifsFileInfo *
Olivier Deprez0e641232021-09-23 10:07:05 +02002011find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
David Brazdil0f672f62019-12-10 10:32:29 +00002012{
2013 struct cifsFileInfo *cfile;
2014 int rc;
2015
Olivier Deprez0e641232021-09-23 10:07:05 +02002016 rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
David Brazdil0f672f62019-12-10 10:32:29 +00002017 if (rc)
2018 cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
2019
2020 return cfile;
2021}
2022
/*
 * Find an already-open file on @tcon whose dentry path equals @name and
 * obtain a writable handle for its inode via cifs_get_writable_file().
 * Returns 0 with a referenced handle in *ret_file, -ENOENT if no open
 * file matches, or -ENOMEM if the path string could not be built.
 *
 * NOTE(review): build_path_from_dentry() is called while the
 * tcon->open_file_lock spinlock is held -- confirm its allocation cannot
 * sleep in this context; newer kernels preallocate the path buffer
 * before taking the lock.
 */
int
cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
		       int flags,
		       struct cifsFileInfo **ret_file)
{
	struct list_head *tmp;
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode;
	char *full_path;

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		cfile = list_entry(tmp, struct cifsFileInfo,
				   tlist);
		full_path = build_path_from_dentry(cfile->dentry);
		if (full_path == NULL) {
			spin_unlock(&tcon->open_file_lock);
			return -ENOMEM;
		}
		if (strcmp(full_path, name)) {
			kfree(full_path);
			continue;
		}

		kfree(full_path);
		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		/* delegate handle selection/reopen to the inode-level helper */
		return cifs_get_writable_file(cinode, flags, ret_file);
	}

	spin_unlock(&tcon->open_file_lock);
	return -ENOENT;
}
2058
/*
 * Find an already-open file on @tcon whose dentry path equals @name and
 * return a readable handle for its inode via find_readable_file().
 * Returns 0 with a referenced handle in *ret_file, -ENOENT if no open
 * file matches or no readable handle exists, -ENOMEM on path allocation
 * failure.
 *
 * NOTE(review): as in cifs_get_writable_path(), build_path_from_dentry()
 * runs under the tcon->open_file_lock spinlock -- verify it cannot sleep
 * here.
 */
int
cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
		       struct cifsFileInfo **ret_file)
{
	struct list_head *tmp;
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode;
	char *full_path;

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		cfile = list_entry(tmp, struct cifsFileInfo,
				   tlist);
		full_path = build_path_from_dentry(cfile->dentry);
		if (full_path == NULL) {
			spin_unlock(&tcon->open_file_lock);
			return -ENOMEM;
		}
		if (strcmp(full_path, name)) {
			kfree(full_path);
			continue;
		}

		kfree(full_path);
		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		*ret_file = find_readable_file(cinode, 0);
		return *ret_file ? 0 : -ENOENT;
	}

	spin_unlock(&tcon->open_file_lock);
	return -ENOENT;
}
2094
/*
 * Write the byte range [@from, @to) of @page back to the server using any
 * cached writable handle for the owning inode.  The range is clamped so
 * the write never extends the file.  Returns 0 on success (including the
 * case where a racing truncate moved the page past i_size), a negative
 * errno otherwise; non-retryable handle-lookup failures are mapped to
 * -EIO.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* reject an out-of-page or inverted range */
	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
				    &open_file);
	if (!rc) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
		else
			rc = -EFAULT;
	} else {
		cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
		/* preserve retryable errors so callers can redirty/retry */
		if (!is_retryable_error(rc))
			rc = -EIO;
	}

	kunmap(page);
	return rc;
}
2152
2153static struct cifs_writedata *
2154wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2155 pgoff_t end, pgoff_t *index,
2156 unsigned int *found_pages)
2157{
2158 struct cifs_writedata *wdata;
2159
2160 wdata = cifs_writedata_alloc((unsigned int)tofind,
2161 cifs_writev_complete);
2162 if (!wdata)
2163 return NULL;
2164
2165 *found_pages = find_get_pages_range_tag(mapping, index, end,
2166 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
2167 return wdata;
2168}
2169
/*
 * Lock and claim a contiguous run of the @found_pages dirty pages already
 * collected in @wdata for one write.  The scan stops at the first page
 * that is no longer eligible: truncated away, past @end, not consecutive
 * with the previous page, still under writeback, or wholly beyond EOF.
 * Claimed pages have their dirty bit cleared and writeback set; the rest
 * are released.  Returns the number of pages claimed (callers unlock them
 * after the send is issued).
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */

		/* block for the first page, but don't stall on the rest */
		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
2248
/*
 * Fill in the I/O parameters of @wdata for @nr_pages prepared pages and
 * hand it to the transport as an async write.  The credit reservation is
 * adjusted down to the actual byte count first.  Returns -EAGAIN when the
 * cached handle has been invalidated so the caller can retry.
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc;
	struct TCP_Server_Info *server =
				tlink_tcon(wdata->cfile->tlink)->ses->server;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	/* last page may be partial: clamp its size to i_size */
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
	wdata->pid = wdata->cfile->pid;

	rc = adjust_credits(server, &wdata->credits, wdata->bytes);
	if (rc)
		return rc;

	if (wdata->cfile->invalidHandle)
		rc = -EAGAIN;
	else
		rc = server->ops->async_writev(wdata, cifs_writedata_release);

	return rc;
}
2278
/*
 * ->writepages for cifs: gather runs of contiguous dirty pages and issue
 * them as large async writes, sized by the negotiated wsize and gated by
 * the server's flow-control credits.  Falls back to generic_writepages()
 * (one page at a time) when wsize is smaller than a page.  The first
 * non-retryable error is saved and returned after the range is processed;
 * interrupt errors abort immediately.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct cifsFileInfo *cfile = NULL;
	int rc = 0;
	int saved_rc = 0;
	unsigned int xid;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	xid = get_xid();
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize;
		pgoff_t next = 0, tofind, saved_index = index;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;
		int get_file_rc = 0;

		/* drop the handle from the previous iteration, if any */
		if (cfile)
			cifsFileInfo_put(cfile);

		rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);

		/* in case of an error store it to return later */
		if (rc)
			get_file_rc = rc;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, credits);
		if (rc != 0) {
			done = true;
			break;
		}

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			done = true;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		/* ownership of the credits and handle moves into wdata */
		wdata->credits = credits_on_stack;
		wdata->cfile = cfile;
		cfile = NULL;

		if (!wdata->cfile) {
			cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
				 get_file_rc);
			if (is_retryable_error(get_file_rc))
				rc = get_file_rc;
			else
				rc = -EBADF;
		} else
			rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				/* retryable errors redirty; others error out */
				if (is_retryable_error(rc))
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (!is_retryable_error(rc))
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		/* Return immediately if we received a signal during writing */
		if (is_interrupt_error(rc)) {
			done = true;
			break;
		}

		if (rc != 0 && saved_rc == 0)
			saved_rc = rc;

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (saved_rc != 0)
		rc = saved_rc;

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	if (cfile)
		cifsFileInfo_put(cfile);
	free_xid(xid);
	return rc;
}
2439
/*
 * Write one page back to the server; the page lock is held by the caller
 * and remains held on return.  Retries indefinitely on -EAGAIN for
 * WB_SYNC_ALL writeback; other retryable errors redirty the page.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (is_retryable_error(rc)) {
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
			goto retry_write;
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}
2480
/* ->writepage: write the page, then drop the page lock taken by the VM. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
2487
/*
 * ->write_end: commit @copied bytes at @pos into @page.  If the page is
 * not uptodate the data is pushed synchronously through cifs_write()
 * using this file's handle; otherwise the page is simply dirtied for
 * later writeback.  Extends i_size when the write grew the file.
 * Returns the number of bytes committed or a negative errno.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* forward the original writer's pid if the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}
2548
/*
 * Strict fsync: flush dirty pages in [start, end], invalidate the local
 * page cache when we no longer hold a read-caching lease (so stale data
 * is refetched), then ask the server to flush unless the mount disabled
 * server-side sync (CIFS_MOUNT_NOSSYNC).
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	return rc;
}
2589
2590int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2591{
2592 unsigned int xid;
2593 int rc = 0;
2594 struct cifs_tcon *tcon;
2595 struct TCP_Server_Info *server;
2596 struct cifsFileInfo *smbfile = file->private_data;
2597 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002598
2599 rc = file_write_and_wait_range(file, start, end);
2600 if (rc)
2601 return rc;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002602
2603 xid = get_xid();
2604
2605 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2606 file, datasync);
2607
2608 tcon = tlink_tcon(smbfile->tlink);
2609 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2610 server = tcon->ses->server;
2611 if (server->ops->flush)
2612 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2613 else
2614 rc = -ENOSYS;
2615 }
2616
2617 free_xid(xid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002618 return rc;
2619}
2620
2621/*
2622 * As file closes, flush all cached write data for this inode checking
2623 * for write behind errors.
2624 */
2625int cifs_flush(struct file *file, fl_owner_t id)
2626{
2627 struct inode *inode = file_inode(file);
2628 int rc = 0;
2629
2630 if (file->f_mode & FMODE_WRITE)
2631 rc = filemap_write_and_wait(inode->i_mapping);
2632
2633 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2634
2635 return rc;
2636}
2637
2638static int
2639cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2640{
2641 int rc = 0;
2642 unsigned long i;
2643
2644 for (i = 0; i < num_pages; i++) {
2645 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2646 if (!pages[i]) {
2647 /*
2648 * save number of pages we have already allocated and
2649 * return with ENOMEM error
2650 */
2651 num_pages = i;
2652 rc = -ENOMEM;
2653 break;
2654 }
2655 }
2656
2657 if (rc) {
2658 for (i = 0; i < num_pages; i++)
2659 put_page(pages[i]);
2660 }
2661 return rc;
2662}
2663
2664static inline
2665size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2666{
2667 size_t num_pages;
2668 size_t clen;
2669
2670 clen = min_t(const size_t, len, wsize);
2671 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
2672
2673 if (cur_len)
2674 *cur_len = clen;
2675
2676 return num_pages;
2677}
2678
/*
 * Final kref release for an uncached (direct/aio) writedata: drop the aio
 * context reference, release the data pages, then free the writedata via
 * the common cifs_writedata_release().
 */
static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}
2691
2692static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2693
/*
 * Work item run when an uncached async write completes: advance the
 * server EOF / inode size if the write extended the file, signal the
 * waiter, and collect results into the owning aio context.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);
	collect_uncached_write_data(wdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
2713
/*
 * Copy up to *len bytes from the iterator @from into the pages of @wdata.
 * On return *len holds the number of bytes actually copied and
 * *num_pages the number of pages used.  Returns -EFAULT when nothing
 * could be copied at all (e.g. a bogus user address).
 */
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
2755
/*
 * Resend a previously failed wdata as a single request (not re-split into
 * smaller segments): reopen the handle if it went stale, wait until the
 * server grants enough credits for the full wdata->bytes, then reissue
 * the async write, retrying on -EAGAIN.  On success the wdata is queued
 * on @wdata_list; on failure its reference is dropped.
 */
static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
	struct cifs_aio_ctx *ctx)
{
	unsigned int wsize;
	struct cifs_credits credits;
	int rc;
	struct TCP_Server_Info *server =
		tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}


		/*
		 * Wait for credits to resend this wdata.
		 * Note: we are attempting to resend the whole wdata not in
		 * segments
		 */
		do {
			rc = server->ops->wait_mtu_credits(server, wdata->bytes,
						&wsize, &credits);
			if (rc)
				goto fail;

			/* not enough yet: give them back and wait a bit */
			if (wsize < wdata->bytes) {
				add_credits_and_wake_if(server, &credits, 0);
				msleep(1000);
			}
		} while (wsize < wdata->bytes);
		wdata->credits = credits;

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		}

		/* If the write was successfully sent, we are done */
		if (!rc) {
			list_add_tail(&wdata->list, wdata_list);
			return 0;
		}

		/* Roll back credits and retry if needed */
		add_credits_and_wake_if(server, &wdata->credits, 0);
	} while (rc == -EAGAIN);

fail:
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	return rc;
}
2818
/*
 * Split an uncached write of @len bytes at @offset (data supplied by the
 * iterator @from) into credit-sized async writes and queue each issued
 * wdata on @wdata_list for later collection by the aio context @ctx.
 * Direct I/O pins the user pages in place; buffered uncached I/O copies
 * the data into freshly allocated pages.  On -EAGAIN the iterator is
 * rewound to the already-sent position and the chunk is retried.
 */
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;
	unsigned int xid;

	/* forward the original writer's pid if the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;
	xid = get_xid();

	do {
		unsigned int wsize;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;

		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, false);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, wsize);

		if (ctx->direct_io) {
			ssize_t result;

			/* pin the user pages instead of copying them */
			result = iov_iter_get_pages_alloc(
				from, &pagevec, cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					 "direct_writev couldn't get user pages "
					 "(rc=%zd) iter type %d iov_offset %zd "
					 "count %zd\n",
					 result, from->type,
					 from->iov_offset, from->count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(from, cur_len);

			nr_pages =
				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;

			wdata = cifs_writedata_direct_alloc(pagevec,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}


			wdata->page_offset = start;
			/* tail page length accounts for the leading offset */
			wdata->tailsz =
				nr_pages > 1 ?
					cur_len - (PAGE_SIZE - start) -
					(nr_pages - 2) * PAGE_SIZE :
					cur_len;
		} else {
			nr_pages = get_numpages(wsize, len, &cur_len);
			wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
			if (rc) {
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			num_pages = nr_pages;
			rc = wdata_fill_from_iovec(
				wdata, from, &cur_len, &num_pages);
			if (rc) {
				for (i = 0; i < nr_pages; i++)
					put_page(wdata->pages[i]);
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			/*
			 * Bring nr_pages down to the number of pages we
			 * actually used, and free any pages that we didn't use.
			 */
			for ( ; nr_pages > num_pages; nr_pages--)
				put_page(wdata->pages[nr_pages - 1]);

			wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		}

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->credits = credits_on_stack;
		wdata->ctx = ctx;
		kref_get(&ctx->refcount);

		rc = adjust_credits(server, &wdata->credits, wdata->bytes);

		if (!rc) {
			if (wdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		}

		if (rc) {
			add_credits_and_wake_if(server, &wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				/* rewind the iterator to the sent position */
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	free_xid(xid);
	return rc;
}
2985
/*
 * Collect the results of all outstanding uncached-write requests attached to
 * an aio context, in order of increasing file offset.
 *
 * Called from both write-completion work items and the issuing thread; the
 * ctx->aio_mutex serializes collectors. On a retryable error (-EAGAIN) the
 * failed chunk is resent and the scan restarts from the head of the list.
 * When every request has completed, ctx->rc is set to the total bytes written
 * (or the first hard error) and the waiter / aio completion is signalled.
 */
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_writedata *wdata, *tmp;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct dentry *dentry = ctx->cfile->dentry;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	/* another collector already drained the list and completed the ctx */
	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit, then return without waiting
	 * for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
		if (!rc) {
			/*
			 * Non-blocking check: if this chunk hasn't completed
			 * yet, bail out -- its completion handler will call
			 * us again.
			 */
			if (!try_wait_for_completion(&wdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (wdata->result)
				rc = wdata->result;
			else
				ctx->total_len += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = ctx->iter;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				if (ctx->direct_io)
					/* direct I/O: reuse wdata as-is */
					rc = cifs_resend_wdata(
						wdata, &tmp_list, ctx);
				else {
					/* rebuild the chunk from the copy of
					 * the user iterator held in ctx */
					iov_iter_advance(&tmp_from,
						 wdata->offset - ctx->pos);

					rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						ctx->cfile, cifs_sb, &tmp_list,
						ctx);

					kref_put(&wdata->refcount,
						cifs_uncached_writedata_release);
				}

				/* resent requests land back on ctx->list;
				 * rescan from the top to keep offset order */
				list_splice(&tmp_list, &ctx->list);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	cifs_stats_bytes_written(tcon, ctx->total_len);
	/* pagecache now stale relative to what was written uncached */
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}
3067
/*
 * Common implementation for uncached (cache-bypassing) writes, shared by
 * cifs_user_writev() (buffered copy of the user data) and
 * cifs_direct_writev() (zero-copy direct I/O).
 *
 * Builds a cifs_aio_ctx, splits the iterator into async write requests via
 * cifs_write_from_iter(), then either returns -EIOCBQUEUED (async iocb) or
 * waits for collect_uncached_write_data() to finish the context.
 *
 * Returns bytes written, 0, or a negative errno.
 */
static ssize_t __cifs_writev(
	struct kiocb *iocb, struct iov_iter *from, bool direct)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_aio_ctx *ctx;
	struct iov_iter saved_from = *from;	/* unmodified copy for issuing */
	size_t len = iov_iter_count(from);
	int rc;

	/*
	 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
	 * In this case, fall back to non-direct write function.
	 * this could be improved by getting pages directly in ITER_KVEC
	 */
	if (direct && from->type & ITER_KVEC) {
		cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
		direct = false;
	}

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	/* record the iocb only for async requests; sync waits on ctx->done */
	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	ctx->pos = iocb->ki_pos;

	if (direct) {
		ctx->direct_io = true;
		ctx->iter = *from;
		ctx->len = len;
	} else {
		/* copies the user data into ctx-owned pages */
		rc = setup_aio_ctx_iter(ctx, from, WRITE);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
	}

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
				  cfile, cifs_sb, &ctx->list, ctx);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		/* completion path owns the remaining ctx reference */
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		/* killed while waiting: report -EINTR but keep partial count */
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	return total_written;
}
3171
David Brazdil0f672f62019-12-10 10:32:29 +00003172ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
3173{
3174 return __cifs_writev(iocb, from, true);
3175}
3176
3177ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
3178{
3179 return __cifs_writev(iocb, from, false);
3180}
3181
/*
 * Write through the page cache while honouring mandatory byte-range locks.
 *
 * Takes the inode lock, then lock_sem (read side) so no brlock that would
 * conflict with this write can be added while we check for conflicts. If a
 * conflicting exclusive lock exists, fails with -EACCES; otherwise falls
 * through to the generic buffered write path and syncs if needed.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	/* generic_write_sync() must run without the inode lock held */
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}
3217
/*
 * Strict-cache-mode write entry point.
 *
 * Chooses a write path based on the oplock/lease state:
 *  - write caching granted: use the page cache (generic path if POSIX
 *    byte-range locks apply, cifs_writev() for mandatory-lock checking);
 *  - otherwise: write uncached, then zap any read-cached pages since they
 *    are now stale.
 * cifs_get_writer()/cifs_put_writer() bracket the I/O against oplock breaks.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause an error with mandatory locks on
	 * these pages but not on the region from pos to pos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
3267
3268static struct cifs_readdata *
3269cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
3270{
3271 struct cifs_readdata *rdata;
3272
3273 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
3274 if (rdata != NULL) {
3275 rdata->pages = pages;
3276 kref_init(&rdata->refcount);
3277 INIT_LIST_HEAD(&rdata->list);
3278 init_completion(&rdata->done);
3279 INIT_WORK(&rdata->work, complete);
3280 }
3281
3282 return rdata;
3283}
3284
3285static struct cifs_readdata *
3286cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
3287{
3288 struct page **pages =
3289 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
3290 struct cifs_readdata *ret = NULL;
3291
3292 if (pages) {
3293 ret = cifs_readdata_direct_alloc(pages, complete);
3294 if (!ret)
3295 kfree(pages);
3296 }
3297
3298 return ret;
3299}
3300
/*
 * Final kref release for a cifs_readdata: tear down the SMB-direct memory
 * registration (if any), drop the file reference, and free the page array
 * and the structure itself. Called via kref_put(); never directly.
 */
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr) {
		smbd_deregister_mr(rdata->mr);
		rdata->mr = NULL;
	}
#endif
	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	/* pages array may come from kcalloc or iov_iter_get_pages_alloc */
	kvfree(rdata->pages);
	kfree(rdata);
}
3318
3319static int
3320cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
3321{
3322 int rc = 0;
3323 struct page *page;
3324 unsigned int i;
3325
3326 for (i = 0; i < nr_pages; i++) {
3327 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3328 if (!page) {
3329 rc = -ENOMEM;
3330 break;
3331 }
3332 rdata->pages[i] = page;
3333 }
3334
3335 if (rc) {
David Brazdil0f672f62019-12-10 10:32:29 +00003336 unsigned int nr_page_failed = i;
3337
3338 for (i = 0; i < nr_page_failed; i++) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003339 put_page(rdata->pages[i]);
3340 rdata->pages[i] = NULL;
3341 }
3342 }
3343 return rc;
3344}
3345
/*
 * Kref release for readdata used by the uncached read path: drops the
 * aio-context reference and the per-page references taken for this request,
 * then chains to cifs_readdata_release() for the common teardown.
 */
static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
	}
	cifs_readdata_release(refcount);
}
3359
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iter:	destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 *
 * Returns 0 if all of @rdata->got_bytes was copied, -EFAULT otherwise.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written;

		if (unlikely(iov_iter_is_pipe(iter))) {
			/* pipe iters can't take copy_page_to_iter() here;
			 * map the page and copy through the kernel address */
			void *addr = kmap_atomic(page);

			written = copy_to_iter(addr, copy, iter);
			kunmap_atomic(addr);
		} else
			written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		/* short copy with room left in iter means a fault occurred */
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}
3393
3394static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3395
/*
 * Work-queue completion handler for an uncached async read: mark this
 * request done, then try to collect the whole aio context.
 */
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
	collect_uncached_read_data(rdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
3407
3408static int
3409uncached_fill_pages(struct TCP_Server_Info *server,
3410 struct cifs_readdata *rdata, struct iov_iter *iter,
3411 unsigned int len)
3412{
3413 int result = 0;
3414 unsigned int i;
3415 unsigned int nr_pages = rdata->nr_pages;
3416 unsigned int page_offset = rdata->page_offset;
3417
3418 rdata->got_bytes = 0;
3419 rdata->tailsz = PAGE_SIZE;
3420 for (i = 0; i < nr_pages; i++) {
3421 struct page *page = rdata->pages[i];
3422 size_t n;
3423 unsigned int segment_size = rdata->pagesz;
3424
3425 if (i == 0)
3426 segment_size -= page_offset;
3427 else
3428 page_offset = 0;
3429
3430
3431 if (len <= 0) {
3432 /* no need to hold page hostage */
3433 rdata->pages[i] = NULL;
3434 rdata->nr_pages--;
3435 put_page(page);
3436 continue;
3437 }
3438
3439 n = len;
3440 if (len >= segment_size)
3441 /* enough data to fill the page */
3442 n = segment_size;
3443 else
3444 rdata->tailsz = len;
3445 len -= n;
3446
3447 if (iter)
3448 result = copy_page_from_iter(
3449 page, page_offset, n, iter);
3450#ifdef CONFIG_CIFS_SMB_DIRECT
3451 else if (rdata->mr)
3452 result = n;
3453#endif
3454 else
3455 result = cifs_read_page_from_socket(
3456 server, page, page_offset, n);
3457 if (result < 0)
3458 break;
3459
3460 rdata->got_bytes += result;
3461 }
3462
3463 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3464 rdata->got_bytes : result;
3465}
3466
3467static int
3468cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3469 struct cifs_readdata *rdata, unsigned int len)
3470{
3471 return uncached_fill_pages(server, rdata, NULL, len);
3472}
3473
3474static int
3475cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
3476 struct cifs_readdata *rdata,
3477 struct iov_iter *iter)
3478{
3479 return uncached_fill_pages(server, rdata, iter, iter->count);
3480}
3481
/*
 * Resend a failed direct-I/O read request in one piece (the rdata and its
 * user pages are reused rather than re-split).
 *
 * Loops reopening the handle and waiting for enough credits to cover the
 * entire request; on success the rdata is queued on @rdata_list for the
 * collector, otherwise the rdata reference is dropped. Returns 0 or a
 * negative errno.
 */
static int cifs_resend_rdata(struct cifs_readdata *rdata,
			struct list_head *rdata_list,
			struct cifs_aio_ctx *ctx)
{
	unsigned int rsize;
	struct cifs_credits credits;
	int rc;
	struct TCP_Server_Info *server =
		tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/*
		 * Wait for credits to resend this rdata.
		 * Note: we are attempting to resend the whole rdata not in
		 * segments
		 */
		do {
			rc = server->ops->wait_mtu_credits(server, rdata->bytes,
						&rsize, &credits);

			if (rc)
				goto fail;

			if (rsize < rdata->bytes) {
				/* not enough credits for the full request;
				 * give them back and retry after a pause */
				add_credits_and_wake_if(server, &credits, 0);
				msleep(1000);
			}
		} while (rsize < rdata->bytes);
		rdata->credits = credits;

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_readv(rdata);
		}

		/* If the read was successfully sent, we are done */
		if (!rc) {
			/* Add to aio pending list */
			list_add_tail(&rdata->list, rdata_list);
			return 0;
		}

		/* Roll back credits and retry if needed */
		add_credits_and_wake_if(server, &rdata->credits, 0);
	} while (rc == -EAGAIN);

fail:
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	return rc;
}
3543
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003544static int
3545cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3546 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
3547 struct cifs_aio_ctx *ctx)
3548{
3549 struct cifs_readdata *rdata;
David Brazdil0f672f62019-12-10 10:32:29 +00003550 unsigned int npages, rsize;
3551 struct cifs_credits credits_on_stack;
3552 struct cifs_credits *credits = &credits_on_stack;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003553 size_t cur_len;
3554 int rc;
3555 pid_t pid;
3556 struct TCP_Server_Info *server;
David Brazdil0f672f62019-12-10 10:32:29 +00003557 struct page **pagevec;
3558 size_t start;
3559 struct iov_iter direct_iov = ctx->iter;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003560
3561 server = tlink_tcon(open_file->tlink)->ses->server;
3562
3563 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3564 pid = open_file->pid;
3565 else
3566 pid = current->tgid;
3567
David Brazdil0f672f62019-12-10 10:32:29 +00003568 if (ctx->direct_io)
3569 iov_iter_advance(&direct_iov, offset - ctx->pos);
3570
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003571 do {
David Brazdil0f672f62019-12-10 10:32:29 +00003572 if (open_file->invalidHandle) {
3573 rc = cifs_reopen_file(open_file, true);
3574 if (rc == -EAGAIN)
3575 continue;
3576 else if (rc)
3577 break;
3578 }
3579
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003580 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
David Brazdil0f672f62019-12-10 10:32:29 +00003581 &rsize, credits);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003582 if (rc)
3583 break;
3584
3585 cur_len = min_t(const size_t, len, rsize);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003586
David Brazdil0f672f62019-12-10 10:32:29 +00003587 if (ctx->direct_io) {
3588 ssize_t result;
3589
3590 result = iov_iter_get_pages_alloc(
3591 &direct_iov, &pagevec,
3592 cur_len, &start);
3593 if (result < 0) {
3594 cifs_dbg(VFS,
3595 "couldn't get user pages (rc=%zd)"
3596 " iter type %d"
3597 " iov_offset %zd count %zd\n",
3598 result, direct_iov.type,
3599 direct_iov.iov_offset,
3600 direct_iov.count);
3601 dump_stack();
3602
3603 rc = result;
3604 add_credits_and_wake_if(server, credits, 0);
3605 break;
3606 }
3607 cur_len = (size_t)result;
3608 iov_iter_advance(&direct_iov, cur_len);
3609
3610 rdata = cifs_readdata_direct_alloc(
3611 pagevec, cifs_uncached_readv_complete);
3612 if (!rdata) {
3613 add_credits_and_wake_if(server, credits, 0);
3614 rc = -ENOMEM;
3615 break;
3616 }
3617
3618 npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
3619 rdata->page_offset = start;
3620 rdata->tailsz = npages > 1 ?
3621 cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
3622 cur_len;
3623
3624 } else {
3625
3626 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
3627 /* allocate a readdata struct */
3628 rdata = cifs_readdata_alloc(npages,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003629 cifs_uncached_readv_complete);
David Brazdil0f672f62019-12-10 10:32:29 +00003630 if (!rdata) {
3631 add_credits_and_wake_if(server, credits, 0);
3632 rc = -ENOMEM;
3633 break;
3634 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003635
David Brazdil0f672f62019-12-10 10:32:29 +00003636 rc = cifs_read_allocate_pages(rdata, npages);
3637 if (rc) {
3638 kvfree(rdata->pages);
3639 kfree(rdata);
3640 add_credits_and_wake_if(server, credits, 0);
3641 break;
3642 }
3643
3644 rdata->tailsz = PAGE_SIZE;
3645 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003646
3647 rdata->cfile = cifsFileInfo_get(open_file);
3648 rdata->nr_pages = npages;
3649 rdata->offset = offset;
3650 rdata->bytes = cur_len;
3651 rdata->pid = pid;
3652 rdata->pagesz = PAGE_SIZE;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003653 rdata->read_into_pages = cifs_uncached_read_into_pages;
3654 rdata->copy_into_pages = cifs_uncached_copy_into_pages;
David Brazdil0f672f62019-12-10 10:32:29 +00003655 rdata->credits = credits_on_stack;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003656 rdata->ctx = ctx;
3657 kref_get(&ctx->refcount);
3658
David Brazdil0f672f62019-12-10 10:32:29 +00003659 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3660
3661 if (!rc) {
3662 if (rdata->cfile->invalidHandle)
3663 rc = -EAGAIN;
3664 else
3665 rc = server->ops->async_readv(rdata);
3666 }
3667
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003668 if (rc) {
David Brazdil0f672f62019-12-10 10:32:29 +00003669 add_credits_and_wake_if(server, &rdata->credits, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003670 kref_put(&rdata->refcount,
David Brazdil0f672f62019-12-10 10:32:29 +00003671 cifs_uncached_readdata_release);
3672 if (rc == -EAGAIN) {
3673 iov_iter_revert(&direct_iov, cur_len);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003674 continue;
David Brazdil0f672f62019-12-10 10:32:29 +00003675 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003676 break;
3677 }
3678
3679 list_add_tail(&rdata->list, rdata_list);
3680 offset += cur_len;
3681 len -= cur_len;
3682 } while (len > 0);
3683
3684 return rc;
3685}
3686
/*
 * Collect the results of all outstanding uncached-read requests attached to
 * an aio context, in order of increasing file offset.
 *
 * Serialized by ctx->aio_mutex; called from read-completion work items and
 * the issuing thread. Copies received data to the user iterator (non-direct
 * case), resends -EAGAIN chunks, and once everything has completed sets
 * ctx->rc and signals the waiter / aio completion.
 */
static void
collect_uncached_read_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata, *tmp;
	struct iov_iter *to = &ctx->iter;
	struct cifs_sb_info *cifs_sb;
	int rc;

	cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	/* another collector already drained the list and completed the ctx */
	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
		if (!rc) {
			/* not done yet: the next completion will re-enter */
			if (!try_wait_for_completion(&rdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = 0;
					if (!ctx->direct_io)
						rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
							cifs_uncached_readdata_release);
						continue;
					}
				}

				if (ctx->direct_io) {
					/*
					 * Re-use rdata as this is a
					 * direct I/O
					 */
					rc = cifs_resend_rdata(
						rdata,
						&tmp_list, ctx);
				} else {
					/* re-split only the unreceived tail */
					rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list, ctx);

					kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
				}

				list_splice(&tmp_list, &ctx->list);

				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else if (!ctx->direct_io)
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;

			ctx->total_len += rdata->got_bytes;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	/* for the copy path, what the iterator consumed is authoritative */
	if (!ctx->direct_io)
		ctx->total_len = ctx->len - iov_iter_count(to);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}
3791
/*
 * Common implementation for uncached reads, shared by cifs_user_readv()
 * (data copied through ctx-owned pages) and cifs_direct_readv() (zero-copy
 * into pinned user pages).
 *
 * Builds a cifs_aio_ctx, issues async read requests via
 * cifs_send_async_read(), then either returns -EIOCBQUEUED (async iocb) or
 * waits for collect_uncached_read_data() to finish the context.
 *
 * Returns bytes read, 0, or a negative errno.
 */
static ssize_t __cifs_readv(
	struct kiocb *iocb, struct iov_iter *to, bool direct)
{
	size_t len;
	struct file *file = iocb->ki_filp;
	struct cifs_sb_info *cifs_sb;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	ssize_t rc, total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_aio_ctx *ctx;

	/*
	 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
	 * fall back to data copy read path
	 * this could be improved by getting pages directly in ITER_KVEC
	 */
	if (direct && to->type & ITER_KVEC) {
		cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
		direct = false;
	}

	len = iov_iter_count(to);
	if (!len)
		return 0;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	/* record the iocb only for async requests; sync waits on ctx->done */
	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	/* user-backed pages must be marked dirty after we write into them */
	if (iter_is_iovec(to))
		ctx->should_dirty = true;

	if (direct) {
		ctx->pos = offset;
		ctx->direct_io = true;
		ctx->iter = *to;
		ctx->len = len;
	} else {
		/* allocates ctx-owned pages to receive into */
		rc = setup_aio_ctx_iter(ctx, to, READ);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
		len = ctx->len;
	}

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		/* completion path owns the remaining ctx reference */
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		/* killed while waiting: report -EINTR but keep partial count */
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_read = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_read = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
3894
David Brazdil0f672f62019-12-10 10:32:29 +00003895ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
3896{
3897 return __cifs_readv(iocb, to, true);
3898}
3899
3900ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3901{
3902 return __cifs_readv(iocb, to, false);
3903}
3904
/*
 * Strict-cache-mode read entry point.
 *
 * Without a read (level II) oplock, always read from the server (uncached);
 * with POSIX byte-range lock semantics, use the generic cached path;
 * otherwise check mandatory brlocks under lock_sem before reading through
 * the page cache. Returns bytes read or a negative errno (-EACCES on a
 * brlock conflict).
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     0, NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
3944
/*
 * cifs_read - synchronous, uncached read from the server
 *
 * Reads up to @read_size bytes starting at *@offset into @read_data,
 * looping in rsize-limited chunks and advancing *@offset as data
 * arrives.  Returns the total number of bytes read, or a negative errno
 * only if the failure happened before any data was transferred (a
 * partial transfer returns the short count instead).
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	/* the protocol-specific sync_read op performs the actual wire I/O */
	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		/* retry this chunk for as long as the server returns -EAGAIN */
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if (!(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			/* reopen a handle invalidated by reconnect before reading */
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			/* short read: return what we have, error only if nothing read */
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
4035
4036/*
4037 * If the page is mmap'ed into a process' page tables, then we need to make
4038 * sure that it doesn't change while being written back.
4039 */
4040static vm_fault_t
4041cifs_page_mkwrite(struct vm_fault *vmf)
4042{
4043 struct page *page = vmf->page;
4044
4045 lock_page(page);
4046 return VM_FAULT_LOCKED;
4047}
4048
/* vm_ops used by both cifs_file_mmap and cifs_file_strict_mmap */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
4054
4055int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
4056{
4057 int xid, rc = 0;
4058 struct inode *inode = file_inode(file);
4059
4060 xid = get_xid();
4061
4062 if (!CIFS_CACHE_READ(CIFS_I(inode)))
4063 rc = cifs_zap_mapping(inode);
4064 if (!rc)
4065 rc = generic_file_mmap(file, vma);
4066 if (!rc)
4067 vma->vm_ops = &cifs_file_vm_ops;
4068
4069 free_xid(xid);
4070 return rc;
4071}
4072
4073int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
4074{
4075 int rc, xid;
4076
4077 xid = get_xid();
4078
4079 rc = cifs_revalidate_file(file);
4080 if (rc)
4081 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
4082 rc);
4083 if (!rc)
4084 rc = generic_file_mmap(file, vma);
4085 if (!rc)
4086 vma->vm_ops = &cifs_file_vm_ops;
4087
4088 free_xid(xid);
4089 return rc;
4090}
4091
4092static void
4093cifs_readv_complete(struct work_struct *work)
4094{
4095 unsigned int i, got_bytes;
4096 struct cifs_readdata *rdata = container_of(work,
4097 struct cifs_readdata, work);
4098
4099 got_bytes = rdata->got_bytes;
4100 for (i = 0; i < rdata->nr_pages; i++) {
4101 struct page *page = rdata->pages[i];
4102
4103 lru_cache_add_file(page);
4104
4105 if (rdata->result == 0 ||
4106 (rdata->result == -EAGAIN && got_bytes)) {
4107 flush_dcache_page(page);
4108 SetPageUptodate(page);
4109 }
4110
4111 unlock_page(page);
4112
4113 if (rdata->result == 0 ||
4114 (rdata->result == -EAGAIN && got_bytes))
4115 cifs_readpage_to_fscache(rdata->mapping->host, page);
4116
4117 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
4118
4119 put_page(page);
4120 rdata->pages[i] = NULL;
4121 }
4122 kref_put(&rdata->refcount, cifs_readdata_release);
4123}
4124
/*
 * Fill the pages of an async read request with @len bytes of received
 * data, either copied from @iter (when non-NULL), assumed already
 * placed by SMB Direct RDMA (rdata->mr), or read from the socket.
 * Pages beyond the received length are zero-filled (past the server
 * EOF) or released back to the LRU.  Returns the number of bytes
 * placed, or a negative errno if nothing usable was received.
 */
static int
readpages_fill_pages(struct TCP_Server_Info *server,
		     struct cifs_readdata *rdata, struct iov_iter *iter,
		     unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		unsigned int to_read = rdata->pagesz;
		size_t n;

		/* only the first page starts at a non-zero page offset */
		if (i == 0)
			to_read -= page_offset;
		else
			page_offset = 0;

		n = to_read;

		if (len >= to_read) {
			len -= to_read;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len + page_offset, to_read - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			/* data already landed via RDMA; just account for it */
			result = n;
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	/* report partial success unless the connection was aborted */
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
4210
/* rdata->read_into_pages callback: pull the payload off the socket */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	return readpages_fill_pages(server, rdata, NULL, len);
}
4217
/* rdata->copy_into_pages callback: copy an already-received iov_iter */
static int
cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata,
			       struct iov_iter *iter)
{
	return readpages_fill_pages(server, rdata, iter, iter->count);
}
4225
/*
 * Pull a run of contiguous pages off the tail of @page_list, insert
 * them (locked) into the pagecache, and move them to @tmplist.  Stops
 * at an index discontinuity, when adding another page would exceed
 * @rsize, or when pagecache insertion fails.  On success *offset,
 * *bytes and *nr_pages describe the resulting read request; returns 0,
 * or the insertion error if not even the first page could be added.
 */
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	page = lru_to_page(page_list);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
		if (rc) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	/* rc from the last insertion attempt; 0 if the loop ended cleanly */
	return rc;
}
4285
/*
 * ->readpages: read a list of pages (in declining index order) by
 * batching contiguous runs into rsize-limited async read requests.
 * Tries fscache first; thereafter each batch takes server credits,
 * moves its pages into the pagecache and issues async_readv, with
 * cifs_readv_complete finishing the pages.  On per-batch failure the
 * batch's pages are released and remaining pages fall back to readpage.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	int err = 0;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;
	unsigned int xid;

	xid = get_xid();
	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0) {
		free_xid(xid);
		return rc;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list) && !err) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		struct cifs_credits credits_on_stack;
		struct cifs_credits *credits = &credits_on_stack;

		/* handle may have been invalidated by reconnect; reopen it */
		if (open_file->invalidHandle) {
			rc = cifs_reopen_file(open_file, true);
			if (rc == -EAGAIN)
				continue;
			else if (rc)
				break;
		}

		/* reserve credits for up to rsize bytes of read */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			free_xid(xid);
			return 0;
		}

		nr_pages = 0;
		err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					  &nr_pages, &offset, &bytes);
		if (!nr_pages) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->tailsz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->copy_into_pages = cifs_readpages_copy_into_pages;
		rdata->credits = credits_on_stack;

		/* tmplist is in increasing index order; keep that in pages[] */
		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = adjust_credits(server, &rdata->credits, rdata->bytes);

		if (!rc) {
			if (rdata->cfile->invalidHandle)
				rc = -EAGAIN;
			else
				rc = server->ops->async_readv(rdata);
		}

		if (rc) {
			/* undo this batch: return credits and release its pages */
			add_credits_and_wake_if(server, &rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	free_xid(xid);
	return rc;
}
4439
4440/*
4441 * cifs_readpage_worker must be called with the page pinned
4442 */
4443static int cifs_readpage_worker(struct file *file, struct page *page,
4444 loff_t *poffset)
4445{
4446 char *read_data;
4447 int rc;
4448
4449 /* Is the page cached? */
4450 rc = cifs_readpage_from_fscache(file_inode(file), page);
4451 if (rc == 0)
4452 goto read_complete;
4453
4454 read_data = kmap(page);
4455 /* for reads over a certain size could initiate async read ahead */
4456
4457 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
4458
4459 if (rc < 0)
4460 goto io_error;
4461 else
4462 cifs_dbg(FYI, "Bytes read %d\n", rc);
4463
David Brazdil0f672f62019-12-10 10:32:29 +00004464 /* we do not want atime to be less than mtime, it broke some apps */
4465 file_inode(file)->i_atime = current_time(file_inode(file));
4466 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4467 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4468 else
4469 file_inode(file)->i_atime = current_time(file_inode(file));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004470
4471 if (PAGE_SIZE > rc)
4472 memset(read_data + rc, 0, PAGE_SIZE - rc);
4473
4474 flush_dcache_page(page);
4475 SetPageUptodate(page);
4476
4477 /* send this page to the cache */
4478 cifs_readpage_to_fscache(file_inode(file), page);
4479
4480 rc = 0;
4481
4482io_error:
4483 kunmap(page);
4484 unlock_page(page);
4485
4486read_complete:
4487 return rc;
4488}
4489
4490static int cifs_readpage(struct file *file, struct page *page)
4491{
4492 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
4493 int rc = -EACCES;
4494 unsigned int xid;
4495
4496 xid = get_xid();
4497
4498 if (file->private_data == NULL) {
4499 rc = -EBADF;
4500 free_xid(xid);
4501 return rc;
4502 }
4503
4504 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
4505 page, (int)offset, (int)offset);
4506
4507 rc = cifs_readpage_worker(file, page, &offset);
4508
4509 free_xid(xid);
4510 return rc;
4511}
4512
4513static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4514{
4515 struct cifsFileInfo *open_file;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004516
David Brazdil0f672f62019-12-10 10:32:29 +00004517 spin_lock(&cifs_inode->open_file_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004518 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
4519 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
David Brazdil0f672f62019-12-10 10:32:29 +00004520 spin_unlock(&cifs_inode->open_file_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004521 return 1;
4522 }
4523 }
David Brazdil0f672f62019-12-10 10:32:29 +00004524 spin_unlock(&cifs_inode->open_file_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004525 return 0;
4526}
4527
4528/* We do not want to update the file size from server for inodes
4529 open for write - to avoid races with writepage extending
4530 the file - in the future we could consider allowing
4531 refreshing the inode only on increases in the file size
4532 but this is tricky to do without racing with writebehind
4533 page caching in the current Linux kernel design */
4534bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
4535{
4536 if (!cifsInode)
4537 return true;
4538
4539 if (is_inode_writable(cifsInode)) {
4540 /* This inode is open for write at least once */
4541 struct cifs_sb_info *cifs_sb;
4542
4543 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
4544 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
4545 /* since no page cache to corrupt on directio
4546 we can change size safely */
4547 return true;
4548 }
4549
4550 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
4551 return true;
4552
4553 return false;
4554 } else
4555 return true;
4556}
4557
/*
 * ->write_begin: grab (and if necessary pre-fill) the pagecache page
 * that a buffered write at @pos/@len will land in.  If the write does
 * not cover the whole page and we cannot prove the rest is irrelevant,
 * the page is read in first (one retry via the oncethru flag, since
 * cifs_readpage_worker unlocks and we re-grab the page).
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	int oncethru = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
4634
4635static int cifs_release_page(struct page *page, gfp_t gfp)
4636{
4637 if (PagePrivate(page))
4638 return 0;
4639
4640 return cifs_fscache_release_page(page, gfp);
4641}
4642
4643static void cifs_invalidate_page(struct page *page, unsigned int offset,
4644 unsigned int length)
4645{
4646 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4647
4648 if (offset == 0 && length == PAGE_SIZE)
4649 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4650}
4651
4652static int cifs_launder_page(struct page *page)
4653{
4654 int rc = 0;
4655 loff_t range_start = page_offset(page);
4656 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
4657 struct writeback_control wbc = {
4658 .sync_mode = WB_SYNC_ALL,
4659 .nr_to_write = 0,
4660 .range_start = range_start,
4661 .range_end = range_end,
4662 };
4663
4664 cifs_dbg(FYI, "Launder page: %p\n", page);
4665
4666 if (clear_page_dirty_for_io(page))
4667 rc = cifs_writepage_locked(page, &wbc);
4668
4669 cifs_fscache_invalidate_page(page, page->mapping->host);
4670 return rc;
4671}
4672
/*
 * Work item run when the server breaks our oplock/lease: downgrade the
 * cached oplock state, break any matching local lease, flush (and if
 * needed invalidate) cached data, push byte-range locks back to the
 * server, and finally acknowledge the break to the server.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	bool purge_cache = false;

	/* let in-flight writers drain before changing oplock state */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	/* protocol-specific downgrade; may request a full cache purge */
	server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
				      cfile->oplock_epoch, &purge_cache);

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode) || purge_cache) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
		/* still caching writes: no need to push locks now */
		if (CIFS_CACHE_WRITE(cinode))
			goto oplock_break_ack;
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

oplock_break_ack:
	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
	cifs_done_oplock_break(cinode);
}
4732
4733/*
4734 * The presence of cifs_direct_io() in the address space ops vector
4735 * allowes open() O_DIRECT flags which would have failed otherwise.
4736 *
4737 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4738 * so this method should never be called.
4739 *
4740 * Direct IO is not yet supported in the cached mode.
4741 */
4742static ssize_t
4743cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
4744{
4745 /*
4746 * FIXME
4747 * Eventually need to support direct IO for non forcedirectio mounts
4748 */
4749 return -EINVAL;
4750}
4751
4752
/* default address_space operations (includes readpages and direct_IO) */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
4766
4767/*
4768 * cifs_readpages requires the server to support a buffer large enough to
4769 * contain the header plus one complete page of data. Otherwise, we need
4770 * to leave cifs_readpages out of the address space operations.
4771 */
4772const struct address_space_operations cifs_addr_ops_smallbuf = {
4773 .readpage = cifs_readpage,
4774 .writepage = cifs_writepage,
4775 .writepages = cifs_writepages,
4776 .write_begin = cifs_write_begin,
4777 .write_end = cifs_write_end,
4778 .set_page_dirty = __set_page_dirty_nobuffers,
4779 .releasepage = cifs_release_page,
4780 .invalidatepage = cifs_invalidate_page,
4781 .launder_page = cifs_launder_page,
4782};