blob: defee1d208d22859a2e08955292dc426600fd612 [file] [log] [blame]
David Brazdil0f672f62019-12-10 10:32:29 +00001// SPDX-License-Identifier: GPL-2.0
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002/*
3 * SMB2 version specific operations
4 *
5 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00006 */
7
8#include <linux/pagemap.h>
9#include <linux/vfs.h>
10#include <linux/falloc.h>
11#include <linux/scatterlist.h>
12#include <linux/uuid.h>
13#include <crypto/aead.h>
14#include "cifsglob.h"
15#include "smb2pdu.h"
16#include "smb2proto.h"
17#include "cifsproto.h"
18#include "cifs_debug.h"
19#include "cifs_unicode.h"
20#include "smb2status.h"
21#include "smb2glob.h"
22#include "cifs_ioctl.h"
23#include "smbdirect.h"
24
David Brazdil0f672f62019-12-10 10:32:29 +000025/* Change credits for different ops and return the total number of credits */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000026static int
27change_conf(struct TCP_Server_Info *server)
28{
29 server->credits += server->echo_credits + server->oplock_credits;
30 server->oplock_credits = server->echo_credits = 0;
31 switch (server->credits) {
32 case 0:
David Brazdil0f672f62019-12-10 10:32:29 +000033 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000034 case 1:
35 server->echoes = false;
36 server->oplocks = false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000037 break;
38 case 2:
39 server->echoes = true;
40 server->oplocks = false;
41 server->echo_credits = 1;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000042 break;
43 default:
44 server->echoes = true;
45 if (enable_oplocks) {
46 server->oplocks = true;
47 server->oplock_credits = 1;
48 } else
49 server->oplocks = false;
50
51 server->echo_credits = 1;
52 }
53 server->credits -= server->echo_credits + server->oplock_credits;
David Brazdil0f672f62019-12-10 10:32:29 +000054 return server->credits + server->echo_credits + server->oplock_credits;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000055}
56
57static void
David Brazdil0f672f62019-12-10 10:32:29 +000058smb2_add_credits(struct TCP_Server_Info *server,
59 const struct cifs_credits *credits, const int optype)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000060{
David Brazdil0f672f62019-12-10 10:32:29 +000061 int *val, rc = -1;
62 unsigned int add = credits->value;
63 unsigned int instance = credits->instance;
64 bool reconnect_detected = false;
65
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000066 spin_lock(&server->req_lock);
67 val = server->ops->get_credits_field(server, optype);
David Brazdil0f672f62019-12-10 10:32:29 +000068
69 /* eg found case where write overlapping reconnect messed up credits */
70 if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
71 trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
72 server->hostname, *val);
73 if ((instance == 0) || (instance == server->reconnect_instance))
74 *val += add;
75 else
76 reconnect_detected = true;
77
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000078 if (*val > 65000) {
79 *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
80 printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
81 }
82 server->in_flight--;
83 if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
84 rc = change_conf(server);
85 /*
86 * Sometimes server returns 0 credits on oplock break ack - we need to
87 * rebalance credits in this case.
88 */
89 else if (server->in_flight > 0 && server->oplock_credits == 0 &&
90 server->oplocks) {
91 if (server->credits > 1) {
92 server->credits--;
93 server->oplock_credits++;
94 }
95 }
96 spin_unlock(&server->req_lock);
97 wake_up(&server->request_q);
David Brazdil0f672f62019-12-10 10:32:29 +000098
99 if (reconnect_detected)
100 cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
101 add, instance);
102
103 if (server->tcpStatus == CifsNeedReconnect
104 || server->tcpStatus == CifsExiting)
105 return;
106
107 switch (rc) {
108 case -1:
109 /* change_conf hasn't been executed */
110 break;
111 case 0:
112 cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
113 break;
114 case 1:
115 cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
116 break;
117 case 2:
118 cifs_dbg(FYI, "disabling oplocks\n");
119 break;
120 default:
121 cifs_dbg(FYI, "add %u credits total=%d\n", add, rc);
122 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000123}
124
125static void
126smb2_set_credits(struct TCP_Server_Info *server, const int val)
127{
128 spin_lock(&server->req_lock);
129 server->credits = val;
David Brazdil0f672f62019-12-10 10:32:29 +0000130 if (val == 1)
131 server->reconnect_instance++;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000132 spin_unlock(&server->req_lock);
David Brazdil0f672f62019-12-10 10:32:29 +0000133 /* don't log while holding the lock */
134 if (val == 1)
135 cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000136}
137
138static int *
139smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
140{
141 switch (optype) {
142 case CIFS_ECHO_OP:
143 return &server->echo_credits;
144 case CIFS_OBREAK_OP:
145 return &server->oplock_credits;
146 default:
147 return &server->credits;
148 }
149}
150
151static unsigned int
152smb2_get_credits(struct mid_q_entry *mid)
153{
Olivier Deprez0e641232021-09-23 10:07:05 +0200154 return mid->credits_received;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000155}
156
157static int
158smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
David Brazdil0f672f62019-12-10 10:32:29 +0000159 unsigned int *num, struct cifs_credits *credits)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000160{
161 int rc = 0;
162 unsigned int scredits;
163
164 spin_lock(&server->req_lock);
165 while (1) {
166 if (server->credits <= 0) {
167 spin_unlock(&server->req_lock);
168 cifs_num_waiters_inc(server);
169 rc = wait_event_killable(server->request_q,
David Brazdil0f672f62019-12-10 10:32:29 +0000170 has_credits(server, &server->credits, 1));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000171 cifs_num_waiters_dec(server);
172 if (rc)
173 return rc;
174 spin_lock(&server->req_lock);
175 } else {
176 if (server->tcpStatus == CifsExiting) {
177 spin_unlock(&server->req_lock);
178 return -ENOENT;
179 }
180
181 scredits = server->credits;
182 /* can deadlock with reopen */
David Brazdil0f672f62019-12-10 10:32:29 +0000183 if (scredits <= 8) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000184 *num = SMB2_MAX_BUFFER_SIZE;
David Brazdil0f672f62019-12-10 10:32:29 +0000185 credits->value = 0;
186 credits->instance = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000187 break;
188 }
189
David Brazdil0f672f62019-12-10 10:32:29 +0000190 /* leave some credits for reopen and other ops */
191 scredits -= 8;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000192 *num = min_t(unsigned int, size,
193 scredits * SMB2_MAX_BUFFER_SIZE);
194
David Brazdil0f672f62019-12-10 10:32:29 +0000195 credits->value =
196 DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
197 credits->instance = server->reconnect_instance;
198 server->credits -= credits->value;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000199 server->in_flight++;
David Brazdil0f672f62019-12-10 10:32:29 +0000200 if (server->in_flight > server->max_in_flight)
201 server->max_in_flight = server->in_flight;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000202 break;
203 }
204 }
205 spin_unlock(&server->req_lock);
206 return rc;
207}
208
David Brazdil0f672f62019-12-10 10:32:29 +0000209static int
210smb2_adjust_credits(struct TCP_Server_Info *server,
211 struct cifs_credits *credits,
212 const unsigned int payload_size)
213{
214 int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
215
216 if (!credits->value || credits->value == new_val)
217 return 0;
218
219 if (credits->value < new_val) {
220 WARN_ONCE(1, "request has less credits (%d) than required (%d)",
221 credits->value, new_val);
222 return -ENOTSUPP;
223 }
224
225 spin_lock(&server->req_lock);
226
227 if (server->reconnect_instance != credits->instance) {
228 spin_unlock(&server->req_lock);
229 cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
230 credits->value - new_val);
231 return -EAGAIN;
232 }
233
234 server->credits += credits->value - new_val;
235 spin_unlock(&server->req_lock);
236 wake_up(&server->request_q);
237 credits->value = new_val;
238 return 0;
239}
240
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000241static __u64
242smb2_get_next_mid(struct TCP_Server_Info *server)
243{
244 __u64 mid;
245 /* for SMB2 we need the current value */
246 spin_lock(&GlobalMid_Lock);
247 mid = server->CurrentMid++;
248 spin_unlock(&GlobalMid_Lock);
249 return mid;
250}
251
David Brazdil0f672f62019-12-10 10:32:29 +0000252static void
253smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
254{
255 spin_lock(&GlobalMid_Lock);
256 if (server->CurrentMid >= val)
257 server->CurrentMid -= val;
258 spin_unlock(&GlobalMid_Lock);
259}
260
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000261static struct mid_q_entry *
Olivier Deprez0e641232021-09-23 10:07:05 +0200262__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000263{
264 struct mid_q_entry *mid;
265 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
266 __u64 wire_mid = le64_to_cpu(shdr->MessageId);
267
268 if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
David Brazdil0f672f62019-12-10 10:32:29 +0000269 cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000270 return NULL;
271 }
272
273 spin_lock(&GlobalMid_Lock);
274 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
275 if ((mid->mid == wire_mid) &&
276 (mid->mid_state == MID_REQUEST_SUBMITTED) &&
277 (mid->command == shdr->Command)) {
278 kref_get(&mid->refcount);
Olivier Deprez0e641232021-09-23 10:07:05 +0200279 if (dequeue) {
280 list_del_init(&mid->qhead);
281 mid->mid_flags |= MID_DELETED;
282 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000283 spin_unlock(&GlobalMid_Lock);
284 return mid;
285 }
286 }
287 spin_unlock(&GlobalMid_Lock);
288 return NULL;
289}
290
Olivier Deprez0e641232021-09-23 10:07:05 +0200291static struct mid_q_entry *
292smb2_find_mid(struct TCP_Server_Info *server, char *buf)
293{
294 return __smb2_find_mid(server, buf, false);
295}
296
297static struct mid_q_entry *
298smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
299{
300 return __smb2_find_mid(server, buf, true);
301}
302
/* Dump the header fields of an SMB2 frame (CONFIG_CIFS_DEBUG2 only). */
static void
smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
{
#ifdef CONFIG_CIFS_DEBUG2
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
			shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
			shdr->ProcessId);
	cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
			server->ops->calc_smb_size(buf, server));
#endif
}
316
317static bool
318smb2_need_neg(struct TCP_Server_Info *server)
319{
320 return server->max_read == 0;
321}
322
323static int
324smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
325{
326 int rc;
David Brazdil0f672f62019-12-10 10:32:29 +0000327
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000328 ses->server->CurrentMid = 0;
329 rc = SMB2_negotiate(xid, ses);
330 /* BB we probably don't need to retry with modern servers */
331 if (rc == -EAGAIN)
332 rc = -EHOSTDOWN;
333 return rc;
334}
335
336static unsigned int
337smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
338{
339 struct TCP_Server_Info *server = tcon->ses->server;
340 unsigned int wsize;
341
342 /* start with specified wsize, or default */
343 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
344 wsize = min_t(unsigned int, wsize, server->max_write);
345#ifdef CONFIG_CIFS_SMB_DIRECT
346 if (server->rdma) {
347 if (server->sign)
348 wsize = min_t(unsigned int,
349 wsize, server->smbd_conn->max_fragmented_send_size);
350 else
351 wsize = min_t(unsigned int,
352 wsize, server->smbd_conn->max_readwrite_size);
353 }
354#endif
355 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
356 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
357
358 return wsize;
359}
360
361static unsigned int
David Brazdil0f672f62019-12-10 10:32:29 +0000362smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
363{
364 struct TCP_Server_Info *server = tcon->ses->server;
365 unsigned int wsize;
366
367 /* start with specified wsize, or default */
368 wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
369 wsize = min_t(unsigned int, wsize, server->max_write);
370#ifdef CONFIG_CIFS_SMB_DIRECT
371 if (server->rdma) {
372 if (server->sign)
373 wsize = min_t(unsigned int,
374 wsize, server->smbd_conn->max_fragmented_send_size);
375 else
376 wsize = min_t(unsigned int,
377 wsize, server->smbd_conn->max_readwrite_size);
378 }
379#endif
380 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
381 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
382
383 return wsize;
384}
385
386static unsigned int
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000387smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
388{
389 struct TCP_Server_Info *server = tcon->ses->server;
390 unsigned int rsize;
391
392 /* start with specified rsize, or default */
393 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
394 rsize = min_t(unsigned int, rsize, server->max_read);
395#ifdef CONFIG_CIFS_SMB_DIRECT
396 if (server->rdma) {
397 if (server->sign)
398 rsize = min_t(unsigned int,
399 rsize, server->smbd_conn->max_fragmented_recv_size);
400 else
401 rsize = min_t(unsigned int,
402 rsize, server->smbd_conn->max_readwrite_size);
403 }
404#endif
405
406 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
407 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
408
409 return rsize;
410}
411
David Brazdil0f672f62019-12-10 10:32:29 +0000412static unsigned int
413smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
414{
415 struct TCP_Server_Info *server = tcon->ses->server;
416 unsigned int rsize;
417
418 /* start with specified rsize, or default */
419 rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
420 rsize = min_t(unsigned int, rsize, server->max_read);
421#ifdef CONFIG_CIFS_SMB_DIRECT
422 if (server->rdma) {
423 if (server->sign)
424 rsize = min_t(unsigned int,
425 rsize, server->smbd_conn->max_fragmented_recv_size);
426 else
427 rsize = min_t(unsigned int,
428 rsize, server->smbd_conn->max_readwrite_size);
429 }
430#endif
431
432 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
433 rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
434
435 return rsize;
436}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000437
438static int
439parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
440 size_t buf_len,
441 struct cifs_server_iface **iface_list,
442 size_t *iface_count)
443{
444 struct network_interface_info_ioctl_rsp *p;
445 struct sockaddr_in *addr4;
446 struct sockaddr_in6 *addr6;
447 struct iface_info_ipv4 *p4;
448 struct iface_info_ipv6 *p6;
449 struct cifs_server_iface *info;
450 ssize_t bytes_left;
451 size_t next = 0;
452 int nb_iface = 0;
453 int rc = 0;
454
455 *iface_list = NULL;
456 *iface_count = 0;
457
458 /*
459 * Fist pass: count and sanity check
460 */
461
462 bytes_left = buf_len;
463 p = buf;
464 while (bytes_left >= sizeof(*p)) {
465 nb_iface++;
466 next = le32_to_cpu(p->Next);
467 if (!next) {
468 bytes_left -= sizeof(*p);
469 break;
470 }
471 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
472 bytes_left -= next;
473 }
474
475 if (!nb_iface) {
476 cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
477 rc = -EINVAL;
478 goto out;
479 }
480
Olivier Deprez0e641232021-09-23 10:07:05 +0200481 /* Azure rounds the buffer size up 8, to a 16 byte boundary */
482 if ((bytes_left > 8) || p->Next)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000483 cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
484
485
486 /*
487 * Second pass: extract info to internal structure
488 */
489
490 *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
491 if (!*iface_list) {
492 rc = -ENOMEM;
493 goto out;
494 }
495
496 info = *iface_list;
497 bytes_left = buf_len;
498 p = buf;
499 while (bytes_left >= sizeof(*p)) {
500 info->speed = le64_to_cpu(p->LinkSpeed);
Olivier Deprez0e641232021-09-23 10:07:05 +0200501 info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
502 info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000503
504 cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
505 cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
506 cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
507 le32_to_cpu(p->Capability));
508
509 switch (p->Family) {
510 /*
511 * The kernel and wire socket structures have the same
512 * layout and use network byte order but make the
513 * conversion explicit in case either one changes.
514 */
515 case INTERNETWORK:
516 addr4 = (struct sockaddr_in *)&info->sockaddr;
517 p4 = (struct iface_info_ipv4 *)p->Buffer;
518 addr4->sin_family = AF_INET;
519 memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
520
521 /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
522 addr4->sin_port = cpu_to_be16(CIFS_PORT);
523
524 cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
525 &addr4->sin_addr);
526 break;
527 case INTERNETWORKV6:
528 addr6 = (struct sockaddr_in6 *)&info->sockaddr;
529 p6 = (struct iface_info_ipv6 *)p->Buffer;
530 addr6->sin6_family = AF_INET6;
531 memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
532
533 /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
534 addr6->sin6_flowinfo = 0;
535 addr6->sin6_scope_id = 0;
536 addr6->sin6_port = cpu_to_be16(CIFS_PORT);
537
538 cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
539 &addr6->sin6_addr);
540 break;
541 default:
542 cifs_dbg(VFS,
543 "%s: skipping unsupported socket family\n",
544 __func__);
545 goto next_iface;
546 }
547
548 (*iface_count)++;
549 info++;
550next_iface:
551 next = le32_to_cpu(p->Next);
552 if (!next)
553 break;
554 p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
555 bytes_left -= next;
556 }
557
558 if (!*iface_count) {
559 rc = -EINVAL;
560 goto out;
561 }
562
563out:
564 if (rc) {
565 kfree(*iface_list);
566 *iface_count = 0;
567 *iface_list = NULL;
568 }
569 return rc;
570}
571
572
573static int
574SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
575{
576 int rc;
577 unsigned int ret_data_len = 0;
578 struct network_interface_info_ioctl_rsp *out_buf = NULL;
579 struct cifs_server_iface *iface_list;
580 size_t iface_count;
581 struct cifs_ses *ses = tcon->ses;
582
583 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
584 FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
585 NULL /* no data input */, 0 /* no data input */,
David Brazdil0f672f62019-12-10 10:32:29 +0000586 CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000587 if (rc == -EOPNOTSUPP) {
588 cifs_dbg(FYI,
589 "server does not support query network interfaces\n");
590 goto out;
591 } else if (rc != 0) {
David Brazdil0f672f62019-12-10 10:32:29 +0000592 cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000593 goto out;
594 }
595
596 rc = parse_server_interfaces(out_buf, ret_data_len,
597 &iface_list, &iface_count);
598 if (rc)
599 goto out;
600
601 spin_lock(&ses->iface_lock);
602 kfree(ses->iface_list);
603 ses->iface_list = iface_list;
604 ses->iface_count = iface_count;
605 ses->iface_last_update = jiffies;
606 spin_unlock(&ses->iface_lock);
607
608out:
609 kfree(out_buf);
610 return rc;
611}
612
613static void
614smb2_close_cached_fid(struct kref *ref)
615{
616 struct cached_fid *cfid = container_of(ref, struct cached_fid,
617 refcount);
618
619 if (cfid->is_valid) {
620 cifs_dbg(FYI, "clear cached root file handle\n");
621 SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
622 cfid->fid->volatile_fid);
623 cfid->is_valid = false;
David Brazdil0f672f62019-12-10 10:32:29 +0000624 cfid->file_all_info_is_valid = false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000625 }
626}
627
628void close_shroot(struct cached_fid *cfid)
629{
630 mutex_lock(&cfid->fid_mutex);
631 kref_put(&cfid->refcount, smb2_close_cached_fid);
632 mutex_unlock(&cfid->fid_mutex);
633}
634
635void
636smb2_cached_lease_break(struct work_struct *work)
637{
638 struct cached_fid *cfid = container_of(work,
639 struct cached_fid, lease_break);
640
641 close_shroot(cfid);
642}
643
644/*
645 * Open the directory at the root of a share
646 */
647int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
648{
David Brazdil0f672f62019-12-10 10:32:29 +0000649 struct cifs_ses *ses = tcon->ses;
650 struct TCP_Server_Info *server = ses->server;
651 struct cifs_open_parms oparms;
652 struct smb2_create_rsp *o_rsp = NULL;
653 struct smb2_query_info_rsp *qi_rsp = NULL;
654 int resp_buftype[2];
655 struct smb_rqst rqst[2];
656 struct kvec rsp_iov[2];
657 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
658 struct kvec qi_iov[1];
659 int rc, flags = 0;
660 __le16 utf16_path = 0; /* Null - since an open of top of share */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000661 u8 oplock = SMB2_OPLOCK_LEVEL_II;
662
663 mutex_lock(&tcon->crfid.fid_mutex);
664 if (tcon->crfid.is_valid) {
665 cifs_dbg(FYI, "found a cached root file handle\n");
666 memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
667 kref_get(&tcon->crfid.refcount);
668 mutex_unlock(&tcon->crfid.fid_mutex);
669 return 0;
670 }
671
David Brazdil0f672f62019-12-10 10:32:29 +0000672 /*
673 * We do not hold the lock for the open because in case
674 * SMB2_open needs to reconnect, it will end up calling
675 * cifs_mark_open_files_invalid() which takes the lock again
676 * thus causing a deadlock
677 */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000678
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000679 mutex_unlock(&tcon->crfid.fid_mutex);
David Brazdil0f672f62019-12-10 10:32:29 +0000680
681 if (smb3_encryption_required(tcon))
682 flags |= CIFS_TRANSFORM_REQ;
683
Olivier Deprez0e641232021-09-23 10:07:05 +0200684 if (!server->ops->new_lease_key)
685 return -EIO;
686
687 server->ops->new_lease_key(pfid);
688
David Brazdil0f672f62019-12-10 10:32:29 +0000689 memset(rqst, 0, sizeof(rqst));
690 resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
691 memset(rsp_iov, 0, sizeof(rsp_iov));
692
693 /* Open */
694 memset(&open_iov, 0, sizeof(open_iov));
695 rqst[0].rq_iov = open_iov;
696 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
697
698 oparms.tcon = tcon;
699 oparms.create_options = 0;
700 oparms.desired_access = FILE_READ_ATTRIBUTES;
701 oparms.disposition = FILE_OPEN;
702 oparms.fid = pfid;
703 oparms.reconnect = false;
704
705 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
706 if (rc)
707 goto oshr_free;
708 smb2_set_next_command(tcon, &rqst[0]);
709
710 memset(&qi_iov, 0, sizeof(qi_iov));
711 rqst[1].rq_iov = qi_iov;
712 rqst[1].rq_nvec = 1;
713
714 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
715 COMPOUND_FID, FILE_ALL_INFORMATION,
716 SMB2_O_INFO_FILE, 0,
717 sizeof(struct smb2_file_all_info) +
718 PATH_MAX * 2, 0, NULL);
719 if (rc)
720 goto oshr_free;
721
722 smb2_set_related(&rqst[1]);
723
724 rc = compound_send_recv(xid, ses, flags, 2, rqst,
725 resp_buftype, rsp_iov);
726 mutex_lock(&tcon->crfid.fid_mutex);
727
728 /*
729 * Now we need to check again as the cached root might have
730 * been successfully re-opened from a concurrent process
731 */
732
733 if (tcon->crfid.is_valid) {
734 /* work was already done */
735
736 /* stash fids for close() later */
737 struct cifs_fid fid = {
738 .persistent_fid = pfid->persistent_fid,
739 .volatile_fid = pfid->volatile_fid,
740 };
741
742 /*
743 * caller expects this func to set pfid to a valid
744 * cached root, so we copy the existing one and get a
745 * reference.
746 */
747 memcpy(pfid, tcon->crfid.fid, sizeof(*pfid));
748 kref_get(&tcon->crfid.refcount);
749
750 mutex_unlock(&tcon->crfid.fid_mutex);
751
752 if (rc == 0) {
753 /* close extra handle outside of crit sec */
754 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
755 }
Olivier Deprez0e641232021-09-23 10:07:05 +0200756 rc = 0;
David Brazdil0f672f62019-12-10 10:32:29 +0000757 goto oshr_free;
758 }
759
760 /* Cached root is still invalid, continue normaly */
761
762 if (rc) {
763 if (rc == -EREMCHG) {
764 tcon->need_reconnect = true;
765 printk_once(KERN_WARNING "server share %s deleted\n",
766 tcon->treeName);
767 }
768 goto oshr_exit;
769 }
770
771 atomic_inc(&tcon->num_remote_opens);
772
773 o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
774 oparms.fid->persistent_fid = o_rsp->PersistentFileId;
775 oparms.fid->volatile_fid = o_rsp->VolatileFileId;
776#ifdef CONFIG_CIFS_DEBUG2
777 oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
778#endif /* CIFS_DEBUG2 */
779
780 memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
781 tcon->crfid.tcon = tcon;
782 tcon->crfid.is_valid = true;
783 kref_init(&tcon->crfid.refcount);
784
785 /* BB TBD check to see if oplock level check can be removed below */
786 if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
787 kref_get(&tcon->crfid.refcount);
788 smb2_parse_contexts(server, o_rsp,
789 &oparms.fid->epoch,
790 oparms.fid->lease_key, &oplock, NULL);
791 } else
792 goto oshr_exit;
793
794 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
795 if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
796 goto oshr_exit;
797 if (!smb2_validate_and_copy_iov(
798 le16_to_cpu(qi_rsp->OutputBufferOffset),
799 sizeof(struct smb2_file_all_info),
800 &rsp_iov[1], sizeof(struct smb2_file_all_info),
801 (char *)&tcon->crfid.file_all_info))
802 tcon->crfid.file_all_info_is_valid = 1;
803
804oshr_exit:
805 mutex_unlock(&tcon->crfid.fid_mutex);
806oshr_free:
807 SMB2_open_free(&rqst[0]);
808 SMB2_query_info_free(&rqst[1]);
809 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
810 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000811 return rc;
812}
813
814static void
815smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
816{
817 int rc;
818 __le16 srch_path = 0; /* Null - open root of share */
819 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
820 struct cifs_open_parms oparms;
821 struct cifs_fid fid;
822 bool no_cached_open = tcon->nohandlecache;
823
824 oparms.tcon = tcon;
825 oparms.desired_access = FILE_READ_ATTRIBUTES;
826 oparms.disposition = FILE_OPEN;
827 oparms.create_options = 0;
828 oparms.fid = &fid;
829 oparms.reconnect = false;
830
831 if (no_cached_open)
832 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
833 NULL);
834 else
835 rc = open_shroot(xid, tcon, &fid);
836
837 if (rc)
838 return;
839
840 SMB3_request_interfaces(xid, tcon);
841
842 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
843 FS_ATTRIBUTE_INFORMATION);
844 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
845 FS_DEVICE_INFORMATION);
846 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
847 FS_VOLUME_INFORMATION);
848 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
849 FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
850 if (no_cached_open)
851 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
852 else
853 close_shroot(&tcon->crfid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000854}
855
856static void
857smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
858{
859 int rc;
860 __le16 srch_path = 0; /* Null - open root of share */
861 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
862 struct cifs_open_parms oparms;
863 struct cifs_fid fid;
864
865 oparms.tcon = tcon;
866 oparms.desired_access = FILE_READ_ATTRIBUTES;
867 oparms.disposition = FILE_OPEN;
868 oparms.create_options = 0;
869 oparms.fid = &fid;
870 oparms.reconnect = false;
871
872 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
873 if (rc)
874 return;
875
876 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
877 FS_ATTRIBUTE_INFORMATION);
878 SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
879 FS_DEVICE_INFORMATION);
880 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000881}
882
883static int
884smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
885 struct cifs_sb_info *cifs_sb, const char *full_path)
886{
887 int rc;
888 __le16 *utf16_path;
889 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
890 struct cifs_open_parms oparms;
891 struct cifs_fid fid;
892
893 if ((*full_path == 0) && tcon->crfid.is_valid)
894 return 0;
895
896 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
897 if (!utf16_path)
898 return -ENOMEM;
899
900 oparms.tcon = tcon;
901 oparms.desired_access = FILE_READ_ATTRIBUTES;
902 oparms.disposition = FILE_OPEN;
903 if (backup_cred(cifs_sb))
904 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
905 else
906 oparms.create_options = 0;
907 oparms.fid = &fid;
908 oparms.reconnect = false;
909
910 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
911 if (rc) {
912 kfree(utf16_path);
913 return rc;
914 }
915
916 rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
917 kfree(utf16_path);
918 return rc;
919}
920
921static int
922smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
923 struct cifs_sb_info *cifs_sb, const char *full_path,
924 u64 *uniqueid, FILE_ALL_INFO *data)
925{
926 *uniqueid = le64_to_cpu(data->IndexNumber);
927 return 0;
928}
929
930static int
931smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
932 struct cifs_fid *fid, FILE_ALL_INFO *data)
933{
934 int rc;
935 struct smb2_file_all_info *smb2_data;
936
937 smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
938 GFP_KERNEL);
939 if (smb2_data == NULL)
940 return -ENOMEM;
941
942 rc = SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid,
943 smb2_data);
944 if (!rc)
945 move_smb2_info_to_cifs(data, smb2_data);
946 kfree(smb2_data);
947 return rc;
948}
949
950#ifdef CONFIG_CIFS_XATTR
/*
 * Copy a server FILE_FULL_EA_INFORMATION list into the flat buffer format
 * the VFS xattr interface expects.
 *
 * Two modes, selected by @ea_name:
 *  - @ea_name != NULL (getxattr): find that one EA and copy its value into
 *    @dst; return the value length, -ERANGE if @dst is too small (or just
 *    the needed length if @dst_size == 0), -ENODATA if not found.
 *  - @ea_name == NULL (listxattr): copy every EA name prefixed with "user."
 *    and NUL-terminated into @dst; return total bytes used, or the size
 *    needed when @dst_size == 0.
 *
 * @src/@src_size describe the raw server response and are validated as we
 * walk the list; -EIO is returned when an entry claims to extend past it.
 */
static ssize_t
move_smb2_ea_to_cifs(char *dst, size_t dst_size,
		     struct smb2_file_full_ea_info *src, size_t src_size,
		     const unsigned char *ea_name)
{
	int rc = 0;
	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
	char *name, *value;
	size_t buf_size = dst_size;
	size_t name_len, value_len, user_name_len;

	while (src_size > 0) {
		/* name is followed by its NUL, then the value bytes */
		name = &src->ea_data[0];
		name_len = (size_t)src->ea_name_length;
		value = &src->ea_data[src->ea_name_length + 1];
		value_len = (size_t)le16_to_cpu(src->ea_value_length);

		if (name_len == 0)
			break;

		/* 8 = fixed header (next_entry_offset/flags/lengths) */
		if (src_size < 8 + name_len + 1 + value_len) {
			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
			rc = -EIO;
			goto out;
		}

		if (ea_name) {
			/* getxattr: looking for one specific attribute */
			if (ea_name_len == name_len &&
			    memcmp(ea_name, name, name_len) == 0) {
				rc = value_len;
				if (dst_size == 0)
					goto out;
				if (dst_size < value_len) {
					rc = -ERANGE;
					goto out;
				}
				memcpy(dst, value, value_len);
				goto out;
			}
		} else {
			/* 'user.' plus a terminating null */
			user_name_len = 5 + 1 + name_len;

			if (buf_size == 0) {
				/* skip copy - calc size only */
				rc += user_name_len;
			} else if (dst_size >= user_name_len) {
				dst_size -= user_name_len;
				memcpy(dst, "user.", 5);
				dst += 5;
				memcpy(dst, src->ea_data, name_len);
				dst += name_len;
				*dst = 0;
				++dst;
				rc += user_name_len;
			} else {
				/* stop before overrun buffer */
				rc = -ERANGE;
				break;
			}
		}

		/* next_entry_offset == 0 marks the final entry */
		if (!src->next_entry_offset)
			break;

		if (src_size < le32_to_cpu(src->next_entry_offset)) {
			/* stop before overrun buffer */
			rc = -ERANGE;
			break;
		}
		src_size -= le32_to_cpu(src->next_entry_offset);
		src = (void *)((char *)src +
			       le32_to_cpu(src->next_entry_offset));
	}

	/* didn't find the named attribute */
	if (ea_name)
		rc = -ENODATA;

out:
	return (ssize_t)rc;
}
1033
/*
 * Query the extended attributes of @path via a compounded
 * create/query-info/close, then translate the response into the VFS xattr
 * format with move_smb2_ea_to_cifs().
 *
 * @ea_name == NULL means listxattr (all names); otherwise getxattr for that
 * one attribute. Returns bytes copied/needed, or a negative errno.
 */
static ssize_t
smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
	       const unsigned char *path, const unsigned char *ea_name,
	       char *ea_data, size_t buf_size,
	       struct cifs_sb_info *cifs_sb)
{
	int rc;
	__le16 *utf16_path;
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	struct smb2_query_info_rsp *rsp;
	struct smb2_file_full_ea_info *info = NULL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/*
	 * Output budget is the max buffer minus room for the create and
	 * close responses that share the compound reply.
	 */
	rc = smb2_query_info_compound(xid, tcon, utf16_path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov, &buftype, cifs_sb);
	if (rc) {
		/*
		 * If ea_name is NULL (listxattr) and there are no EAs,
		 * return 0 as it's not an error. Otherwise, the specified
		 * ea_name was not found.
		 */
		if (!ea_name && rc == -ENODATA)
			rc = 0;
		goto qeas_exit;
	}

	/* sanity-check the offset/length the server claims before using it */
	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_file_full_ea_info));
	if (rc)
		goto qeas_exit;

	info = (struct smb2_file_full_ea_info *)(
			le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
			le32_to_cpu(rsp->OutputBufferLength), ea_name);

 qeas_exit:
	kfree(utf16_path);
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
1088
1089
/*
 * Set (or delete, when @ea_value is NULL) an extended attribute on @path
 * using a single compounded create/set-info/close round trip.
 *
 * Before adding an EA we pre-check that the full EA list would still fit
 * in a query response; otherwise we would be able to write an attribute we
 * could never read back, so -ENOSPC is returned instead.
 *
 * Returns 0 on success or a negative errno.
 */
static int
smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
	    const char *path, const char *ea_name, const void *ea_value,
	    const __u16 ea_value_len, const struct nls_table *nls_codepage,
	    struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	__le16 *utf16_path = NULL;
	int ea_name_len = strlen(ea_name);
	int flags = 0;
	int len;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct cifs_open_parms oparms;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_fid fid;
	struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
	unsigned int size[1];
	void *data[1];
	struct smb2_file_full_ea_info *ea = NULL;
	struct kvec close_iov[1];
	struct smb2_query_info_rsp *rsp;
	int rc, used_len = 0;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	/* protocol limit: EA names are at most 255 bytes */
	if (ea_name_len > 255)
		return -EINVAL;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	if (ses->server->ops->query_all_EAs) {
		if (!ea_value) {
			/* deleting: nothing to do if the EA doesn't exist */
			rc = ses->server->ops->query_all_EAs(xid, tcon, path,
							     ea_name, NULL, 0,
							     cifs_sb);
			if (rc == -ENODATA)
				goto sea_exit;
		} else {
			/* If we are adding a attribute we should first check
			 * if there will be enough space available to store
			 * the new EA. If not we should not add it since we
			 * would not be able to even read the EAs back.
			 */
			rc = smb2_query_info_compound(xid, tcon, utf16_path,
				      FILE_READ_EA,
				      FILE_FULL_EA_INFORMATION,
				      SMB2_O_INFO_FILE,
				      CIFSMaxBufSize -
				      MAX_SMB2_CREATE_RESPONSE_SIZE -
				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
				      &rsp_iov[1], &resp_buftype[1], cifs_sb);
			if (rc == 0) {
				rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
				used_len = le32_to_cpu(rsp->OutputBufferLength);
			}
			/* rsp_iov[1] is reused for the compound below, so
			 * release and clear it now
			 */
			free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
			resp_buftype[1] = CIFS_NO_BUFFER;
			memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
			rc = 0;

			/* Use a fudge factor of 256 bytes in case we collide
			 * with a different set_EAs command.
			 */
			if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
			   MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
			   used_len + ea_name_len + ea_value_len + 1) {
				rc = -ENOSPC;
				goto sea_exit;
			}
		}
	}

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_WRITE_EA;
	oparms.disposition = FILE_OPEN;
	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto sea_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* Set Info */
	memset(&si_iov, 0, sizeof(si_iov));
	rqst[1].rq_iov = si_iov;
	rqst[1].rq_nvec = 1;

	/* header + name + its NUL + value (value absent when deleting) */
	len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
	ea = kzalloc(len, GFP_KERNEL);
	if (ea == NULL) {
		rc = -ENOMEM;
		goto sea_exit;
	}

	ea->ea_name_length = ea_name_len;
	ea->ea_value_length = cpu_to_le16(ea_value_len);
	memcpy(ea->ea_data, ea_name, ea_name_len + 1);
	memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);

	size[0] = len;
	data[0] = ea;

	rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
				COMPOUND_FID, current->tgid,
				FILE_FULL_EA_INFORMATION,
				SMB2_O_INFO_FILE, 0, data, size);
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;
	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);
	/* no need to bump num_remote_opens because handle immediately closed */

 sea_exit:
	kfree(ea);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_set_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
1244#endif
1245
/*
 * Whether an SMB2 echo (keepalive) may be sent right now; the flag is
 * managed by the credit accounting code (see change_conf()).
 */
static bool
smb2_can_echo(struct TCP_Server_Info *server)
{
	return server->echoes;
}
1251
1252static void
1253smb2_clear_stats(struct cifs_tcon *tcon)
1254{
1255 int i;
David Brazdil0f672f62019-12-10 10:32:29 +00001256
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001257 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1258 atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1259 atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1260 }
1261}
1262
1263static void
1264smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
1265{
1266 seq_puts(m, "\n\tShare Capabilities:");
1267 if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
1268 seq_puts(m, " DFS,");
1269 if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
1270 seq_puts(m, " CONTINUOUS AVAILABILITY,");
1271 if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
1272 seq_puts(m, " SCALEOUT,");
1273 if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
1274 seq_puts(m, " CLUSTER,");
1275 if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
1276 seq_puts(m, " ASYMMETRIC,");
1277 if (tcon->capabilities == 0)
1278 seq_puts(m, " None");
1279 if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
1280 seq_puts(m, " Aligned,");
1281 if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
1282 seq_puts(m, " Partition Aligned,");
1283 if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
1284 seq_puts(m, " SSD,");
1285 if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
1286 seq_puts(m, " TRIM-support,");
1287
1288 seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
1289 seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
1290 if (tcon->perf_sector_size)
1291 seq_printf(m, "\tOptimal sector size: 0x%x",
1292 tcon->perf_sector_size);
1293 seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
1294}
1295
1296static void
1297smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1298{
1299 atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1300 atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
1301
1302 /*
1303 * Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1304 * totals (requests sent) since those SMBs are per-session not per tcon
1305 */
1306 seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
1307 (long long)(tcon->bytes_read),
1308 (long long)(tcon->bytes_written));
David Brazdil0f672f62019-12-10 10:32:29 +00001309 seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1310 atomic_read(&tcon->num_local_opens),
1311 atomic_read(&tcon->num_remote_opens));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001312 seq_printf(m, "\nTreeConnects: %d total %d failed",
1313 atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1314 atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
1315 seq_printf(m, "\nTreeDisconnects: %d total %d failed",
1316 atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1317 atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
1318 seq_printf(m, "\nCreates: %d total %d failed",
1319 atomic_read(&sent[SMB2_CREATE_HE]),
1320 atomic_read(&failed[SMB2_CREATE_HE]));
1321 seq_printf(m, "\nCloses: %d total %d failed",
1322 atomic_read(&sent[SMB2_CLOSE_HE]),
1323 atomic_read(&failed[SMB2_CLOSE_HE]));
1324 seq_printf(m, "\nFlushes: %d total %d failed",
1325 atomic_read(&sent[SMB2_FLUSH_HE]),
1326 atomic_read(&failed[SMB2_FLUSH_HE]));
1327 seq_printf(m, "\nReads: %d total %d failed",
1328 atomic_read(&sent[SMB2_READ_HE]),
1329 atomic_read(&failed[SMB2_READ_HE]));
1330 seq_printf(m, "\nWrites: %d total %d failed",
1331 atomic_read(&sent[SMB2_WRITE_HE]),
1332 atomic_read(&failed[SMB2_WRITE_HE]));
1333 seq_printf(m, "\nLocks: %d total %d failed",
1334 atomic_read(&sent[SMB2_LOCK_HE]),
1335 atomic_read(&failed[SMB2_LOCK_HE]));
1336 seq_printf(m, "\nIOCTLs: %d total %d failed",
1337 atomic_read(&sent[SMB2_IOCTL_HE]),
1338 atomic_read(&failed[SMB2_IOCTL_HE]));
1339 seq_printf(m, "\nQueryDirectories: %d total %d failed",
1340 atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1341 atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
1342 seq_printf(m, "\nChangeNotifies: %d total %d failed",
1343 atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1344 atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
1345 seq_printf(m, "\nQueryInfos: %d total %d failed",
1346 atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1347 atomic_read(&failed[SMB2_QUERY_INFO_HE]));
1348 seq_printf(m, "\nSetInfos: %d total %d failed",
1349 atomic_read(&sent[SMB2_SET_INFO_HE]),
1350 atomic_read(&failed[SMB2_SET_INFO_HE]));
1351 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1352 atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1353 atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
1354}
1355
/*
 * Record a freshly-opened handle in the cifsFileInfo and apply the granted
 * oplock/lease level to the inode's caching state.
 */
static void
smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	cfile->fid.persistent_fid = fid->persistent_fid;
	cfile->fid.volatile_fid = fid->volatile_fid;
	cfile->fid.access = fid->access;
#ifdef CONFIG_CIFS_DEBUG2
	cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
	/* translate the granted oplock into local inode caching flags */
	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
				      &fid->purge_cache);
	/* byte-range locks may be cached locally only with a write oplock */
	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}
1373
/* Close the server handle identified by @fid (thin SMB2_close wrapper). */
static void
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
1380
/*
 * Ask the server for a resume key identifying the source file of a
 * server-side copy and store it in @pcchunk->SourceKey.
 *
 * Returns 0 on success; on failure the key is left untouched and a
 * negative errno is returned.
 */
static int
SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
		     u64 persistent_fid, u64 volatile_fid,
		     struct copychunk_ioctl *pcchunk)
{
	int rc;
	unsigned int ret_data_len;
	struct resume_key_req *res_key;

	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
			FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
			NULL, 0 /* no input */, CIFSMaxBufSize,
			(char **)&res_key, &ret_data_len);

	if (rc) {
		cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
		goto req_res_key_exit;
	}
	/* reject short responses before touching res_key->ResumeKey */
	if (ret_data_len < sizeof(struct resume_key_req)) {
		cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
		rc = -EINVAL;
		goto req_res_key_exit;
	}
	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);

req_res_key_exit:
	/* res_key is NULL on error (set by SMB2_ioctl); kfree(NULL) is ok */
	kfree(res_key);
	return rc;
}
1410
David Brazdil0f672f62019-12-10 10:32:29 +00001411static int
1412smb2_ioctl_query_info(const unsigned int xid,
1413 struct cifs_tcon *tcon,
1414 __le16 *path, int is_dir,
1415 unsigned long p)
1416{
1417 struct cifs_ses *ses = tcon->ses;
1418 char __user *arg = (char __user *)p;
1419 struct smb_query_info qi;
1420 struct smb_query_info __user *pqi;
1421 int rc = 0;
1422 int flags = 0;
1423 struct smb2_query_info_rsp *qi_rsp = NULL;
1424 struct smb2_ioctl_rsp *io_rsp = NULL;
1425 void *buffer = NULL;
1426 struct smb_rqst rqst[3];
1427 int resp_buftype[3];
1428 struct kvec rsp_iov[3];
1429 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
1430 struct cifs_open_parms oparms;
1431 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1432 struct cifs_fid fid;
1433 struct kvec qi_iov[1];
1434 struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
1435 struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
1436 struct kvec close_iov[1];
1437 unsigned int size[2];
1438 void *data[2];
1439
1440 memset(rqst, 0, sizeof(rqst));
1441 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1442 memset(rsp_iov, 0, sizeof(rsp_iov));
1443
1444 if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
1445 return -EFAULT;
1446
1447 if (qi.output_buffer_length > 1024)
1448 return -EINVAL;
1449
1450 if (!ses || !(ses->server))
1451 return -EIO;
1452
1453 if (smb3_encryption_required(tcon))
1454 flags |= CIFS_TRANSFORM_REQ;
1455
1456 buffer = kmalloc(qi.output_buffer_length, GFP_KERNEL);
1457 if (buffer == NULL)
1458 return -ENOMEM;
1459
1460 if (copy_from_user(buffer, arg + sizeof(struct smb_query_info),
1461 qi.output_buffer_length)) {
1462 rc = -EFAULT;
1463 goto iqinf_exit;
1464 }
1465
1466 /* Open */
1467 memset(&open_iov, 0, sizeof(open_iov));
1468 rqst[0].rq_iov = open_iov;
1469 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1470
1471 memset(&oparms, 0, sizeof(oparms));
1472 oparms.tcon = tcon;
1473 oparms.disposition = FILE_OPEN;
1474 if (is_dir)
1475 oparms.create_options = CREATE_NOT_FILE;
1476 else
1477 oparms.create_options = CREATE_NOT_DIR;
1478 oparms.fid = &fid;
1479 oparms.reconnect = false;
1480
1481 if (qi.flags & PASSTHRU_FSCTL) {
1482 switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1483 case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1484 oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
1485 break;
1486 case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
1487 oparms.desired_access = GENERIC_ALL;
1488 break;
1489 case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
1490 oparms.desired_access = GENERIC_READ;
1491 break;
1492 case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
1493 oparms.desired_access = GENERIC_WRITE;
1494 break;
1495 }
1496 } else if (qi.flags & PASSTHRU_SET_INFO) {
1497 oparms.desired_access = GENERIC_WRITE;
1498 } else {
1499 oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
1500 }
1501
1502 rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
1503 if (rc)
1504 goto iqinf_exit;
1505 smb2_set_next_command(tcon, &rqst[0]);
1506
1507 /* Query */
1508 if (qi.flags & PASSTHRU_FSCTL) {
1509 /* Can eventually relax perm check since server enforces too */
1510 if (!capable(CAP_SYS_ADMIN))
1511 rc = -EPERM;
1512 else {
1513 memset(&io_iov, 0, sizeof(io_iov));
1514 rqst[1].rq_iov = io_iov;
1515 rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1516
1517 rc = SMB2_ioctl_init(tcon, &rqst[1],
1518 COMPOUND_FID, COMPOUND_FID,
1519 qi.info_type, true, buffer,
1520 qi.output_buffer_length,
Olivier Deprez0e641232021-09-23 10:07:05 +02001521 CIFSMaxBufSize -
1522 MAX_SMB2_CREATE_RESPONSE_SIZE -
1523 MAX_SMB2_CLOSE_RESPONSE_SIZE);
David Brazdil0f672f62019-12-10 10:32:29 +00001524 }
1525 } else if (qi.flags == PASSTHRU_SET_INFO) {
1526 /* Can eventually relax perm check since server enforces too */
1527 if (!capable(CAP_SYS_ADMIN))
1528 rc = -EPERM;
1529 else {
1530 memset(&si_iov, 0, sizeof(si_iov));
1531 rqst[1].rq_iov = si_iov;
1532 rqst[1].rq_nvec = 1;
1533
1534 size[0] = 8;
1535 data[0] = buffer;
1536
1537 rc = SMB2_set_info_init(tcon, &rqst[1],
1538 COMPOUND_FID, COMPOUND_FID,
1539 current->tgid,
1540 FILE_END_OF_FILE_INFORMATION,
1541 SMB2_O_INFO_FILE, 0, data, size);
1542 }
1543 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
1544 memset(&qi_iov, 0, sizeof(qi_iov));
1545 rqst[1].rq_iov = qi_iov;
1546 rqst[1].rq_nvec = 1;
1547
1548 rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
1549 COMPOUND_FID, qi.file_info_class,
1550 qi.info_type, qi.additional_information,
1551 qi.input_buffer_length,
1552 qi.output_buffer_length, buffer);
1553 } else { /* unknown flags */
1554 cifs_tcon_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
1555 rc = -EINVAL;
1556 }
1557
1558 if (rc)
1559 goto iqinf_exit;
1560 smb2_set_next_command(tcon, &rqst[1]);
1561 smb2_set_related(&rqst[1]);
1562
1563 /* Close */
1564 memset(&close_iov, 0, sizeof(close_iov));
1565 rqst[2].rq_iov = close_iov;
1566 rqst[2].rq_nvec = 1;
1567
1568 rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
1569 if (rc)
1570 goto iqinf_exit;
1571 smb2_set_related(&rqst[2]);
1572
1573 rc = compound_send_recv(xid, ses, flags, 3, rqst,
1574 resp_buftype, rsp_iov);
1575 if (rc)
1576 goto iqinf_exit;
1577
1578 /* No need to bump num_remote_opens since handle immediately closed */
1579 if (qi.flags & PASSTHRU_FSCTL) {
1580 pqi = (struct smb_query_info __user *)arg;
1581 io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1582 if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1583 qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
1584 if (qi.input_buffer_length > 0 &&
Olivier Deprez0e641232021-09-23 10:07:05 +02001585 le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
1586 > rsp_iov[1].iov_len)
1587 goto e_fault;
1588
1589 if (copy_to_user(&pqi->input_buffer_length,
1590 &qi.input_buffer_length,
1591 sizeof(qi.input_buffer_length)))
1592 goto e_fault;
1593
David Brazdil0f672f62019-12-10 10:32:29 +00001594 if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1595 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
Olivier Deprez0e641232021-09-23 10:07:05 +02001596 qi.input_buffer_length))
1597 goto e_fault;
David Brazdil0f672f62019-12-10 10:32:29 +00001598 } else {
1599 pqi = (struct smb_query_info __user *)arg;
1600 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1601 if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1602 qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
Olivier Deprez0e641232021-09-23 10:07:05 +02001603 if (copy_to_user(&pqi->input_buffer_length,
1604 &qi.input_buffer_length,
1605 sizeof(qi.input_buffer_length)))
1606 goto e_fault;
1607
1608 if (copy_to_user(pqi + 1, qi_rsp->Buffer,
1609 qi.input_buffer_length))
1610 goto e_fault;
David Brazdil0f672f62019-12-10 10:32:29 +00001611 }
1612
1613 iqinf_exit:
1614 kfree(buffer);
1615 SMB2_open_free(&rqst[0]);
1616 if (qi.flags & PASSTHRU_FSCTL)
1617 SMB2_ioctl_free(&rqst[1]);
1618 else
1619 SMB2_query_info_free(&rqst[1]);
1620
1621 SMB2_close_free(&rqst[2]);
1622 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1623 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1624 free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
1625 return rc;
Olivier Deprez0e641232021-09-23 10:07:05 +02001626
1627e_fault:
1628 rc = -EFAULT;
1629 goto iqinf_exit;
David Brazdil0f672f62019-12-10 10:32:29 +00001630}
1631
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001632static ssize_t
1633smb2_copychunk_range(const unsigned int xid,
1634 struct cifsFileInfo *srcfile,
1635 struct cifsFileInfo *trgtfile, u64 src_off,
1636 u64 len, u64 dest_off)
1637{
1638 int rc;
1639 unsigned int ret_data_len;
1640 struct copychunk_ioctl *pcchunk;
1641 struct copychunk_ioctl_rsp *retbuf = NULL;
1642 struct cifs_tcon *tcon;
1643 int chunks_copied = 0;
1644 bool chunk_sizes_updated = false;
1645 ssize_t bytes_written, total_bytes_written = 0;
1646
1647 pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1648
1649 if (pcchunk == NULL)
1650 return -ENOMEM;
1651
David Brazdil0f672f62019-12-10 10:32:29 +00001652 cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001653 /* Request a key from the server to identify the source of the copy */
1654 rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1655 srcfile->fid.persistent_fid,
1656 srcfile->fid.volatile_fid, pcchunk);
1657
1658 /* Note: request_res_key sets res_key null only if rc !=0 */
1659 if (rc)
1660 goto cchunk_out;
1661
1662 /* For now array only one chunk long, will make more flexible later */
1663 pcchunk->ChunkCount = cpu_to_le32(1);
1664 pcchunk->Reserved = 0;
1665 pcchunk->Reserved2 = 0;
1666
1667 tcon = tlink_tcon(trgtfile->tlink);
1668
1669 while (len > 0) {
1670 pcchunk->SourceOffset = cpu_to_le64(src_off);
1671 pcchunk->TargetOffset = cpu_to_le64(dest_off);
1672 pcchunk->Length =
1673 cpu_to_le32(min_t(u32, len, tcon->max_bytes_chunk));
1674
1675 /* Request server copy to target from src identified by key */
Olivier Deprez0e641232021-09-23 10:07:05 +02001676 kfree(retbuf);
1677 retbuf = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001678 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1679 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
1680 true /* is_fsctl */, (char *)pcchunk,
David Brazdil0f672f62019-12-10 10:32:29 +00001681 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1682 (char **)&retbuf, &ret_data_len);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001683 if (rc == 0) {
1684 if (ret_data_len !=
1685 sizeof(struct copychunk_ioctl_rsp)) {
David Brazdil0f672f62019-12-10 10:32:29 +00001686 cifs_tcon_dbg(VFS, "invalid cchunk response size\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001687 rc = -EIO;
1688 goto cchunk_out;
1689 }
1690 if (retbuf->TotalBytesWritten == 0) {
1691 cifs_dbg(FYI, "no bytes copied\n");
1692 rc = -EIO;
1693 goto cchunk_out;
1694 }
1695 /*
1696 * Check if server claimed to write more than we asked
1697 */
1698 if (le32_to_cpu(retbuf->TotalBytesWritten) >
1699 le32_to_cpu(pcchunk->Length)) {
David Brazdil0f672f62019-12-10 10:32:29 +00001700 cifs_tcon_dbg(VFS, "invalid copy chunk response\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001701 rc = -EIO;
1702 goto cchunk_out;
1703 }
1704 if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
David Brazdil0f672f62019-12-10 10:32:29 +00001705 cifs_tcon_dbg(VFS, "invalid num chunks written\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001706 rc = -EIO;
1707 goto cchunk_out;
1708 }
1709 chunks_copied++;
1710
1711 bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1712 src_off += bytes_written;
1713 dest_off += bytes_written;
1714 len -= bytes_written;
1715 total_bytes_written += bytes_written;
1716
1717 cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
1718 le32_to_cpu(retbuf->ChunksWritten),
1719 le32_to_cpu(retbuf->ChunkBytesWritten),
1720 bytes_written);
1721 } else if (rc == -EINVAL) {
1722 if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1723 goto cchunk_out;
1724
1725 cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1726 le32_to_cpu(retbuf->ChunksWritten),
1727 le32_to_cpu(retbuf->ChunkBytesWritten),
1728 le32_to_cpu(retbuf->TotalBytesWritten));
1729
1730 /*
1731 * Check if this is the first request using these sizes,
1732 * (ie check if copy succeed once with original sizes
1733 * and check if the server gave us different sizes after
1734 * we already updated max sizes on previous request).
1735 * if not then why is the server returning an error now
1736 */
1737 if ((chunks_copied != 0) || chunk_sizes_updated)
1738 goto cchunk_out;
1739
1740 /* Check that server is not asking us to grow size */
1741 if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1742 tcon->max_bytes_chunk)
1743 tcon->max_bytes_chunk =
1744 le32_to_cpu(retbuf->ChunkBytesWritten);
1745 else
1746 goto cchunk_out; /* server gave us bogus size */
1747
1748 /* No need to change MaxChunks since already set to 1 */
1749 chunk_sizes_updated = true;
1750 } else
1751 goto cchunk_out;
1752 }
1753
1754cchunk_out:
1755 kfree(pcchunk);
1756 kfree(retbuf);
1757 if (rc)
1758 return rc;
1759 else
1760 return total_bytes_written;
1761}
1762
/* Flush the server-side handle identified by @fid (SMB2_flush wrapper). */
static int
smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
		struct cifs_fid *fid)
{
	return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
1769
/*
 * Offset of the read payload within an SMB2 read response.
 * DataOffset is returned as-is — presumably a single-byte wire field that
 * needs no endian conversion (TODO confirm against smb2pdu.h).
 */
static unsigned int
smb2_read_data_offset(char *buf)
{
	struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;

	return rsp->DataOffset;
}
1777
1778static unsigned int
1779smb2_read_data_length(char *buf, bool in_remaining)
1780{
1781 struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
1782
1783 if (in_remaining)
1784 return le32_to_cpu(rsp->DataRemaining);
1785
1786 return le32_to_cpu(rsp->DataLength);
1787}
1788
1789
/* Fill the handle into @parms and forward the synchronous read to SMB2_read */
static int
smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
	       struct cifs_io_parms *parms, unsigned int *bytes_read,
	       char **buf, int *buf_type)
{
	parms->persistent_fid = pfid->persistent_fid;
	parms->volatile_fid = pfid->volatile_fid;
	return SMB2_read(xid, parms, bytes_read, buf, buf_type);
}
1799
/* Fill the handle into @parms and forward the synchronous write to SMB2_write */
static int
smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
		struct cifs_io_parms *parms, unsigned int *written,
		struct kvec *iov, unsigned long nr_segs)
{

	parms->persistent_fid = pfid->persistent_fid;
	parms->volatile_fid = pfid->volatile_fid;
	return SMB2_write(xid, parms, written, iov, nr_segs);
}
1810
1811/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
1812static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
1813 struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
1814{
1815 struct cifsInodeInfo *cifsi;
1816 int rc;
1817
1818 cifsi = CIFS_I(inode);
1819
1820 /* if file already sparse don't bother setting sparse again */
1821 if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
1822 return true; /* already sparse */
1823
1824 if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
1825 return true; /* already not sparse */
1826
1827 /*
1828 * Can't check for sparse support on share the usual way via the
1829 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
1830 * since Samba server doesn't set the flag on the share, yet
1831 * supports the set sparse FSCTL and returns sparse correctly
1832 * in the file attributes. If we fail setting sparse though we
1833 * mark that server does not support sparse files for this share
1834 * to avoid repeatedly sending the unsupported fsctl to server
1835 * if the file is repeatedly extended.
1836 */
1837 if (tcon->broken_sparse_sup)
1838 return false;
1839
1840 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1841 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
1842 true /* is_fctl */,
David Brazdil0f672f62019-12-10 10:32:29 +00001843 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001844 if (rc) {
1845 tcon->broken_sparse_sup = true;
1846 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
1847 return false;
1848 }
1849
1850 if (setsparse)
1851 cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
1852 else
1853 cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
1854
1855 return true;
1856}
1857
1858static int
1859smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
1860 struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
1861{
1862 __le64 eof = cpu_to_le64(size);
1863 struct inode *inode;
1864
1865 /*
1866 * If extending file more than one page make sparse. Many Linux fs
1867 * make files sparse by default when extending via ftruncate
1868 */
1869 inode = d_inode(cfile->dentry);
1870
1871 if (!set_alloc && (size > inode->i_size + 8192)) {
1872 __u8 set_sparse = 1;
1873
1874 /* whether set sparse succeeds or not, extend the file */
1875 smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
1876 }
1877
1878 return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
David Brazdil0f672f62019-12-10 10:32:29 +00001879 cfile->fid.volatile_fid, cfile->pid, &eof);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001880}
1881
/*
 * Server-side clone of the byte range [src_off, src_off + len) from srcfile
 * into trgtfile at dest_off using FSCTL_DUPLICATE_EXTENTS_TO_FILE (only
 * offered by block-refcounting filesystems such as ReFS).
 * Returns 0 on success or a negative errno.
 */
static int
smb2_duplicate_extents(const unsigned int xid,
			struct cifsFileInfo *srcfile,
			struct cifsFileInfo *trgtfile, u64 src_off,
			u64 len, u64 dest_off)
{
	int rc;
	unsigned int ret_data_len;
	struct duplicate_extents_to_file dup_ext_buf;
	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);

	/* servers advertise duplicate extent support with this flag */
	if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
	     FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
		return -EOPNOTSUPP;

	dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
	dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
	dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
	dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
	dup_ext_buf.ByteCount = cpu_to_le64(len);
	cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
		src_off, dest_off, len);

	/* extend the target first so the cloned range lies within its EOF */
	rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
	if (rc)
		goto duplicate_extents_out;

	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
			trgtfile->fid.volatile_fid,
			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
			true /* is_fsctl */,
			(char *)&dup_ext_buf,
			sizeof(struct duplicate_extents_to_file),
			CIFSMaxBufSize, NULL,
			&ret_data_len);

	/* this fsctl is not expected to return any output payload */
	if (ret_data_len > 0)
		cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");

duplicate_extents_out:
	return rc;
}
1925
1926static int
1927smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
1928 struct cifsFileInfo *cfile)
1929{
1930 return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
1931 cfile->fid.volatile_fid);
1932}
1933
1934static int
1935smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1936 struct cifsFileInfo *cfile)
1937{
1938 struct fsctl_set_integrity_information_req integr_info;
1939 unsigned int ret_data_len;
1940
1941 integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
1942 integr_info.Flags = 0;
1943 integr_info.Reserved = 0;
1944
1945 return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1946 cfile->fid.volatile_fid,
1947 FSCTL_SET_INTEGRITY_INFORMATION,
1948 true /* is_fsctl */,
1949 (char *)&integr_info,
1950 sizeof(struct fsctl_set_integrity_information_req),
David Brazdil0f672f62019-12-10 10:32:29 +00001951 CIFSMaxBufSize, NULL,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001952 &ret_data_len);
1953
1954}
1955
1956/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1957#define GMT_TOKEN_SIZE 50
1958
David Brazdil0f672f62019-12-10 10:32:29 +00001959#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
1960
/*
 * Input buffer contains (empty) struct smb_snapshot array with size filled in
 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
 *
 * ioc_buf is a userspace pointer: on input it supplies the caller's array
 * size; on output it receives the (possibly truncated) snapshot array.
 */
static int
smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifsFileInfo *cfile, void __user *ioc_buf)
{
	char *retbuf = NULL;
	unsigned int ret_data_len = 0;
	int rc;
	u32 max_response_size;
	struct smb_snapshot_array snapshot_in;

	/*
	 * On the first query to enumerate the list of snapshots available
	 * for this volume the buffer begins with 0 (number of snapshots
	 * which can be returned is zero since at that point we do not know
	 * how big the buffer needs to be). On the second query,
	 * it (ret_data_len) is set to number of snapshots so we can
	 * know to set the maximum response size larger (see below).
	 */
	if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
		return -EFAULT;

	/*
	 * Note that for snapshot queries that servers like Azure expect that
	 * the first query be minimal size (and just used to get the number/size
	 * of previous versions) so response size must be specified as EXACTLY
	 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
	 * of eight bytes.
	 */
	if (ret_data_len == 0)
		max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
	else
		max_response_size = CIFSMaxBufSize;

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_SRV_ENUMERATE_SNAPSHOTS,
			true /* is_fsctl */,
			NULL, 0 /* no input data */, max_response_size,
			(char **)&retbuf,
			&ret_data_len);
	cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
			rc, ret_data_len);
	if (rc)
		return rc;

	if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
		/* Fixup buffer */
		if (copy_from_user(&snapshot_in, ioc_buf,
		    sizeof(struct smb_snapshot_array))) {
			rc = -EFAULT;
			kfree(retbuf);
			return rc;
		}

		/*
		 * Check for min size, ie not large enough to fit even one GMT
		 * token (snapshot). On the first ioctl some users may pass in
		 * smaller size (or zero) to simply get the size of the array
		 * so the user space caller can allocate sufficient memory
		 * and retry the ioctl again with larger array size sufficient
		 * to hold all of the snapshot GMT tokens on the second try.
		 */
		if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
			ret_data_len = sizeof(struct smb_snapshot_array);

		/*
		 * We return struct SRV_SNAPSHOT_ARRAY, followed by
		 * the snapshot array (of 50 byte GMT tokens) each
		 * representing an available previous version of the data
		 */
		if (ret_data_len > (snapshot_in.snapshot_array_size +
					sizeof(struct smb_snapshot_array)))
			ret_data_len = snapshot_in.snapshot_array_size +
					sizeof(struct smb_snapshot_array);

		if (copy_to_user(ioc_buf, retbuf, ret_data_len))
			rc = -EFAULT;
	}

	kfree(retbuf);
	return rc;
}
2047
2048static int
2049smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2050 const char *path, struct cifs_sb_info *cifs_sb,
2051 struct cifs_fid *fid, __u16 search_flags,
2052 struct cifs_search_info *srch_inf)
2053{
2054 __le16 *utf16_path;
2055 int rc;
2056 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2057 struct cifs_open_parms oparms;
2058
2059 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2060 if (!utf16_path)
2061 return -ENOMEM;
2062
2063 oparms.tcon = tcon;
2064 oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
2065 oparms.disposition = FILE_OPEN;
2066 if (backup_cred(cifs_sb))
2067 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2068 else
2069 oparms.create_options = 0;
2070 oparms.fid = fid;
2071 oparms.reconnect = false;
2072
2073 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
2074 kfree(utf16_path);
2075 if (rc) {
2076 cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
2077 return rc;
2078 }
2079
2080 srch_inf->entries_in_buffer = 0;
2081 srch_inf->index_of_last_entry = 2;
2082
2083 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
2084 fid->volatile_fid, 0, srch_inf);
2085 if (rc) {
2086 cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
2087 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2088 }
2089 return rc;
2090}
2091
2092static int
2093smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2094 struct cifs_fid *fid, __u16 search_flags,
2095 struct cifs_search_info *srch_inf)
2096{
2097 return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2098 fid->volatile_fid, 0, srch_inf);
2099}
2100
2101static int
2102smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2103 struct cifs_fid *fid)
2104{
2105 return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2106}
2107
/*
 * If we negotiate SMB2 protocol and get STATUS_PENDING - update
 * the number of credits and return true. Otherwise - return false.
 */
static bool
smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
{
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;

	/* only interim STATUS_PENDING responses are handled here */
	if (shdr->Status != STATUS_PENDING)
		return false;

	if (shdr->CreditRequest) {
		/* interim responses can still grant credits; account them
		 * under req_lock like any other credit update */
		spin_lock(&server->req_lock);
		server->credits += le16_to_cpu(shdr->CreditRequest);
		spin_unlock(&server->req_lock);
		/* wake senders that may be blocked waiting for credits */
		wake_up(&server->request_q);
	}

	return true;
}
2129
2130static bool
2131smb2_is_session_expired(char *buf)
2132{
2133 struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
2134
2135 if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2136 shdr->Status != STATUS_USER_SESSION_DELETED)
2137 return false;
2138
2139 trace_smb3_ses_expired(shdr->TreeId, shdr->SessionId,
2140 le16_to_cpu(shdr->Command),
2141 le64_to_cpu(shdr->MessageId));
2142 cifs_dbg(FYI, "Session expired or deleted\n");
2143
2144 return true;
2145}
2146
2147static int
2148smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
2149 struct cifsInodeInfo *cinode)
2150{
2151 if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2152 return SMB2_lease_break(0, tcon, cinode->lease_key,
2153 smb2_get_lease_state(cinode));
2154
2155 return SMB2_oplock_break(0, tcon, fid->persistent_fid,
2156 fid->volatile_fid,
2157 CIFS_CACHE_READ(cinode) ? 1 : 0);
2158}
2159
David Brazdil0f672f62019-12-10 10:32:29 +00002160void
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002161smb2_set_related(struct smb_rqst *rqst)
2162{
2163 struct smb2_sync_hdr *shdr;
2164
2165 shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
David Brazdil0f672f62019-12-10 10:32:29 +00002166 if (shdr == NULL) {
2167 cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2168 return;
2169 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002170 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2171}
2172
/* Zeroed scratch bytes used to pad compound requests to 8-byte alignment. */
char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2174
/*
 * Mark the request in rqst as a non-final part of a compound chain by
 * filling in its NextCommand offset. The request is first padded out to
 * the 8-byte alignment that compounded SMB2 headers require.
 */
void
smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
{
	struct smb2_sync_hdr *shdr;
	struct cifs_ses *ses = tcon->ses;
	struct TCP_Server_Info *server = ses->server;
	unsigned long len = smb_rqst_len(server, rqst);
	int i, num_padding;

	shdr = (struct smb2_sync_hdr *)(rqst->rq_iov[0].iov_base);
	if (shdr == NULL) {
		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
		return;
	}

	/* SMB headers in a compound are 8 byte aligned. */

	/* No padding needed */
	if (!(len & 7))
		goto finished;

	num_padding = 8 - (len & 7);
	if (!smb3_encryption_required(tcon)) {
		/*
		 * If we do not have encryption then we can just add an extra
		 * iov for the padding.
		 */
		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
		rqst->rq_nvec++;
		len += num_padding;
	} else {
		/*
		 * We can not add a small padding iov for the encryption case
		 * because the encryption framework can not handle the padding
		 * iovs.
		 * We have to flatten this into a single buffer and add
		 * the padding to it.
		 */
		for (i = 1; i < rqst->rq_nvec; i++) {
			/* append each later iov onto the first buffer */
			memcpy(rqst->rq_iov[0].iov_base +
			       rqst->rq_iov[0].iov_len,
			       rqst->rq_iov[i].iov_base,
			       rqst->rq_iov[i].iov_len);
			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
		}
		/* zero-fill the pad so no stale bytes go on the wire */
		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
		       0, num_padding);
		rqst->rq_iov[0].iov_len += num_padding;
		len += num_padding;
		rqst->rq_nvec = 1;
	}

 finished:
	/* offset (from this header) of the next request in the compound */
	shdr->NextCommand = cpu_to_le32(len);
}
2231
/*
 * Passes the query info response back to the caller on success.
 * Caller need to free this with free_rsp_buf().
 *
 * Sends an open / query-info / close compound for utf16_path so the whole
 * lookup costs a single round trip. On success *rsp holds the query info
 * response and *buftype its buffer type (for free_rsp_buf()).
 */
int
smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
			 __le16 *utf16_path, u32 desired_access,
			 u32 class, u32 type, u32 output_len,
			 struct kvec *rsp, int *buftype,
			 struct cifs_sb_info *cifs_sb)
{
	struct cifs_ses *ses = tcon->ses;
	int flags = 0;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	struct kvec close_iov[1];
	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	int rc;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* request 0: open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms.tcon = tcon;
	oparms.desired_access = desired_access;
	oparms.disposition = FILE_OPEN;
	if (cifs_sb && backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[0]);

	/* request 1: query info, COMPOUND_FID refers to the open above */
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
				  class, type, 0,
				  output_len, 0,
				  NULL);
	if (rc)
		goto qic_exit;
	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);

	/* request 2: close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	if (rc)
		goto qic_exit;
	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, ses, flags, 3, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		/* only [1] is freed here; [0] and [2] are freed at qic_exit */
		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
		if (rc == -EREMCHG) {
			/* share was moved or deleted; force a reconnect */
			tcon->need_reconnect = true;
			printk_once(KERN_WARNING "server share %s deleted\n",
				    tcon->treeName);
		}
		goto qic_exit;
	}
	/* ownership of the query info response passes to the caller */
	*rsp = rsp_iov[1];
	*buftype = resp_buftype[1];

	qic_exit:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2326
/*
 * Fill in kstatfs data for the share by querying FS_FULL_SIZE_INFORMATION
 * on the share root through a single open/query/close compound.
 */
static int
smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
	     struct kstatfs *buf)
{
	struct smb2_query_info_rsp *rsp;
	struct smb2_fs_full_size_info *info = NULL;
	__le16 utf16_path = 0; /* Null - open root of share */
	struct kvec rsp_iov = {NULL, 0};
	int buftype = CIFS_NO_BUFFER;
	int rc;


	rc = smb2_query_info_compound(xid, tcon, &utf16_path,
				      FILE_READ_ATTRIBUTES,
				      FS_FULL_SIZE_INFORMATION,
				      SMB2_O_INFO_FILESYSTEM,
				      sizeof(struct smb2_fs_full_size_info),
				      &rsp_iov, &buftype, NULL);
	if (rc)
		goto qfs_exit;

	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
	buf->f_type = SMB2_MAGIC_NUMBER;
	info = (struct smb2_fs_full_size_info *)(
		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
	/* verify the server's offset/length actually fit the response */
	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength),
			       &rsp_iov,
			       sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		smb2_copy_fs_info_to_kstatfs(info, buf);

qfs_exit:
	free_rsp_buf(buftype, rsp_iov.iov_base);
	return rc;
}
2363
2364static int
2365smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2366 struct kstatfs *buf)
2367{
2368 int rc;
2369 __le16 srch_path = 0; /* Null - open root of share */
2370 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2371 struct cifs_open_parms oparms;
2372 struct cifs_fid fid;
2373
2374 if (!tcon->posix_extensions)
2375 return smb2_queryfs(xid, tcon, buf);
2376
2377 oparms.tcon = tcon;
2378 oparms.desired_access = FILE_READ_ATTRIBUTES;
2379 oparms.disposition = FILE_OPEN;
2380 oparms.create_options = 0;
2381 oparms.fid = &fid;
2382 oparms.reconnect = false;
2383
2384 rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, NULL);
2385 if (rc)
2386 return rc;
2387
2388 rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2389 fid.volatile_fid, buf);
2390 buf->f_type = SMB2_MAGIC_NUMBER;
2391 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2392 return rc;
2393}
2394
2395static bool
2396smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2397{
2398 return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2399 ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2400}
2401
2402static int
2403smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2404 __u64 length, __u32 type, int lock, int unlock, bool wait)
2405{
2406 if (unlock && !lock)
2407 type = SMB2_LOCKFLAG_UNLOCK;
2408 return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2409 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2410 current->tgid, length, offset, type, wait);
2411}
2412
/* Copy the inode's cached lease key into the fid. */
static void
smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
{
	memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
}
2418
/* Store the fid's lease key into the inode's cached copy. */
static void
smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
{
	memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
}
2424
/* Generate a fresh random lease key for this fid. */
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
	generate_random_uuid(fid->lease_key);
}
2430
/*
 * Issue FSCTL_DFS_GET_REFERRALS for search_name, preferably over the
 * session's IPC tcon (falling back to any tcon on the session), and parse
 * the referral list into *target_nodes / *num_of_nodes.
 */
static int
smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
		   const char *search_name,
		   struct dfs_info3_param **target_nodes,
		   unsigned int *num_of_nodes,
		   const struct nls_table *nls_codepage, int remap)
{
	int rc;
	__le16 *utf16_path = NULL;
	int utf16_path_len = 0;
	struct cifs_tcon *tcon;
	struct fsctl_get_dfs_referral_req *dfs_req = NULL;
	struct get_dfs_referral_rsp *dfs_rsp = NULL;
	u32 dfs_req_size = 0, dfs_rsp_size = 0;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);

	/*
	 * Try to use the IPC tcon, otherwise just use any
	 */
	tcon = ses->tcon_ipc;
	if (tcon == NULL) {
		/* take a reference on the fallback tcon under the list lock */
		spin_lock(&cifs_tcp_ses_lock);
		tcon = list_first_entry_or_null(&ses->tcon_list,
						struct cifs_tcon,
						tcon_list);
		if (tcon)
			tcon->tc_count++;
		spin_unlock(&cifs_tcp_ses_lock);
	}

	if (tcon == NULL) {
		cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
			 ses);
		rc = -ENOTCONN;
		goto out;
	}

	utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
					   &utf16_path_len,
					   nls_codepage, remap);
	if (!utf16_path) {
		rc = -ENOMEM;
		goto out;
	}

	/* request struct has a flexible trailing name buffer */
	dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
	dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
	if (!dfs_req) {
		rc = -ENOMEM;
		goto out;
	}

	/* Highest DFS referral version understood */
	dfs_req->MaxReferralLevel = DFS_VERSION;

	/* Path to resolve in an UTF-16 null-terminated string */
	memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);

	/* retry while the server asks us to resend (-EAGAIN) */
	do {
		rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
				FSCTL_DFS_GET_REFERRALS,
				true /* is_fsctl */,
				(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
				(char **)&dfs_rsp, &dfs_rsp_size);
	} while (rc == -EAGAIN);

	if (rc) {
		if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
			cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
		goto out;
	}

	rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
				 num_of_nodes, target_nodes,
				 nls_codepage, remap, search_name,
				 true /* is_unicode */);
	if (rc) {
		cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
		goto out;
	}

 out:
	if (tcon && !tcon->ipc) {
		/* ipc tcons are not refcounted */
		spin_lock(&cifs_tcp_ses_lock);
		tcon->tc_count--;
		spin_unlock(&cifs_tcp_ses_lock);
	}
	kfree(utf16_path);
	kfree(dfs_req);
	kfree(dfs_rsp);
	return rc;
}
David Brazdil0f672f62019-12-10 10:32:29 +00002525
/*
 * Decode the symlink target from an NFS-style ('posix') reparse point
 * payload. Only NFS_SPECFILE_LNK entries are supported; the decoded,
 * slash-delimited path is returned in *target_path (caller frees).
 */
static int
parse_reparse_posix(struct reparse_posix_data *symlink_buf,
		       u32 plen, char **target_path,
		       struct cifs_sb_info *cifs_sb)
{
	unsigned int len;

	/* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */
	len = le16_to_cpu(symlink_buf->ReparseDataLength);
	/*
	 * NOTE(review): ReparseDataLength appears to cover InodeType plus
	 * the path data, so passing the full 'len' to the UTF-16 copy below
	 * may overcount by sizeof(InodeType) — TODO confirm against
	 * MS-FSCC 2.1.2.6 and the reparse_posix_data layout.
	 */

	if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) {
		cifs_dbg(VFS, "%lld not a supported symlink type\n",
			le64_to_cpu(symlink_buf->InodeType));
		return -EOPNOTSUPP;
	}

	*target_path = cifs_strndup_from_utf16(
			symlink_buf->PathBuffer,
			len, true, cifs_sb->local_nls);
	if (!(*target_path))
		return -ENOMEM;

	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

	return 0;
}
2553
/*
 * Decode the substitute-name target from a Symbolic Link reparse point
 * buffer (MS-FSCC 2.1.2.4) into *target_path (caller frees).
 */
static int
parse_reparse_symlink(struct reparse_symlink_data_buffer *symlink_buf,
		      u32 plen, char **target_path,
		      struct cifs_sb_info *cifs_sb)
{
	unsigned int sub_len;
	unsigned int sub_offset;

	/* We handle Symbolic Link reparse tag here. See: MS-FSCC 2.1.2.4 */

	sub_offset = le16_to_cpu(symlink_buf->SubstituteNameOffset);
	sub_len = le16_to_cpu(symlink_buf->SubstituteNameLength);
	/* 20 = fixed header bytes before PathBuffer per MS-FSCC 2.1.2.4 */
	if (sub_offset + 20 > plen ||
	    sub_offset + sub_len + 20 > plen) {
		cifs_dbg(VFS, "srv returned malformed symlink buffer\n");
		return -EIO;
	}

	*target_path = cifs_strndup_from_utf16(
				symlink_buf->PathBuffer + sub_offset,
				sub_len, true, cifs_sb->local_nls);
	if (!(*target_path))
		return -ENOMEM;

	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

	return 0;
}
2583
2584static int
2585parse_reparse_point(struct reparse_data_buffer *buf,
2586 u32 plen, char **target_path,
2587 struct cifs_sb_info *cifs_sb)
2588{
2589 if (plen < sizeof(struct reparse_data_buffer)) {
2590 cifs_dbg(VFS, "reparse buffer is too small. Must be "
2591 "at least 8 bytes but was %d\n", plen);
2592 return -EIO;
2593 }
2594
2595 if (plen < le16_to_cpu(buf->ReparseDataLength) +
2596 sizeof(struct reparse_data_buffer)) {
2597 cifs_dbg(VFS, "srv returned invalid reparse buf "
2598 "length: %d\n", plen);
2599 return -EIO;
2600 }
2601
2602 /* See MS-FSCC 2.1.2 */
2603 switch (le32_to_cpu(buf->ReparseTag)) {
2604 case IO_REPARSE_TAG_NFS:
2605 return parse_reparse_posix(
2606 (struct reparse_posix_data *)buf,
2607 plen, target_path, cifs_sb);
2608 case IO_REPARSE_TAG_SYMLINK:
2609 return parse_reparse_symlink(
2610 (struct reparse_symlink_data_buffer *)buf,
2611 plen, target_path, cifs_sb);
2612 default:
2613 cifs_dbg(VFS, "srv returned unknown symlink buffer "
2614 "tag:0x%08x\n", le32_to_cpu(buf->ReparseTag));
2615 return -EOPNOTSUPP;
2616 }
2617}
2618
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002619#define SMB2_SYMLINK_STRUCT_SIZE \
2620 (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
2621
/*
 * Resolve the target of a symlink (or, when is_reparse_point, a reparse
 * point) at full_path. Sends an open/ioctl(FSCTL_GET_REPARSE_POINT)/close
 * compound; for classic SMB symlinks the open is instead expected to FAIL
 * with a symlink error response that carries the substitute name.
 * On success *target_path is allocated and must be freed by the caller.
 */
static int
smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
		   struct cifs_sb_info *cifs_sb, const char *full_path,
		   char **target_path, bool is_reparse_point)
{
	int rc;
	__le16 *utf16_path = NULL;
	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
	struct cifs_open_parms oparms;
	struct cifs_fid fid;
	struct kvec err_iov = {NULL, 0};
	struct smb2_err_rsp *err_buf = NULL;
	struct smb2_symlink_err_rsp *symlink;
	unsigned int sub_len;
	unsigned int sub_offset;
	unsigned int print_len;
	unsigned int print_offset;
	int flags = 0;
	struct smb_rqst rqst[3];
	int resp_buftype[3];
	struct kvec rsp_iov[3];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
	struct kvec close_iov[1];
	struct smb2_create_rsp *create_rsp;
	struct smb2_ioctl_rsp *ioctl_rsp;
	struct reparse_data_buffer *reparse_buf;
	u32 plen;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);

	*target_path = NULL;

	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	memset(&oparms, 0, sizeof(oparms));
	oparms.tcon = tcon;
	oparms.desired_access = FILE_READ_ATTRIBUTES;
	oparms.disposition = FILE_OPEN;

	if (backup_cred(cifs_sb))
		oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
	else
		oparms.create_options = 0;
	/* open the reparse point itself, not what it points at */
	if (is_reparse_point)
		oparms.create_options = OPEN_REPARSE_POINT;

	oparms.fid = &fid;
	oparms.reconnect = false;

	rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto querty_exit;
	smb2_set_next_command(tcon, &rqst[0]);


	/* IOCTL */
	memset(&io_iov, 0, sizeof(io_iov));
	rqst[1].rq_iov = io_iov;
	rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;

	rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
			     fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
			     true /* is_fctl */, NULL, 0,
			     CIFSMaxBufSize -
			     MAX_SMB2_CREATE_RESPONSE_SIZE -
			     MAX_SMB2_CLOSE_RESPONSE_SIZE);
	if (rc)
		goto querty_exit;

	smb2_set_next_command(tcon, &rqst[1]);
	smb2_set_related(&rqst[1]);


	/* Close */
	memset(&close_iov, 0, sizeof(close_iov));
	rqst[2].rq_iov = close_iov;
	rqst[2].rq_nvec = 1;

	rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
	if (rc)
		goto querty_exit;

	smb2_set_related(&rqst[2]);

	rc = compound_send_recv(xid, tcon->ses, flags, 3, rqst,
				resp_buftype, rsp_iov);

	/* remember the open's error response (if any) for the symlink path */
	create_rsp = rsp_iov[0].iov_base;
	if (create_rsp && create_rsp->sync_hdr.Status)
		err_iov = rsp_iov[0];
	ioctl_rsp = rsp_iov[1].iov_base;

	/*
	 * Open was successful and we got an ioctl response.
	 */
	if ((rc == 0) && (is_reparse_point)) {
		/* See MS-FSCC 2.3.23 */

		reparse_buf = (struct reparse_data_buffer *)
			((char *)ioctl_rsp +
			 le32_to_cpu(ioctl_rsp->OutputOffset));
		plen = le32_to_cpu(ioctl_rsp->OutputCount);

		/* reject payloads that extend past the received response */
		if (plen + le32_to_cpu(ioctl_rsp->OutputOffset) >
		    rsp_iov[1].iov_len) {
			cifs_tcon_dbg(VFS, "srv returned invalid ioctl len: %d\n",
				 plen);
			rc = -EIO;
			goto querty_exit;
		}

		rc = parse_reparse_point(reparse_buf, plen, target_path,
					 cifs_sb);
		goto querty_exit;
	}

	/* anything other than a failed open with an error body is ENOENT */
	if (!rc || !err_iov.iov_base) {
		rc = -ENOENT;
		goto querty_exit;
	}

	err_buf = err_iov.iov_base;
	if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
	    err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE) {
		rc = -EINVAL;
		goto querty_exit;
	}

	symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
	if (le32_to_cpu(symlink->SymLinkErrorTag) != SYMLINK_ERROR_TAG ||
	    le32_to_cpu(symlink->ReparseTag) != IO_REPARSE_TAG_SYMLINK) {
		rc = -EINVAL;
		goto querty_exit;
	}

	/* open must fail on symlink - reset rc */
	rc = 0;
	sub_len = le16_to_cpu(symlink->SubstituteNameLength);
	sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
	print_len = le16_to_cpu(symlink->PrintNameLength);
	print_offset = le16_to_cpu(symlink->PrintNameOffset);

	/* bounds-check both names against the error response length */
	if (err_iov.iov_len < SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	if (err_iov.iov_len <
	    SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
		rc = -EINVAL;
		goto querty_exit;
	}

	*target_path = cifs_strndup_from_utf16(
			(char *)symlink->PathBuffer + sub_offset,
			sub_len, true, cifs_sb->local_nls);
	if (!(*target_path)) {
		rc = -ENOMEM;
		goto querty_exit;
	}
	convert_delimiter(*target_path, '/');
	cifs_dbg(FYI, "%s: target path: %s\n", __func__, *target_path);

	querty_exit:
	cifs_dbg(FYI, "query symlink rc %d\n", rc);
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_ioctl_free(&rqst[1]);
	SMB2_close_free(&rqst[2]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
	return rc;
}
2811
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002812static struct cifs_ntsd *
2813get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
2814 const struct cifs_fid *cifsfid, u32 *pacllen)
2815{
2816 struct cifs_ntsd *pntsd = NULL;
2817 unsigned int xid;
2818 int rc = -EOPNOTSUPP;
2819 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2820
2821 if (IS_ERR(tlink))
2822 return ERR_CAST(tlink);
2823
2824 xid = get_xid();
2825 cifs_dbg(FYI, "trying to get acl\n");
2826
2827 rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
2828 cifsfid->volatile_fid, (void **)&pntsd, pacllen);
2829 free_xid(xid);
2830
2831 cifs_put_tlink(tlink);
2832
2833 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2834 if (rc)
2835 return ERR_PTR(rc);
2836 return pntsd;
2837
2838}
2839
2840static struct cifs_ntsd *
2841get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
2842 const char *path, u32 *pacllen)
2843{
2844 struct cifs_ntsd *pntsd = NULL;
2845 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2846 unsigned int xid;
2847 int rc;
2848 struct cifs_tcon *tcon;
2849 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2850 struct cifs_fid fid;
2851 struct cifs_open_parms oparms;
2852 __le16 *utf16_path;
2853
2854 cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
2855 if (IS_ERR(tlink))
2856 return ERR_CAST(tlink);
2857
2858 tcon = tlink_tcon(tlink);
2859 xid = get_xid();
2860
2861 if (backup_cred(cifs_sb))
2862 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2863 else
2864 oparms.create_options = 0;
2865
2866 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2867 if (!utf16_path) {
2868 rc = -ENOMEM;
2869 free_xid(xid);
2870 return ERR_PTR(rc);
2871 }
2872
2873 oparms.tcon = tcon;
2874 oparms.desired_access = READ_CONTROL;
2875 oparms.disposition = FILE_OPEN;
2876 oparms.fid = &fid;
2877 oparms.reconnect = false;
2878
2879 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
2880 kfree(utf16_path);
2881 if (!rc) {
2882 rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2883 fid.volatile_fid, (void **)&pntsd, pacllen);
2884 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2885 }
2886
2887 cifs_put_tlink(tlink);
2888 free_xid(xid);
2889
2890 cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
2891 if (rc)
2892 return ERR_PTR(rc);
2893 return pntsd;
2894}
2895
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002896static int
2897set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
2898 struct inode *inode, const char *path, int aclflag)
2899{
2900 u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2901 unsigned int xid;
2902 int rc, access_flags = 0;
2903 struct cifs_tcon *tcon;
2904 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2905 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
2906 struct cifs_fid fid;
2907 struct cifs_open_parms oparms;
2908 __le16 *utf16_path;
2909
2910 cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
2911 if (IS_ERR(tlink))
2912 return PTR_ERR(tlink);
2913
2914 tcon = tlink_tcon(tlink);
2915 xid = get_xid();
2916
2917 if (backup_cred(cifs_sb))
2918 oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
2919 else
2920 oparms.create_options = 0;
2921
2922 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
2923 access_flags = WRITE_OWNER;
2924 else
2925 access_flags = WRITE_DAC;
2926
2927 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2928 if (!utf16_path) {
2929 rc = -ENOMEM;
2930 free_xid(xid);
2931 return rc;
2932 }
2933
2934 oparms.tcon = tcon;
2935 oparms.desired_access = access_flags;
2936 oparms.disposition = FILE_OPEN;
2937 oparms.path = path;
2938 oparms.fid = &fid;
2939 oparms.reconnect = false;
2940
2941 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL);
2942 kfree(utf16_path);
2943 if (!rc) {
2944 rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
2945 fid.volatile_fid, pnntsd, acllen, aclflag);
2946 SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2947 }
2948
2949 cifs_put_tlink(tlink);
2950 free_xid(xid);
2951 return rc;
2952}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002953
2954/* Retrieve an ACL from the server */
2955static struct cifs_ntsd *
2956get_smb2_acl(struct cifs_sb_info *cifs_sb,
2957 struct inode *inode, const char *path,
2958 u32 *pacllen)
2959{
2960 struct cifs_ntsd *pntsd = NULL;
2961 struct cifsFileInfo *open_file = NULL;
2962
2963 if (inode)
2964 open_file = find_readable_file(CIFS_I(inode), true);
2965 if (!open_file)
2966 return get_smb2_acl_by_path(cifs_sb, path, pacllen);
2967
2968 pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
2969 cifsFileInfo_put(open_file);
2970 return pntsd;
2971}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002972
/*
 * fallocate(FALLOC_FL_ZERO_RANGE): zero [offset, offset+len) on the
 * server via FSCTL_SET_ZERO_DATA, and extend EOF afterwards unless
 * @keep_size is set.  Returns 0 or a negative errno.
 */
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len, bool keep_size)
{
	struct cifs_ses *ses = tcon->ses;
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);

	/*
	 * We zero the range through ioctl, so we need remove the page caches
	 * first, otherwise the data may be inconsistent with the server.
	 */
	truncate_pagecache_range(inode, offset, offset + len - 1);

	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			rc = -EOPNOTSUPP;
			trace_smb3_zero_err(xid, cfile->fid.persistent_fid,
				tcon->tid, ses->Suid, offset, len, rc);
			free_xid(xid);
			return rc;
		}

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	/* BeyondFinalZero is exclusive: first byte past the zeroed range */
	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true,
			(char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			0, NULL, NULL);
	if (rc)
		goto zero_range_exit;

	/*
	 * do we also need to change the size of the file?
	 */
	if (keep_size == false && i_size_read(inode) < offset + len) {
		eof = cpu_to_le64(offset + len);
		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
				  cfile->fid.volatile_fid, cfile->pid, &eof);
	}

 zero_range_exit:
	free_xid(xid);
	if (rc)
		trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len, rc);
	else
		trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
			      ses->Suid, offset, len);
	return rc;
}
3041
/*
 * fallocate(FALLOC_FL_PUNCH_HOLE): deallocate and zero the range
 * [offset, offset+len) by first marking the file sparse and then
 * issuing FSCTL_SET_ZERO_DATA.  Returns 0 or a negative errno.
 */
static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
			    loff_t offset, loff_t len)
{
	struct inode *inode;
	struct cifsFileInfo *cfile = file->private_data;
	struct file_zero_data_information fsctl_buf;
	long rc;
	unsigned int xid;
	__u8 set_sparse = 1;

	xid = get_xid();

	inode = d_inode(cfile->dentry);

	/* Need to make file sparse, if not already, before freeing range. */
	/* Consider adding equivalent for compressed since it could also work */
	if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
		rc = -EOPNOTSUPP;
		free_xid(xid);
		return rc;
	}

	/*
	 * We implement the punch hole through ioctl, so we need remove the page
	 * caches first, otherwise the data may be inconsistent with the server.
	 */
	truncate_pagecache_range(inode, offset, offset + len - 1);

	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);

	/* BeyondFinalZero is exclusive: first byte past the punched range */
	fsctl_buf.FileOffset = cpu_to_le64(offset);
	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
			true /* is_fctl */, (char *)&fsctl_buf,
			sizeof(struct file_zero_data_information),
			CIFSMaxBufSize, NULL, NULL);
	free_xid(xid);
	return rc;
}
3083
/*
 * fallocate(0 or FALLOC_FL_KEEP_SIZE): preallocate [off, off+len).
 * Since SMB files are non-sparse by default this is often a no-op; for
 * sparse files a small range covering (nearly) the whole file is
 * satisfied by clearing the sparse attribute, and an extending request
 * un-sparses the file and moves EOF.  Returns 0 or a negative errno.
 */
static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
			    loff_t off, loff_t len, bool keep_size)
{
	struct inode *inode;
	struct cifsInodeInfo *cifsi;
	struct cifsFileInfo *cfile = file->private_data;
	long rc = -EOPNOTSUPP;
	unsigned int xid;
	__le64 eof;

	xid = get_xid();

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
				tcon->ses->Suid, off, len);
	/* if file not oplocked can't be sure whether asking to extend size */
	if (!CIFS_CACHE_READ(cifsi))
		if (keep_size == false) {
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
			free_xid(xid);
			return rc;
		}

	/*
	 * Files are non-sparse by default so falloc may be a no-op
	 * Must check if file sparse. If not sparse, and not extending
	 * then no need to do anything since file already allocated
	 */
	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
		if (keep_size == true)
			rc = 0;
		/* check if extending file */
		else if (i_size_read(inode) >= off + len)
			/* not extending file and already not sparse */
			rc = 0;
		/* BB: in future add else clause to extend file */
		else
			rc = -EOPNOTSUPP;
		if (rc)
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
		else
			trace_smb3_falloc_done(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len);
		free_xid(xid);
		return rc;
	}

	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
		/*
		 * Check if falloc starts within first few pages of file
		 * and ends within a few pages of the end of file to
		 * ensure that most of file is being forced to be
		 * fallocated now. If so then setting whole file sparse
		 * ie potentially making a few extra pages at the beginning
		 * or end of the file non-sparse via set_sparse is harmless.
		 */
		if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
			rc = -EOPNOTSUPP;
			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
				tcon->tid, tcon->ses->Suid, off, len, rc);
			free_xid(xid);
			return rc;
		}

		/* whole-file allocate: just clear the sparse attribute */
		smb2_set_sparse(xid, tcon, cfile, inode, false);
		rc = 0;
	} else {
		smb2_set_sparse(xid, tcon, cfile, inode, false);
		rc = 0;
		/* extending allocate: move EOF out to off+len */
		if (i_size_read(inode) < off + len) {
			eof = cpu_to_le64(off + len);
			rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
					  cfile->fid.volatile_fid, cfile->pid,
					  &eof);
		}
	}

	if (rc)
		trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
			 tcon->ses->Suid, off, len, rc);
	else
		trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
			 tcon->ses->Suid, off, len);

	free_xid(xid);
	return rc;
}
3175
/*
 * llseek(2) for SEEK_HOLE/SEEK_DATA on SMB3, implemented with
 * FSCTL_QUERY_ALLOCATED_RANGES.  Other whence values fall back to
 * generic_file_llseek().  Returns the new file position or a negative
 * errno (-ENXIO when no data/hole exists past @offset).
 */
static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
{
	struct cifsFileInfo *wrcfile, *cfile = file->private_data;
	struct cifsInodeInfo *cifsi;
	struct inode *inode;
	int rc = 0;
	struct file_allocated_range_buffer in_data, *out_data = NULL;
	u32 out_data_len;
	unsigned int xid;

	if (whence != SEEK_HOLE && whence != SEEK_DATA)
		return generic_file_llseek(file, offset, whence);

	inode = d_inode(cfile->dentry);
	cifsi = CIFS_I(inode);

	if (offset < 0 || offset >= i_size_read(inode))
		return -ENXIO;

	xid = get_xid();
	/*
	 * We need to be sure that all dirty pages are written as they
	 * might fill holes on the server.
	 * Note that we also MUST flush any written pages since at least
	 * some servers (Windows2016) will not reflect recent writes in
	 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
	 */
	wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
	if (wrcfile) {
		filemap_write_and_wait(inode->i_mapping);
		smb2_flush_file(xid, tcon, &wrcfile->fid);
		cifsFileInfo_put(wrcfile);
	}

	/* non-sparse files are fully allocated: only hole is at EOF */
	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
		if (whence == SEEK_HOLE)
			offset = i_size_read(inode);
		goto lseek_exit;
	}

	in_data.file_offset = cpu_to_le64(offset);
	in_data.length = cpu_to_le64(i_size_read(inode));

	/* ask the server for the first allocated range at/after offset */
	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_QUERY_ALLOCATED_RANGES, true,
			(char *)&in_data, sizeof(in_data),
			sizeof(struct file_allocated_range_buffer),
			(char **)&out_data, &out_data_len);
	if (rc == -E2BIG)
		rc = 0;	/* more ranges exist; the first one is enough */
	if (rc)
		goto lseek_exit;

	/* no allocated ranges at all: offset is already in a hole */
	if (whence == SEEK_HOLE && out_data_len == 0)
		goto lseek_exit;

	/* no allocated ranges at all: no data before EOF */
	if (whence == SEEK_DATA && out_data_len == 0) {
		rc = -ENXIO;
		goto lseek_exit;
	}

	if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto lseek_exit;
	}
	if (whence == SEEK_DATA) {
		offset = le64_to_cpu(out_data->file_offset);
		goto lseek_exit;
	}
	/* SEEK_HOLE: if offset precedes the first data range it is in a hole */
	if (offset < le64_to_cpu(out_data->file_offset))
		goto lseek_exit;

	/* offset is inside data; the next hole starts where this range ends */
	offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);

 lseek_exit:
	free_xid(xid);
	kfree(out_data);
	if (!rc)
		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
	else
		return rc;
}
3259
/*
 * fiemap(2) for SMB3: report allocated extents via
 * FSCTL_QUERY_ALLOCATED_RANGES, looping when the server signals
 * (-E2BIG) that more than the requested 1024 ranges exist.
 * Returns 0 or a negative errno.
 */
static int smb3_fiemap(struct cifs_tcon *tcon,
		       struct cifsFileInfo *cfile,
		       struct fiemap_extent_info *fei, u64 start, u64 len)
{
	unsigned int xid;
	struct file_allocated_range_buffer in_data, *out_data;
	u32 out_data_len;
	int i, num, rc, flags, last_blob;
	u64 next;

	if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC))
		return -EBADR;

	xid = get_xid();
 again:
	in_data.file_offset = cpu_to_le64(start);
	in_data.length = cpu_to_le64(len);

	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
			cfile->fid.volatile_fid,
			FSCTL_QUERY_ALLOCATED_RANGES, true,
			(char *)&in_data, sizeof(in_data),
			1024 * sizeof(struct file_allocated_range_buffer),
			(char **)&out_data, &out_data_len);
	if (rc == -E2BIG) {
		last_blob = 0;	/* reply truncated; more ranges to fetch */
		rc = 0;
	} else
		last_blob = 1;
	if (rc)
		goto out;

	/* sanity-check the size of the server's reply */
	if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto out;
	}
	if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
		rc = -EINVAL;
		goto out;
	}

	num = out_data_len / sizeof(struct file_allocated_range_buffer);
	for (i = 0; i < num; i++) {
		flags = 0;
		if (i == num - 1 && last_blob)
			flags |= FIEMAP_EXTENT_LAST;

		/* logical == physical offset: SMB exposes no physical layout */
		rc = fiemap_fill_next_extent(fei,
				le64_to_cpu(out_data[i].file_offset),
				le64_to_cpu(out_data[i].file_offset),
				le64_to_cpu(out_data[i].length),
				flags);
		if (rc < 0)
			goto out;
		if (rc == 1) {
			/* caller's extent array is full; stop without error */
			rc = 0;
			goto out;
		}
	}

	if (!last_blob) {
		/* resume the query just past the last range we received */
		next = le64_to_cpu(out_data[num - 1].file_offset) +
		  le64_to_cpu(out_data[num - 1].length);
		len = len - (next - start);
		start = next;
		goto again;
	}

 out:
	free_xid(xid);
	kfree(out_data);
	return rc;
}
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003333
3334static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3335 loff_t off, loff_t len)
3336{
3337 /* KEEP_SIZE already checked for by do_fallocate */
3338 if (mode & FALLOC_FL_PUNCH_HOLE)
3339 return smb3_punch_hole(file, tcon, off, len);
3340 else if (mode & FALLOC_FL_ZERO_RANGE) {
3341 if (mode & FALLOC_FL_KEEP_SIZE)
3342 return smb3_zero_range(file, tcon, off, len, true);
3343 return smb3_zero_range(file, tcon, off, len, false);
3344 } else if (mode == FALLOC_FL_KEEP_SIZE)
3345 return smb3_simple_falloc(file, tcon, off, len, true);
3346 else if (mode == 0)
3347 return smb3_simple_falloc(file, tcon, off, len, false);
3348
3349 return -EOPNOTSUPP;
3350}
3351
/*
 * SMB2.0 oplock break handler: no leases at this dialect, so simply
 * apply the new oplock level; epoch and purge_cache are not used.
 */
static void
smb2_downgrade_oplock(struct TCP_Server_Info *server,
		      struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
}
3359
3360static void
Olivier Deprez0e641232021-09-23 10:07:05 +02003361smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3362 unsigned int epoch, bool *purge_cache);
3363
/*
 * SMB3 lease break handler: apply the new lease state only when the
 * break carries a newer epoch than the cached one, then decide whether
 * locally cached data must be purged based on the state transition and
 * on whether any epochs (i.e. breaks) were missed.
 */
static void
smb3_downgrade_oplock(struct TCP_Server_Info *server,
		      struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	unsigned int old_state = cinode->oplock;
	unsigned int old_epoch = cinode->epoch;
	unsigned int new_state;

	/* stale break (epoch <= old_epoch): leave cached state untouched */
	if (epoch > old_epoch) {
		smb21_set_oplock_level(cinode, oplock, 0, NULL);
		cinode->epoch = epoch;
	}

	new_state = cinode->oplock;
	*purge_cache = false;

	/* losing read caching invalidates whatever we cached locally */
	if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
	    (new_state & CIFS_CACHE_READ_FLG) == 0)
		*purge_cache = true;
	/* same state but an epoch gap > 1 means we missed a break */
	else if (old_state == new_state && (epoch - old_epoch > 1))
		*purge_cache = true;
}
3387
3388static void
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003389smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3390 unsigned int epoch, bool *purge_cache)
3391{
3392 oplock &= 0xFF;
3393 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3394 return;
3395 if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
3396 cinode->oplock = CIFS_CACHE_RHW_FLG;
3397 cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3398 &cinode->vfs_inode);
3399 } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
3400 cinode->oplock = CIFS_CACHE_RW_FLG;
3401 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3402 &cinode->vfs_inode);
3403 } else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3404 cinode->oplock = CIFS_CACHE_READ_FLG;
3405 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
3406 &cinode->vfs_inode);
3407 } else
3408 cinode->oplock = 0;
3409}
3410
/*
 * Translate a granted SMB2.1+ lease state into the inode's cache flags
 * (read/handle/write) and log an "RHW"-style summary.  If the server
 * granted a classic oplock instead of a lease, defer to the SMB2.0
 * handler.
 */
static void
smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		       unsigned int epoch, bool *purge_cache)
{
	char message[5] = {0};	/* up to "RHW" plus NUL, or "None" */
	unsigned int new_oplock = 0;

	oplock &= 0xFF;
	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
		return;

	/* Check if the server granted an oplock rather than a lease */
	if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
		return smb2_set_oplock_level(cinode, oplock, epoch,
					     purge_cache);

	if (oplock & SMB2_LEASE_READ_CACHING_HE) {
		new_oplock |= CIFS_CACHE_READ_FLG;
		strcat(message, "R");
	}
	if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
		new_oplock |= CIFS_CACHE_HANDLE_FLG;
		strcat(message, "H");
	}
	if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
		new_oplock |= CIFS_CACHE_WRITE_FLG;
		strcat(message, "W");
	}
	if (!new_oplock)
		strncpy(message, "None", sizeof(message));

	cinode->oplock = new_oplock;
	cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
		 &cinode->vfs_inode);
}
3446
/*
 * SMB3 variant: apply the granted lease state, then decide whether
 * cached data must be purged by comparing the old and new cache state
 * and counting skipped lease epochs (each epoch increment corresponds
 * to a state change we may have missed).
 */
static void
smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
		      unsigned int epoch, bool *purge_cache)
{
	unsigned int old_oplock = cinode->oplock;

	smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);

	if (purge_cache) {
		*purge_cache = false;
		if (old_oplock == CIFS_CACHE_READ_FLG) {
			/* held R only: any skipped epoch (or losing R) means
			   the file may have changed under us */
			if (cinode->oplock == CIFS_CACHE_READ_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
			else if (cinode->oplock == 0 &&
				 (epoch - cinode->epoch > 0))
				*purge_cache = true;
		} else if (old_oplock == CIFS_CACHE_RH_FLG) {
			/* held RH: a single-step upgrade is safe, anything
			   else with a gap requires a purge */
			if (cinode->oplock == CIFS_CACHE_RH_FLG &&
			    (epoch - cinode->epoch > 0))
				*purge_cache = true;
			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
				 (epoch - cinode->epoch > 1))
				*purge_cache = true;
		}
		cinode->epoch = epoch;
	}
}
3481
3482static bool
3483smb2_is_read_op(__u32 oplock)
3484{
3485 return oplock == SMB2_OPLOCK_LEVEL_II;
3486}
3487
3488static bool
3489smb21_is_read_op(__u32 oplock)
3490{
3491 return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
3492 !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
3493}
3494
3495static __le32
3496map_oplock_to_lease(u8 oplock)
3497{
3498 if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3499 return SMB2_LEASE_WRITE_CACHING | SMB2_LEASE_READ_CACHING;
3500 else if (oplock == SMB2_OPLOCK_LEVEL_II)
3501 return SMB2_LEASE_READ_CACHING;
3502 else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3503 return SMB2_LEASE_HANDLE_CACHING | SMB2_LEASE_READ_CACHING |
3504 SMB2_LEASE_WRITE_CACHING;
3505 return 0;
3506}
3507
3508static char *
3509smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3510{
3511 struct create_lease *buf;
3512
3513 buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
3514 if (!buf)
3515 return NULL;
3516
3517 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
3518 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
3519
3520 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3521 (struct create_lease, lcontext));
3522 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
3523 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3524 (struct create_lease, Name));
3525 buf->ccontext.NameLength = cpu_to_le16(4);
3526 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
3527 buf->Name[0] = 'R';
3528 buf->Name[1] = 'q';
3529 buf->Name[2] = 'L';
3530 buf->Name[3] = 's';
3531 return (char *)buf;
3532}
3533
3534static char *
3535smb3_create_lease_buf(u8 *lease_key, u8 oplock)
3536{
3537 struct create_lease_v2 *buf;
3538
3539 buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
3540 if (!buf)
3541 return NULL;
3542
3543 memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
3544 buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
3545
3546 buf->ccontext.DataOffset = cpu_to_le16(offsetof
3547 (struct create_lease_v2, lcontext));
3548 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
3549 buf->ccontext.NameOffset = cpu_to_le16(offsetof
3550 (struct create_lease_v2, Name));
3551 buf->ccontext.NameLength = cpu_to_le16(4);
3552 /* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
3553 buf->Name[0] = 'R';
3554 buf->Name[1] = 'q';
3555 buf->Name[2] = 'L';
3556 buf->Name[3] = 's';
3557 return (char *)buf;
3558}
3559
3560static __u8
3561smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
3562{
3563 struct create_lease *lc = (struct create_lease *)buf;
3564
3565 *epoch = 0; /* not used */
3566 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3567 return SMB2_OPLOCK_LEVEL_NOCHANGE;
3568 return le32_to_cpu(lc->lcontext.LeaseState);
3569}
3570
3571static __u8
3572smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
3573{
3574 struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
3575
3576 *epoch = le16_to_cpu(lc->lcontext.Epoch);
3577 if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
3578 return SMB2_OPLOCK_LEVEL_NOCHANGE;
3579 if (lease_key)
3580 memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
3581 return le32_to_cpu(lc->lcontext.LeaseState);
3582}
3583
3584static unsigned int
3585smb2_wp_retry_size(struct inode *inode)
3586{
3587 return min_t(unsigned int, CIFS_SB(inode->i_sb)->wsize,
3588 SMB2_MAX_BUFFER_SIZE);
3589}
3590
3591static bool
3592smb2_dir_needs_close(struct cifsFileInfo *cfile)
3593{
3594 return !cfile->invalidHandle;
3595}
3596
3597static void
3598fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
David Brazdil0f672f62019-12-10 10:32:29 +00003599 struct smb_rqst *old_rq, __le16 cipher_type)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003600{
3601 struct smb2_sync_hdr *shdr =
3602 (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
3603
3604 memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
3605 tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
3606 tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
3607 tr_hdr->Flags = cpu_to_le16(0x01);
David Brazdil0f672f62019-12-10 10:32:29 +00003608 if (cipher_type == SMB2_ENCRYPTION_AES128_GCM)
3609 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
3610 else
3611 get_random_bytes(&tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003612 memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
3613}
3614
/* We can not use the normal sg_set_buf() as we will sometimes pass a
 * stack object as buf.
 */
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
				   unsigned int buflen)
{
	void *page;

	/* VMAP_STACK (at least) puts stack into the vmalloc address space */
	page = is_vmalloc_addr(buf) ? vmalloc_to_page(buf) : virt_to_page(buf);
	sg_set_page(sg, page, buflen, offset_in_page(buf));
}
3631
/* Assumes the first rqst has a transform header as the first iov.
 * I.e.
 * rqst[0].rq_iov[0] is transform header
 * rqst[0].rq_iov[1+] data to be encrypted/decrypted
 * rqst[1+].rq_iov[0+] data to be encrypted/decrypted
 *
 * Builds a scatterlist covering all iovs and pages of the requests plus
 * one trailing entry for the signature.  Returns the kmalloc'd list
 * (caller frees) or NULL on allocation failure.
 */
static struct scatterlist *
init_sg(int num_rqst, struct smb_rqst *rqst, u8 *sign)
{
	unsigned int sg_len;	/* total number of scatterlist entries */
	struct scatterlist *sg;
	unsigned int i;
	unsigned int j;
	unsigned int idx = 0;	/* next free scatterlist slot */
	int skip;

	/* one entry per iov and per page, plus one for the signature */
	sg_len = 1;
	for (i = 0; i < num_rqst; i++)
		sg_len += rqst[i].rq_nvec + rqst[i].rq_npages;

	sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, sg_len);
	for (i = 0; i < num_rqst; i++) {
		for (j = 0; j < rqst[i].rq_nvec; j++) {
			/*
			 * The first rqst has a transform header where the
			 * first 20 bytes are not part of the encrypted blob
			 */
			skip = (i == 0) && (j == 0) ? 20 : 0;
			smb2_sg_set_buf(&sg[idx++],
					rqst[i].rq_iov[j].iov_base + skip,
					rqst[i].rq_iov[j].iov_len - skip);
		}

		for (j = 0; j < rqst[i].rq_npages; j++) {
			unsigned int len, offset;

			rqst_page_get_length(&rqst[i], j, &len, &offset);
			sg_set_page(&sg[idx++], rqst[i].rq_pages[j], len, offset);
		}
	}
	/* final entry carries the signature buffer */
	smb2_sg_set_buf(&sg[idx], sign, SMB2_SIGNATURE_SIZE);
	return sg;
}
3679
3680static int
3681smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
3682{
3683 struct cifs_ses *ses;
3684 u8 *ses_enc_key;
3685
3686 spin_lock(&cifs_tcp_ses_lock);
3687 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
3688 if (ses->Suid != ses_id)
3689 continue;
3690 ses_enc_key = enc ? ses->smb3encryptionkey :
3691 ses->smb3decryptionkey;
3692 memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
3693 spin_unlock(&cifs_tcp_ses_lock);
3694 return 0;
3695 }
3696 spin_unlock(&cifs_tcp_ses_lock);
3697
Olivier Deprez0e641232021-09-23 10:07:05 +02003698 return -EAGAIN;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003699}
3700/*
3701 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
3702 * iov[0] - transform header (associate data),
3703 * iov[1-N] - SMB2 header and pages - data to encrypt.
3704 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
3705 * untouched.
3706 */
/*
 * Encrypt (@enc != 0) or decrypt (@enc == 0) the @num_rqst requests in
 * @rqst in place, using the AES key of the session named in the
 * transform header's SessionId field.
 *
 * rqst[0].rq_iov[0] holds the smb2_transform_hdr.  Its first 20 bytes
 * (ProtocolId, OriginalMessageSize, reserved/flags fields) are skipped
 * by init_sg(); the remainder of the header is the AEAD associated
 * data.  On encryption the computed tag is copied back into the
 * header's Signature field; on decryption the Signature from the header
 * is appended (via the trailing sg entry) so the AEAD can verify it.
 *
 * Returns 0 on success or a negative errno.
 */
static int
crypt_message(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int enc)
{
	struct smb2_transform_hdr *tr_hdr =
		(struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
	/* AAD = transform header minus the 20 unprotected leading bytes */
	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
	int rc = 0;
	struct scatterlist *sg;
	u8 sign[SMB2_SIGNATURE_SIZE] = {};
	u8 key[SMB3_SIGN_KEY_SIZE];
	struct aead_request *req;
	char *iv;
	unsigned int iv_len;
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);

	rc = smb2_get_enc_key(server, tr_hdr->SessionId, enc, key);
	if (rc) {
		cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__,
			 enc ? "en" : "de");
		return rc;
	}

	/* no-op if the tfms for this server already exist */
	rc = smb3_crypto_aead_allocate(server);
	if (rc) {
		cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
		return rc;
	}

	tfm = enc ? server->secmech.ccmaesencrypt :
						server->secmech.ccmaesdecrypt;
	rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
	if (rc) {
		cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
		return rc;
	}

	rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
	if (rc) {
		cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
		return rc;
	}

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		cifs_server_dbg(VFS, "%s: Failed to alloc aead request\n", __func__);
		return -ENOMEM;
	}

	if (!enc) {
		/* stash the received tag; init_sg() places it after the data */
		memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
		crypt_len += SMB2_SIGNATURE_SIZE;
	}

	sg = init_sg(num_rqst, rqst, sign);
	if (!sg) {
		cifs_server_dbg(VFS, "%s: Failed to init sg\n", __func__);
		rc = -ENOMEM;
		goto free_req;
	}

	iv_len = crypto_aead_ivsize(tfm);
	iv = kzalloc(iv_len, GFP_KERNEL);
	if (!iv) {
		cifs_server_dbg(VFS, "%s: Failed to alloc iv\n", __func__);
		rc = -ENOMEM;
		goto free_sg;
	}

	if (server->cipher_type == SMB2_ENCRYPTION_AES128_GCM)
		memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES128GCM_NONCE);
	else {
		/*
		 * CCM mode: the kernel ccm(aes) IV starts with a length
		 * parameter byte (here 3), followed by the wire nonce.
		 */
		iv[0] = 3;
		memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES128CCM_NONCE);
	}

	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
	aead_request_set_ad(req, assoc_data_len);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	/* synchronous wait even if the backend completes asynchronously */
	rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
				: crypto_aead_decrypt(req), &wait);

	if (!rc && enc)
		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);

	kfree(iv);
free_sg:
	kfree(sg);
free_req:
	kfree(req);
	return rc;
}
3804
3805void
3806smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
3807{
3808 int i, j;
3809
3810 for (i = 0; i < num_rqst; i++) {
3811 if (rqst[i].rq_pages) {
3812 for (j = rqst[i].rq_npages - 1; j >= 0; j--)
3813 put_page(rqst[i].rq_pages[j]);
3814 kfree(rqst[i].rq_pages);
3815 }
3816 }
3817}
3818
3819/*
3820 * This function will initialize new_rq and encrypt the content.
3821 * The first entry, new_rq[0], only contains a single iov which contains
3822 * a smb2_transform_hdr and is pre-allocated by the caller.
 * This function then populates new_rq[1+] with the content from old_rq[0+].
3824 *
3825 * The end result is an array of smb_rqst structures where the first structure
3826 * only contains a single iov for the transform header which we then can pass
3827 * to crypt_message().
3828 *
3829 * new_rq[0].rq_iov[0] : smb2_transform_hdr pre-allocated by the caller
3830 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
3831 */
3832static int
3833smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
3834 struct smb_rqst *new_rq, struct smb_rqst *old_rq)
3835{
3836 struct page **pages;
3837 struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
3838 unsigned int npages;
3839 unsigned int orig_len = 0;
3840 int i, j;
3841 int rc = -ENOMEM;
3842
3843 for (i = 1; i < num_rqst; i++) {
3844 npages = old_rq[i - 1].rq_npages;
3845 pages = kmalloc_array(npages, sizeof(struct page *),
3846 GFP_KERNEL);
3847 if (!pages)
3848 goto err_free;
3849
3850 new_rq[i].rq_pages = pages;
3851 new_rq[i].rq_npages = npages;
3852 new_rq[i].rq_offset = old_rq[i - 1].rq_offset;
3853 new_rq[i].rq_pagesz = old_rq[i - 1].rq_pagesz;
3854 new_rq[i].rq_tailsz = old_rq[i - 1].rq_tailsz;
3855 new_rq[i].rq_iov = old_rq[i - 1].rq_iov;
3856 new_rq[i].rq_nvec = old_rq[i - 1].rq_nvec;
3857
3858 orig_len += smb_rqst_len(server, &old_rq[i - 1]);
3859
3860 for (j = 0; j < npages; j++) {
3861 pages[j] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3862 if (!pages[j])
3863 goto err_free;
3864 }
3865
3866 /* copy pages form the old */
3867 for (j = 0; j < npages; j++) {
3868 char *dst, *src;
3869 unsigned int offset, len;
3870
3871 rqst_page_get_length(&new_rq[i], j, &len, &offset);
3872
3873 dst = (char *) kmap(new_rq[i].rq_pages[j]) + offset;
3874 src = (char *) kmap(old_rq[i - 1].rq_pages[j]) + offset;
3875
3876 memcpy(dst, src, len);
3877 kunmap(new_rq[i].rq_pages[j]);
3878 kunmap(old_rq[i - 1].rq_pages[j]);
3879 }
3880 }
3881
3882 /* fill the 1st iov with a transform header */
David Brazdil0f672f62019-12-10 10:32:29 +00003883 fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003884
3885 rc = crypt_message(server, num_rqst, new_rq, 1);
David Brazdil0f672f62019-12-10 10:32:29 +00003886 cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003887 if (rc)
3888 goto err_free;
3889
3890 return rc;
3891
3892err_free:
3893 smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
3894 return rc;
3895}
3896
3897static int
3898smb3_is_transform_hdr(void *buf)
3899{
3900 struct smb2_transform_hdr *trhdr = buf;
3901
3902 return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
3903}
3904
3905static int
3906decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
3907 unsigned int buf_data_size, struct page **pages,
Olivier Deprez0e641232021-09-23 10:07:05 +02003908 unsigned int npages, unsigned int page_data_size,
3909 bool is_offloaded)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003910{
3911 struct kvec iov[2];
3912 struct smb_rqst rqst = {NULL};
3913 int rc;
3914
3915 iov[0].iov_base = buf;
3916 iov[0].iov_len = sizeof(struct smb2_transform_hdr);
3917 iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
3918 iov[1].iov_len = buf_data_size;
3919
3920 rqst.rq_iov = iov;
3921 rqst.rq_nvec = 2;
3922 rqst.rq_pages = pages;
3923 rqst.rq_npages = npages;
3924 rqst.rq_pagesz = PAGE_SIZE;
3925 rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE;
3926
3927 rc = crypt_message(server, 1, &rqst, 0);
David Brazdil0f672f62019-12-10 10:32:29 +00003928 cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003929
3930 if (rc)
3931 return rc;
3932
3933 memmove(buf, iov[1].iov_base, buf_data_size);
3934
Olivier Deprez0e641232021-09-23 10:07:05 +02003935 if (!is_offloaded)
3936 server->total_read = buf_data_size + page_data_size;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003937
3938 return rc;
3939}
3940
3941static int
3942read_data_into_pages(struct TCP_Server_Info *server, struct page **pages,
3943 unsigned int npages, unsigned int len)
3944{
3945 int i;
3946 int length;
3947
3948 for (i = 0; i < npages; i++) {
3949 struct page *page = pages[i];
3950 size_t n;
3951
3952 n = len;
3953 if (len >= PAGE_SIZE) {
3954 /* enough data to fill the page */
3955 n = PAGE_SIZE;
3956 len -= n;
3957 } else {
3958 zero_user(page, len, PAGE_SIZE - len);
3959 len = 0;
3960 }
3961 length = cifs_read_page_from_socket(server, page, 0, n);
3962 if (length < 0)
3963 return length;
3964 server->total_read += length;
3965 }
3966
3967 return 0;
3968}
3969
3970static int
3971init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size,
3972 unsigned int cur_off, struct bio_vec **page_vec)
3973{
3974 struct bio_vec *bvec;
3975 int i;
3976
3977 bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL);
3978 if (!bvec)
3979 return -ENOMEM;
3980
3981 for (i = 0; i < npages; i++) {
3982 bvec[i].bv_page = pages[i];
3983 bvec[i].bv_offset = (i == 0) ? cur_off : 0;
3984 bvec[i].bv_len = min_t(unsigned int, PAGE_SIZE, data_size);
3985 data_size -= bvec[i].bv_len;
3986 }
3987
3988 if (data_size != 0) {
3989 cifs_dbg(VFS, "%s: something went wrong\n", __func__);
3990 kfree(bvec);
3991 return -EIO;
3992 }
3993
3994 *page_vec = bvec;
3995 return 0;
3996}
3997
/*
 * Process a (possibly already decrypted) SMB2 READ response and hand its
 * payload to the readdata's copy_into_pages callback.
 *
 * @buf/@buf_len:  the response header buffer (payload may follow it)
 * @pages/@npages/@page_data_size: pages holding payload that did not fit
 *                 in @buf (encrypted large reads)
 * @is_offloaded:  true when called from the decrypt worker thread; then
 *                 completion is signalled by setting mid->mid_state
 *                 instead of dequeue_mid(), and no reconnect is started.
 *
 * Returns bytes consumed (>= 0) with the mid completed/dequeued, or a
 * negative value on transport-level problems (caller handles the mid).
 */
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		 char *buf, unsigned int buf_len, struct page **pages,
		 unsigned int npages, unsigned int page_data_size,
		 bool is_offloaded)
{
	unsigned int data_offset;
	unsigned int data_len;
	unsigned int cur_off;
	unsigned int cur_page_idx;
	unsigned int pad_len;
	struct cifs_readdata *rdata = mid->callback_data;
	struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
	struct bio_vec *bvec = NULL;
	struct iov_iter iter;
	struct kvec iov;
	int length;
	bool use_rdma_mr = false;

	if (shdr->Command != SMB2_READ) {
		cifs_server_dbg(VFS, "only big read responses are supported\n");
		return -ENOTSUPP;
	}

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		/* offloaded worker must not reconnect; demultiplex thread will */
		if (!is_offloaded)
			cifs_reconnect(server);
		wake_up(&server->response_q);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server))
		return -1;

	/* set up first two iov to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = 0;
	rdata->iov[1].iov_base = buf;
	rdata->iov[1].iov_len =
		min_t(unsigned int, buf_len, server->vals->read_rsp_size);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	rdata->result = server->ops->map_error(buf, true);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_RECEIVED;
		else
			dequeue_mid(mid, false);
		return 0;
	}

	data_offset = server->ops->read_data_offset(buf);
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);

	if (data_offset < server->vals->read_rsp_size) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->vals->read_rsp_size;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_MALFORMED;
		else
			dequeue_mid(mid, rdata->result);
		return 0;
	}

	/* bytes between end of the read response header and the payload */
	pad_len = data_offset - server->vals->read_rsp_size;

	if (buf_len <= data_offset) {
		/* read response payload is in pages */
		cur_page_idx = pad_len / PAGE_SIZE;
		cur_off = pad_len % PAGE_SIZE;

		if (cur_page_idx != 0) {
			/* data offset is beyond the 1st page of response */
			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
				 __func__, data_offset);
			rdata->result = -EIO;
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(mid, rdata->result);
			return 0;
		}

		if (data_len > page_data_size - pad_len) {
			/* data_len is corrupt -- discard frame */
			rdata->result = -EIO;
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(mid, rdata->result);
			return 0;
		}

		rdata->result = init_read_bvec(pages, npages, page_data_size,
					       cur_off, &bvec);
		if (rdata->result != 0) {
			if (is_offloaded)
				mid->mid_state = MID_RESPONSE_MALFORMED;
			else
				dequeue_mid(mid, rdata->result);
			return 0;
		}

		iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
	} else if (buf_len >= data_offset + data_len) {
		/* read response payload is in buf */
		WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
		iov.iov_base = buf + data_offset;
		iov.iov_len = data_len;
		iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
	} else {
		/* read response payload cannot be in both buf and pages */
		WARN_ONCE(1, "buf can not contain only a part of read data");
		rdata->result = -EIO;
		if (is_offloaded)
			mid->mid_state = MID_RESPONSE_MALFORMED;
		else
			dequeue_mid(mid, rdata->result);
		return 0;
	}

	length = rdata->copy_into_pages(server, rdata, &iter);

	kfree(bvec);

	if (length < 0)
		return length;

	if (is_offloaded)
		mid->mid_state = MID_RESPONSE_RECEIVED;
	else
		dequeue_mid(mid, false);
	return length;
}
4154
/*
 * Context for decrypting a large encrypted read response on a worker
 * thread (queued on decrypt_wq) instead of the demultiplex thread.
 */
struct smb2_decrypt_work {
	struct work_struct decrypt;	/* run by smb2_decrypt_offload() */
	struct TCP_Server_Info *server;	/* connection the PDU arrived on */
	struct page **ppages;		/* pages holding the encrypted payload */
	char *buf;			/* buffer with transform + response headers */
	unsigned int npages;		/* number of entries in ppages */
	unsigned int len;		/* payload bytes contained in ppages */
};
4163
4164
/*
 * Worker: decrypt a large read response off the demultiplex thread,
 * then find its mid and complete it via handle_read_data()/callback.
 * Frees all resources attached to the smb2_decrypt_work on exit.
 */
static void smb2_decrypt_offload(struct work_struct *work)
{
	struct smb2_decrypt_work *dw = container_of(work,
				struct smb2_decrypt_work, decrypt);
	int i, rc;
	struct mid_q_entry *mid;

	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
			      dw->ppages, dw->npages, dw->len, true);
	if (rc) {
		/*
		 * NOTE(review): on decrypt failure the mid is never looked up
		 * or completed here, so the waiting request only finishes on
		 * timeout/reconnect — confirm this is the intended recovery.
		 */
		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
		goto free_pages;
	}

	dw->server->lstrp = jiffies;
	/* dequeues the mid; we hold a reference until release below */
	mid = smb2_find_dequeue_mid(dw->server, dw->buf);
	if (mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		mid->decrypted = true;
		rc = handle_read_data(dw->server, mid, dw->buf,
				      dw->server->vals->read_rsp_size,
				      dw->ppages, dw->npages, dw->len,
				      true);
		if (rc >= 0) {
#ifdef CONFIG_CIFS_STATS2
			mid->when_received = jiffies;
#endif
			mid->callback(mid);
		} else {
			spin_lock(&GlobalMid_Lock);
			if (dw->server->tcpStatus == CifsNeedReconnect) {
				mid->mid_state = MID_RETRY_NEEDED;
				spin_unlock(&GlobalMid_Lock);
				mid->callback(mid);
			} else {
				/* requeue: let the demultiplex thread retry it */
				mid->mid_state = MID_REQUEST_SUBMITTED;
				mid->mid_flags &= ~(MID_DELETED);
				list_add_tail(&mid->qhead,
					&dw->server->pending_mid_q);
				spin_unlock(&GlobalMid_Lock);
			}
		}
		cifs_mid_q_entry_release(mid);
	}

free_pages:
	for (i = dw->npages-1; i >= 0; i--)
		put_page(dw->ppages[i]);

	kfree(dw->ppages);
	cifs_small_buf_release(dw->buf);
	kfree(dw);
}
4219
4220
/*
 * Receive an encrypted PDU large enough that it must be a read response.
 * Reads the headers into server->smallbuf and the payload into freshly
 * allocated pages, then either queues decryption to a worker thread
 * (large PDUs, when server->min_offload allows) or decrypts inline and
 * completes the read via handle_read_data().
 *
 * On the offload path returns -1 with *num_mids == 0 (the worker owns
 * the buffer and pages); otherwise returns the handling result with
 * *mid set if the matching request was found.
 */
static int
receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
		       int *num_mids)
{
	char *buf = server->smallbuf;
	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
	unsigned int npages;
	struct page **pages;
	unsigned int len;
	unsigned int buflen = server->pdu_size;
	int rc;
	int i = 0;
	struct smb2_decrypt_work *dw;

	*num_mids = 1;
	/* finish reading transform header + read response header */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
		sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;

	rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
	if (rc < 0)
		return rc;
	server->total_read += rc;

	/* payload length = decrypted size minus the read response header */
	len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
		server->vals->read_rsp_size;
	npages = DIV_ROUND_UP(len, PAGE_SIZE);

	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto discard_data;
	}

	for (; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			rc = -ENOMEM;
			goto discard_data;
		}
	}

	/* read the payload data into the pages */
	rc = read_data_into_pages(server, pages, npages, len);
	if (rc)
		goto free_pages;

	rc = cifs_discard_remaining_data(server);
	if (rc)
		goto free_pages;

	/*
	 * For large reads, offload to different thread for better performance,
	 * use more cores decrypting which can be expensive
	 */

	if ((server->min_offload) && (server->in_flight > 1) &&
	    (server->pdu_size >= server->min_offload)) {
		dw = kmalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
		if (dw == NULL)
			goto non_offloaded_decrypt;

		/* hand the current small buffer to the worker, take a new one */
		dw->buf = server->smallbuf;
		server->smallbuf = (char *)cifs_small_buf_get();

		INIT_WORK(&dw->decrypt, smb2_decrypt_offload);

		dw->npages = npages;
		dw->server = server;
		dw->ppages = pages;
		dw->len = len;
		queue_work(decrypt_wq, &dw->decrypt);
		*num_mids = 0; /* worker thread takes care of finding mid */
		return -1;
	}

non_offloaded_decrypt:
	rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
			      pages, npages, len, false);
	if (rc)
		goto free_pages;

	*mid = smb2_find_mid(server, buf);
	if (*mid == NULL)
		cifs_dbg(FYI, "mid not found\n");
	else {
		cifs_dbg(FYI, "mid found\n");
		(*mid)->decrypted = true;
		rc = handle_read_data(server, *mid, buf,
				      server->vals->read_rsp_size,
				      pages, npages, len, false);
	}

free_pages:
	/* i is the count of successfully allocated pages */
	for (i = i - 1; i >= 0; i--)
		put_page(pages[i]);
	kfree(pages);
	return rc;
discard_data:
	cifs_discard_remaining_data(server);
	goto free_pages;
}
4322
4323static int
4324receive_encrypted_standard(struct TCP_Server_Info *server,
4325 struct mid_q_entry **mids, char **bufs,
4326 int *num_mids)
4327{
4328 int ret, length;
4329 char *buf = server->smallbuf;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004330 struct smb2_sync_hdr *shdr;
4331 unsigned int pdu_length = server->pdu_size;
4332 unsigned int buf_size;
4333 struct mid_q_entry *mid_entry;
4334 int next_is_large;
4335 char *next_buffer = NULL;
4336
4337 *num_mids = 0;
4338
4339 /* switch to large buffer if too big for a small one */
4340 if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
4341 server->large_buf = true;
4342 memcpy(server->bigbuf, buf, server->total_read);
4343 buf = server->bigbuf;
4344 }
4345
4346 /* now read the rest */
4347 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
4348 pdu_length - HEADER_SIZE(server) + 1);
4349 if (length < 0)
4350 return length;
4351 server->total_read += length;
4352
4353 buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
Olivier Deprez0e641232021-09-23 10:07:05 +02004354 length = decrypt_raw_data(server, buf, buf_size, NULL, 0, 0, false);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004355 if (length)
4356 return length;
4357
4358 next_is_large = server->large_buf;
David Brazdil0f672f62019-12-10 10:32:29 +00004359one_more:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004360 shdr = (struct smb2_sync_hdr *)buf;
4361 if (shdr->NextCommand) {
David Brazdil0f672f62019-12-10 10:32:29 +00004362 if (next_is_large)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004363 next_buffer = (char *)cifs_buf_get();
David Brazdil0f672f62019-12-10 10:32:29 +00004364 else
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004365 next_buffer = (char *)cifs_small_buf_get();
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004366 memcpy(next_buffer,
David Brazdil0f672f62019-12-10 10:32:29 +00004367 buf + le32_to_cpu(shdr->NextCommand),
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004368 pdu_length - le32_to_cpu(shdr->NextCommand));
4369 }
4370
4371 mid_entry = smb2_find_mid(server, buf);
4372 if (mid_entry == NULL)
4373 cifs_dbg(FYI, "mid not found\n");
4374 else {
4375 cifs_dbg(FYI, "mid found\n");
4376 mid_entry->decrypted = true;
4377 mid_entry->resp_buf_size = server->pdu_size;
4378 }
4379
4380 if (*num_mids >= MAX_COMPOUND) {
David Brazdil0f672f62019-12-10 10:32:29 +00004381 cifs_server_dbg(VFS, "too many PDUs in compound\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004382 return -1;
4383 }
4384 bufs[*num_mids] = buf;
4385 mids[(*num_mids)++] = mid_entry;
4386
4387 if (mid_entry && mid_entry->handle)
4388 ret = mid_entry->handle(server, mid_entry);
4389 else
4390 ret = cifs_handle_standard(server, mid_entry);
4391
4392 if (ret == 0 && shdr->NextCommand) {
4393 pdu_length -= le32_to_cpu(shdr->NextCommand);
4394 server->large_buf = next_is_large;
4395 if (next_is_large)
David Brazdil0f672f62019-12-10 10:32:29 +00004396 server->bigbuf = buf = next_buffer;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004397 else
David Brazdil0f672f62019-12-10 10:32:29 +00004398 server->smallbuf = buf = next_buffer;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004399 goto one_more;
David Brazdil0f672f62019-12-10 10:32:29 +00004400 } else if (ret != 0) {
4401 /*
4402 * ret != 0 here means that we didn't get to handle_mid() thus
4403 * server->smallbuf and server->bigbuf are still valid. We need
4404 * to free next_buffer because it is not going to be used
4405 * anywhere.
4406 */
4407 if (next_is_large)
4408 free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
4409 else
4410 free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004411 }
4412
4413 return ret;
4414}
4415
4416static int
4417smb3_receive_transform(struct TCP_Server_Info *server,
4418 struct mid_q_entry **mids, char **bufs, int *num_mids)
4419{
4420 char *buf = server->smallbuf;
4421 unsigned int pdu_length = server->pdu_size;
4422 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4423 unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4424
4425 if (pdu_length < sizeof(struct smb2_transform_hdr) +
4426 sizeof(struct smb2_sync_hdr)) {
David Brazdil0f672f62019-12-10 10:32:29 +00004427 cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004428 pdu_length);
4429 cifs_reconnect(server);
4430 wake_up(&server->response_q);
4431 return -ECONNABORTED;
4432 }
4433
4434 if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
David Brazdil0f672f62019-12-10 10:32:29 +00004435 cifs_server_dbg(VFS, "Transform message is broken\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004436 cifs_reconnect(server);
4437 wake_up(&server->response_q);
4438 return -ECONNABORTED;
4439 }
4440
4441 /* TODO: add support for compounds containing READ. */
David Brazdil0f672f62019-12-10 10:32:29 +00004442 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
4443 return receive_encrypted_read(server, &mids[0], num_mids);
4444 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004445
4446 return receive_encrypted_standard(server, mids, bufs, num_mids);
4447}
4448
4449int
4450smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4451{
4452 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
4453
4454 return handle_read_data(server, mid, buf, server->pdu_size,
Olivier Deprez0e641232021-09-23 10:07:05 +02004455 NULL, 0, 0, false);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004456}
4457
4458static int
4459smb2_next_header(char *buf)
4460{
4461 struct smb2_sync_hdr *hdr = (struct smb2_sync_hdr *)buf;
4462 struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
4463
4464 if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM)
4465 return sizeof(struct smb2_transform_hdr) +
4466 le32_to_cpu(t_hdr->OriginalMessageSize);
4467
4468 return le32_to_cpu(hdr->NextCommand);
4469}
4470
/*
 * Create a special file (character or block device node) using the SFU
 * (Services for Unix) emulation scheme: a regular file whose first
 * sizeof(struct win_dev) bytes hold an "IntxCHR"/"IntxBLK" tag plus the
 * major/minor numbers.  Only available when mounted with 'sfu'; other
 * node types (socket, fifo) are rejected with -EPERM.
 *
 * Returns 0 on success or a negative errno.
 */
static int
smb2_make_node(unsigned int xid, struct inode *inode,
	       struct dentry *dentry, struct cifs_tcon *tcon,
	       char *full_path, umode_t mode, dev_t dev)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	int rc = -EPERM;
	int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
	FILE_ALL_INFO *buf = NULL;
	struct cifs_io_parms io_parms;
	__u32 oplock = 0;
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	unsigned int bytes_written;
	struct win_dev *pdev;
	struct kvec iov[2];

	/*
	 * Check if mounted with the 'sfu' mount option.
	 * SFU emulation should work with all servers, but only
	 * supports block and char device (no socket & fifo),
	 * and was used by default in earlier versions of Windows
	 */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
		goto out;

	/*
	 * TODO: Add ability to create instead via reparse point. Windows (e.g.
	 * their current NFS server) uses this approach to expose special files
	 * over SMB2/SMB3 and Samba will do this with SMB3.1.1 POSIX Extensions
	 */

	if (!S_ISCHR(mode) && !S_ISBLK(mode))
		goto out;

	cifs_dbg(FYI, "sfu compat create special file\n");

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (buf == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = GENERIC_WRITE;
	oparms.create_options = create_options;
	oparms.disposition = FILE_CREATE;
	oparms.path = full_path;
	oparms.fid = &fid;
	oparms.reconnect = false;

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;
	rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
	if (rc)
		goto out;

	/*
	 * BB Do not bother to decode buf since no local inode yet to put
	 * timestamps in, but we can reuse it safely.
	 */

	pdev = (struct win_dev *)buf;
	io_parms.pid = current->tgid;
	io_parms.tcon = tcon;
	io_parms.offset = 0;
	io_parms.length = sizeof(struct win_dev);
	/* iov[0] is reserved for the write request header */
	iov[1].iov_base = buf;
	iov[1].iov_len = sizeof(struct win_dev);
	if (S_ISCHR(mode)) {
		memcpy(pdev->type, "IntxCHR", 8);
		pdev->major = cpu_to_le64(MAJOR(dev));
		pdev->minor = cpu_to_le64(MINOR(dev));
		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
							&bytes_written, iov, 1);
	} else if (S_ISBLK(mode)) {
		memcpy(pdev->type, "IntxBLK", 8);
		pdev->major = cpu_to_le64(MAJOR(dev));
		pdev->minor = cpu_to_le64(MINOR(dev));
		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
							&bytes_written, iov, 1);
	}
	tcon->ses->server->ops->close(xid, tcon, &fid);
	/* force revalidation so the new node is looked up from the server */
	d_drop(dentry);

	/* FIXME: add code here to set EAs */
out:
	kfree(buf);
	return rc;
}
4567
4568
/*
 * Dispatch table of protocol operations for the SMB 2.0.2 dialect.
 * Later dialects (2.1, 3.0, ...) define their own tables below, mostly
 * sharing these handlers and overriding where the protocol differs.
 */
struct smb_version_operations smb20_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = cifs_wait_mtu_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb2_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb2_negotiate_wsize,
	.negotiate_rsize = smb2_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb2_qfs_tcon,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.query_symlink = smb2_query_symlink,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.calc_signature = smb2_calc_signature,
	.is_read_op = smb2_is_read_op,
	.set_oplock_level = smb2_set_oplock_level,
	.create_lease_buf = smb2_create_lease_buf,
	.parse_lease_buf = smb2_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
};
4665
4666struct smb_version_operations smb21_operations = {
4667 .compare_fids = smb2_compare_fids,
4668 .setup_request = smb2_setup_request,
4669 .setup_async_request = smb2_setup_async_request,
4670 .check_receive = smb2_check_receive,
4671 .add_credits = smb2_add_credits,
4672 .set_credits = smb2_set_credits,
4673 .get_credits_field = smb2_get_credits_field,
4674 .get_credits = smb2_get_credits,
4675 .wait_mtu_credits = smb2_wait_mtu_credits,
David Brazdil0f672f62019-12-10 10:32:29 +00004676 .adjust_credits = smb2_adjust_credits,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004677 .get_next_mid = smb2_get_next_mid,
David Brazdil0f672f62019-12-10 10:32:29 +00004678 .revert_current_mid = smb2_revert_current_mid,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004679 .read_data_offset = smb2_read_data_offset,
4680 .read_data_length = smb2_read_data_length,
4681 .map_error = map_smb2_to_linux_error,
4682 .find_mid = smb2_find_mid,
4683 .check_message = smb2_check_message,
4684 .dump_detail = smb2_dump_detail,
4685 .clear_stats = smb2_clear_stats,
4686 .print_stats = smb2_print_stats,
4687 .is_oplock_break = smb2_is_valid_oplock_break,
4688 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Olivier Deprez0e641232021-09-23 10:07:05 +02004689 .downgrade_oplock = smb2_downgrade_oplock,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004690 .need_neg = smb2_need_neg,
4691 .negotiate = smb2_negotiate,
4692 .negotiate_wsize = smb2_negotiate_wsize,
4693 .negotiate_rsize = smb2_negotiate_rsize,
4694 .sess_setup = SMB2_sess_setup,
4695 .logoff = SMB2_logoff,
4696 .tree_connect = SMB2_tcon,
4697 .tree_disconnect = SMB2_tdis,
4698 .qfs_tcon = smb2_qfs_tcon,
4699 .is_path_accessible = smb2_is_path_accessible,
4700 .can_echo = smb2_can_echo,
4701 .echo = SMB2_echo,
4702 .query_path_info = smb2_query_path_info,
4703 .get_srv_inum = smb2_get_srv_inum,
4704 .query_file_info = smb2_query_file_info,
4705 .set_path_size = smb2_set_path_size,
4706 .set_file_size = smb2_set_file_size,
4707 .set_file_info = smb2_set_file_info,
4708 .set_compression = smb2_set_compression,
4709 .mkdir = smb2_mkdir,
4710 .mkdir_setinfo = smb2_mkdir_setinfo,
4711 .rmdir = smb2_rmdir,
4712 .unlink = smb2_unlink,
4713 .rename = smb2_rename_path,
4714 .create_hardlink = smb2_create_hardlink,
4715 .query_symlink = smb2_query_symlink,
4716 .query_mf_symlink = smb3_query_mf_symlink,
4717 .create_mf_symlink = smb3_create_mf_symlink,
4718 .open = smb2_open_file,
4719 .set_fid = smb2_set_fid,
4720 .close = smb2_close_file,
4721 .flush = smb2_flush_file,
4722 .async_readv = smb2_async_readv,
4723 .async_writev = smb2_async_writev,
4724 .sync_read = smb2_sync_read,
4725 .sync_write = smb2_sync_write,
4726 .query_dir_first = smb2_query_dir_first,
4727 .query_dir_next = smb2_query_dir_next,
4728 .close_dir = smb2_close_dir,
4729 .calc_smb_size = smb2_calc_size,
4730 .is_status_pending = smb2_is_status_pending,
4731 .is_session_expired = smb2_is_session_expired,
4732 .oplock_response = smb2_oplock_response,
4733 .queryfs = smb2_queryfs,
4734 .mand_lock = smb2_mand_lock,
4735 .mand_unlock_range = smb2_unlock_range,
4736 .push_mand_locks = smb2_push_mandatory_locks,
4737 .get_lease_key = smb2_get_lease_key,
4738 .set_lease_key = smb2_set_lease_key,
4739 .new_lease_key = smb2_new_lease_key,
4740 .calc_signature = smb2_calc_signature,
4741 .is_read_op = smb21_is_read_op,
4742 .set_oplock_level = smb21_set_oplock_level,
4743 .create_lease_buf = smb2_create_lease_buf,
4744 .parse_lease_buf = smb2_parse_lease_buf,
4745 .copychunk_range = smb2_copychunk_range,
4746 .wp_retry_size = smb2_wp_retry_size,
4747 .dir_needs_close = smb2_dir_needs_close,
4748 .enum_snapshots = smb3_enum_snapshots,
4749 .get_dfs_refer = smb2_get_dfs_refer,
4750 .select_sectype = smb2_select_sectype,
4751#ifdef CONFIG_CIFS_XATTR
4752 .query_all_EAs = smb2_query_eas,
4753 .set_EA = smb2_set_ea,
4754#endif /* CIFS_XATTR */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004755 .get_acl = get_smb2_acl,
4756 .get_acl_by_fid = get_smb2_acl_by_fid,
4757 .set_acl = set_smb2_acl,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004758 .next_header = smb2_next_header,
David Brazdil0f672f62019-12-10 10:32:29 +00004759 .ioctl_query_info = smb2_ioctl_query_info,
4760 .make_node = smb2_make_node,
4761 .fiemap = smb3_fiemap,
4762 .llseek = smb3_llseek,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004763};
4764
4765struct smb_version_operations smb30_operations = {
4766 .compare_fids = smb2_compare_fids,
4767 .setup_request = smb2_setup_request,
4768 .setup_async_request = smb2_setup_async_request,
4769 .check_receive = smb2_check_receive,
4770 .add_credits = smb2_add_credits,
4771 .set_credits = smb2_set_credits,
4772 .get_credits_field = smb2_get_credits_field,
4773 .get_credits = smb2_get_credits,
4774 .wait_mtu_credits = smb2_wait_mtu_credits,
David Brazdil0f672f62019-12-10 10:32:29 +00004775 .adjust_credits = smb2_adjust_credits,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004776 .get_next_mid = smb2_get_next_mid,
David Brazdil0f672f62019-12-10 10:32:29 +00004777 .revert_current_mid = smb2_revert_current_mid,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004778 .read_data_offset = smb2_read_data_offset,
4779 .read_data_length = smb2_read_data_length,
4780 .map_error = map_smb2_to_linux_error,
4781 .find_mid = smb2_find_mid,
4782 .check_message = smb2_check_message,
4783 .dump_detail = smb2_dump_detail,
4784 .clear_stats = smb2_clear_stats,
4785 .print_stats = smb2_print_stats,
4786 .dump_share_caps = smb2_dump_share_caps,
4787 .is_oplock_break = smb2_is_valid_oplock_break,
4788 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Olivier Deprez0e641232021-09-23 10:07:05 +02004789 .downgrade_oplock = smb3_downgrade_oplock,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004790 .need_neg = smb2_need_neg,
4791 .negotiate = smb2_negotiate,
David Brazdil0f672f62019-12-10 10:32:29 +00004792 .negotiate_wsize = smb3_negotiate_wsize,
4793 .negotiate_rsize = smb3_negotiate_rsize,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004794 .sess_setup = SMB2_sess_setup,
4795 .logoff = SMB2_logoff,
4796 .tree_connect = SMB2_tcon,
4797 .tree_disconnect = SMB2_tdis,
4798 .qfs_tcon = smb3_qfs_tcon,
4799 .is_path_accessible = smb2_is_path_accessible,
4800 .can_echo = smb2_can_echo,
4801 .echo = SMB2_echo,
4802 .query_path_info = smb2_query_path_info,
4803 .get_srv_inum = smb2_get_srv_inum,
4804 .query_file_info = smb2_query_file_info,
4805 .set_path_size = smb2_set_path_size,
4806 .set_file_size = smb2_set_file_size,
4807 .set_file_info = smb2_set_file_info,
4808 .set_compression = smb2_set_compression,
4809 .mkdir = smb2_mkdir,
4810 .mkdir_setinfo = smb2_mkdir_setinfo,
4811 .rmdir = smb2_rmdir,
4812 .unlink = smb2_unlink,
4813 .rename = smb2_rename_path,
4814 .create_hardlink = smb2_create_hardlink,
4815 .query_symlink = smb2_query_symlink,
4816 .query_mf_symlink = smb3_query_mf_symlink,
4817 .create_mf_symlink = smb3_create_mf_symlink,
4818 .open = smb2_open_file,
4819 .set_fid = smb2_set_fid,
4820 .close = smb2_close_file,
4821 .flush = smb2_flush_file,
4822 .async_readv = smb2_async_readv,
4823 .async_writev = smb2_async_writev,
4824 .sync_read = smb2_sync_read,
4825 .sync_write = smb2_sync_write,
4826 .query_dir_first = smb2_query_dir_first,
4827 .query_dir_next = smb2_query_dir_next,
4828 .close_dir = smb2_close_dir,
4829 .calc_smb_size = smb2_calc_size,
4830 .is_status_pending = smb2_is_status_pending,
4831 .is_session_expired = smb2_is_session_expired,
4832 .oplock_response = smb2_oplock_response,
4833 .queryfs = smb2_queryfs,
4834 .mand_lock = smb2_mand_lock,
4835 .mand_unlock_range = smb2_unlock_range,
4836 .push_mand_locks = smb2_push_mandatory_locks,
4837 .get_lease_key = smb2_get_lease_key,
4838 .set_lease_key = smb2_set_lease_key,
4839 .new_lease_key = smb2_new_lease_key,
4840 .generate_signingkey = generate_smb30signingkey,
4841 .calc_signature = smb3_calc_signature,
4842 .set_integrity = smb3_set_integrity,
4843 .is_read_op = smb21_is_read_op,
4844 .set_oplock_level = smb3_set_oplock_level,
4845 .create_lease_buf = smb3_create_lease_buf,
4846 .parse_lease_buf = smb3_parse_lease_buf,
4847 .copychunk_range = smb2_copychunk_range,
4848 .duplicate_extents = smb2_duplicate_extents,
4849 .validate_negotiate = smb3_validate_negotiate,
4850 .wp_retry_size = smb2_wp_retry_size,
4851 .dir_needs_close = smb2_dir_needs_close,
4852 .fallocate = smb3_fallocate,
4853 .enum_snapshots = smb3_enum_snapshots,
4854 .init_transform_rq = smb3_init_transform_rq,
4855 .is_transform_hdr = smb3_is_transform_hdr,
4856 .receive_transform = smb3_receive_transform,
4857 .get_dfs_refer = smb2_get_dfs_refer,
4858 .select_sectype = smb2_select_sectype,
4859#ifdef CONFIG_CIFS_XATTR
4860 .query_all_EAs = smb2_query_eas,
4861 .set_EA = smb2_set_ea,
4862#endif /* CIFS_XATTR */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004863 .get_acl = get_smb2_acl,
4864 .get_acl_by_fid = get_smb2_acl_by_fid,
4865 .set_acl = set_smb2_acl,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004866 .next_header = smb2_next_header,
David Brazdil0f672f62019-12-10 10:32:29 +00004867 .ioctl_query_info = smb2_ioctl_query_info,
4868 .make_node = smb2_make_node,
4869 .fiemap = smb3_fiemap,
4870 .llseek = smb3_llseek,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004871};
4872
4873struct smb_version_operations smb311_operations = {
4874 .compare_fids = smb2_compare_fids,
4875 .setup_request = smb2_setup_request,
4876 .setup_async_request = smb2_setup_async_request,
4877 .check_receive = smb2_check_receive,
4878 .add_credits = smb2_add_credits,
4879 .set_credits = smb2_set_credits,
4880 .get_credits_field = smb2_get_credits_field,
4881 .get_credits = smb2_get_credits,
4882 .wait_mtu_credits = smb2_wait_mtu_credits,
David Brazdil0f672f62019-12-10 10:32:29 +00004883 .adjust_credits = smb2_adjust_credits,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004884 .get_next_mid = smb2_get_next_mid,
David Brazdil0f672f62019-12-10 10:32:29 +00004885 .revert_current_mid = smb2_revert_current_mid,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004886 .read_data_offset = smb2_read_data_offset,
4887 .read_data_length = smb2_read_data_length,
4888 .map_error = map_smb2_to_linux_error,
4889 .find_mid = smb2_find_mid,
4890 .check_message = smb2_check_message,
4891 .dump_detail = smb2_dump_detail,
4892 .clear_stats = smb2_clear_stats,
4893 .print_stats = smb2_print_stats,
4894 .dump_share_caps = smb2_dump_share_caps,
4895 .is_oplock_break = smb2_is_valid_oplock_break,
4896 .handle_cancelled_mid = smb2_handle_cancelled_mid,
Olivier Deprez0e641232021-09-23 10:07:05 +02004897 .downgrade_oplock = smb3_downgrade_oplock,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004898 .need_neg = smb2_need_neg,
4899 .negotiate = smb2_negotiate,
David Brazdil0f672f62019-12-10 10:32:29 +00004900 .negotiate_wsize = smb3_negotiate_wsize,
4901 .negotiate_rsize = smb3_negotiate_rsize,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004902 .sess_setup = SMB2_sess_setup,
4903 .logoff = SMB2_logoff,
4904 .tree_connect = SMB2_tcon,
4905 .tree_disconnect = SMB2_tdis,
4906 .qfs_tcon = smb3_qfs_tcon,
4907 .is_path_accessible = smb2_is_path_accessible,
4908 .can_echo = smb2_can_echo,
4909 .echo = SMB2_echo,
4910 .query_path_info = smb2_query_path_info,
4911 .get_srv_inum = smb2_get_srv_inum,
4912 .query_file_info = smb2_query_file_info,
4913 .set_path_size = smb2_set_path_size,
4914 .set_file_size = smb2_set_file_size,
4915 .set_file_info = smb2_set_file_info,
4916 .set_compression = smb2_set_compression,
4917 .mkdir = smb2_mkdir,
4918 .mkdir_setinfo = smb2_mkdir_setinfo,
4919 .posix_mkdir = smb311_posix_mkdir,
4920 .rmdir = smb2_rmdir,
4921 .unlink = smb2_unlink,
4922 .rename = smb2_rename_path,
4923 .create_hardlink = smb2_create_hardlink,
4924 .query_symlink = smb2_query_symlink,
4925 .query_mf_symlink = smb3_query_mf_symlink,
4926 .create_mf_symlink = smb3_create_mf_symlink,
4927 .open = smb2_open_file,
4928 .set_fid = smb2_set_fid,
4929 .close = smb2_close_file,
4930 .flush = smb2_flush_file,
4931 .async_readv = smb2_async_readv,
4932 .async_writev = smb2_async_writev,
4933 .sync_read = smb2_sync_read,
4934 .sync_write = smb2_sync_write,
4935 .query_dir_first = smb2_query_dir_first,
4936 .query_dir_next = smb2_query_dir_next,
4937 .close_dir = smb2_close_dir,
4938 .calc_smb_size = smb2_calc_size,
4939 .is_status_pending = smb2_is_status_pending,
4940 .is_session_expired = smb2_is_session_expired,
4941 .oplock_response = smb2_oplock_response,
4942 .queryfs = smb311_queryfs,
4943 .mand_lock = smb2_mand_lock,
4944 .mand_unlock_range = smb2_unlock_range,
4945 .push_mand_locks = smb2_push_mandatory_locks,
4946 .get_lease_key = smb2_get_lease_key,
4947 .set_lease_key = smb2_set_lease_key,
4948 .new_lease_key = smb2_new_lease_key,
4949 .generate_signingkey = generate_smb311signingkey,
4950 .calc_signature = smb3_calc_signature,
4951 .set_integrity = smb3_set_integrity,
4952 .is_read_op = smb21_is_read_op,
4953 .set_oplock_level = smb3_set_oplock_level,
4954 .create_lease_buf = smb3_create_lease_buf,
4955 .parse_lease_buf = smb3_parse_lease_buf,
4956 .copychunk_range = smb2_copychunk_range,
4957 .duplicate_extents = smb2_duplicate_extents,
4958/* .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
4959 .wp_retry_size = smb2_wp_retry_size,
4960 .dir_needs_close = smb2_dir_needs_close,
4961 .fallocate = smb3_fallocate,
4962 .enum_snapshots = smb3_enum_snapshots,
4963 .init_transform_rq = smb3_init_transform_rq,
4964 .is_transform_hdr = smb3_is_transform_hdr,
4965 .receive_transform = smb3_receive_transform,
4966 .get_dfs_refer = smb2_get_dfs_refer,
4967 .select_sectype = smb2_select_sectype,
4968#ifdef CONFIG_CIFS_XATTR
4969 .query_all_EAs = smb2_query_eas,
4970 .set_EA = smb2_set_ea,
4971#endif /* CIFS_XATTR */
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004972 .get_acl = get_smb2_acl,
4973 .get_acl_by_fid = get_smb2_acl_by_fid,
4974 .set_acl = set_smb2_acl,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004975 .next_header = smb2_next_header,
David Brazdil0f672f62019-12-10 10:32:29 +00004976 .ioctl_query_info = smb2_ioctl_query_info,
4977 .make_node = smb2_make_node,
4978 .fiemap = smb3_fiemap,
4979 .llseek = smb3_llseek,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00004980};
4981
4982struct smb_version_values smb20_values = {
4983 .version_string = SMB20_VERSION_STRING,
4984 .protocol_id = SMB20_PROT_ID,
4985 .req_capabilities = 0, /* MBZ */
4986 .large_lock_type = 0,
4987 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
4988 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
4989 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
4990 .header_size = sizeof(struct smb2_sync_hdr),
4991 .header_preamble_size = 0,
4992 .max_header_size = MAX_SMB2_HDR_SIZE,
4993 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
4994 .lock_cmd = SMB2_LOCK,
4995 .cap_unix = 0,
4996 .cap_nt_find = SMB2_NT_FIND,
4997 .cap_large_files = SMB2_LARGE_FILES,
4998 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
4999 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5000 .create_lease_size = sizeof(struct create_lease),
5001};
5002
5003struct smb_version_values smb21_values = {
5004 .version_string = SMB21_VERSION_STRING,
5005 .protocol_id = SMB21_PROT_ID,
5006 .req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
5007 .large_lock_type = 0,
5008 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5009 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5010 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5011 .header_size = sizeof(struct smb2_sync_hdr),
5012 .header_preamble_size = 0,
5013 .max_header_size = MAX_SMB2_HDR_SIZE,
5014 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5015 .lock_cmd = SMB2_LOCK,
5016 .cap_unix = 0,
5017 .cap_nt_find = SMB2_NT_FIND,
5018 .cap_large_files = SMB2_LARGE_FILES,
5019 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5020 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5021 .create_lease_size = sizeof(struct create_lease),
5022};
5023
5024struct smb_version_values smb3any_values = {
5025 .version_string = SMB3ANY_VERSION_STRING,
5026 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
5027 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5028 .large_lock_type = 0,
5029 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5030 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5031 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5032 .header_size = sizeof(struct smb2_sync_hdr),
5033 .header_preamble_size = 0,
5034 .max_header_size = MAX_SMB2_HDR_SIZE,
5035 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5036 .lock_cmd = SMB2_LOCK,
5037 .cap_unix = 0,
5038 .cap_nt_find = SMB2_NT_FIND,
5039 .cap_large_files = SMB2_LARGE_FILES,
5040 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5041 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5042 .create_lease_size = sizeof(struct create_lease_v2),
5043};
5044
5045struct smb_version_values smbdefault_values = {
5046 .version_string = SMBDEFAULT_VERSION_STRING,
5047 .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
5048 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5049 .large_lock_type = 0,
5050 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5051 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5052 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5053 .header_size = sizeof(struct smb2_sync_hdr),
5054 .header_preamble_size = 0,
5055 .max_header_size = MAX_SMB2_HDR_SIZE,
5056 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5057 .lock_cmd = SMB2_LOCK,
5058 .cap_unix = 0,
5059 .cap_nt_find = SMB2_NT_FIND,
5060 .cap_large_files = SMB2_LARGE_FILES,
5061 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5062 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5063 .create_lease_size = sizeof(struct create_lease_v2),
5064};
5065
5066struct smb_version_values smb30_values = {
5067 .version_string = SMB30_VERSION_STRING,
5068 .protocol_id = SMB30_PROT_ID,
5069 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5070 .large_lock_type = 0,
5071 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5072 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5073 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5074 .header_size = sizeof(struct smb2_sync_hdr),
5075 .header_preamble_size = 0,
5076 .max_header_size = MAX_SMB2_HDR_SIZE,
5077 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5078 .lock_cmd = SMB2_LOCK,
5079 .cap_unix = 0,
5080 .cap_nt_find = SMB2_NT_FIND,
5081 .cap_large_files = SMB2_LARGE_FILES,
5082 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5083 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5084 .create_lease_size = sizeof(struct create_lease_v2),
5085};
5086
5087struct smb_version_values smb302_values = {
5088 .version_string = SMB302_VERSION_STRING,
5089 .protocol_id = SMB302_PROT_ID,
5090 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5091 .large_lock_type = 0,
5092 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5093 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5094 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5095 .header_size = sizeof(struct smb2_sync_hdr),
5096 .header_preamble_size = 0,
5097 .max_header_size = MAX_SMB2_HDR_SIZE,
5098 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5099 .lock_cmd = SMB2_LOCK,
5100 .cap_unix = 0,
5101 .cap_nt_find = SMB2_NT_FIND,
5102 .cap_large_files = SMB2_LARGE_FILES,
5103 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5104 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5105 .create_lease_size = sizeof(struct create_lease_v2),
5106};
5107
5108struct smb_version_values smb311_values = {
5109 .version_string = SMB311_VERSION_STRING,
5110 .protocol_id = SMB311_PROT_ID,
5111 .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
5112 .large_lock_type = 0,
5113 .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
5114 .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
5115 .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
5116 .header_size = sizeof(struct smb2_sync_hdr),
5117 .header_preamble_size = 0,
5118 .max_header_size = MAX_SMB2_HDR_SIZE,
5119 .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
5120 .lock_cmd = SMB2_LOCK,
5121 .cap_unix = 0,
5122 .cap_nt_find = SMB2_NT_FIND,
5123 .cap_large_files = SMB2_LARGE_FILES,
5124 .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
5125 .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
5126 .create_lease_size = sizeof(struct create_lease_v2),
5127};