1// SPDX-License-Identifier: GPL-2.0
2/*
3 *  SMB2 version specific operations
4 *
5 *  Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
6 */
7
8#include <linux/pagemap.h>
9#include <linux/vfs.h>
10#include <linux/falloc.h>
11#include <linux/scatterlist.h>
12#include <linux/uuid.h>
13#include <linux/sort.h>
14#include <crypto/aead.h>
15#include <linux/fiemap.h>
16#include <uapi/linux/magic.h>
17#include "cifsfs.h"
18#include "cifsglob.h"
19#include "smb2pdu.h"
20#include "smb2proto.h"
21#include "cifsproto.h"
22#include "cifs_debug.h"
23#include "cifs_unicode.h"
24#include "smb2status.h"
25#include "smb2glob.h"
26#include "cifs_ioctl.h"
27#include "smbdirect.h"
28#include "fscache.h"
29#include "fs_context.h"
30#include "cached_dir.h"
31#include "reparse.h"
32
33/* Change credits for different ops and return the total number of credits */
34static int
35change_conf(struct TCP_Server_Info *server)
36{
37	server->credits += server->echo_credits + server->oplock_credits;
38	if (server->credits > server->max_credits)
39		server->credits = server->max_credits;
40	server->oplock_credits = server->echo_credits = 0;
41	switch (server->credits) {
42	case 0:
43		return 0;
44	case 1:
45		server->echoes = false;
46		server->oplocks = false;
47		break;
48	case 2:
49		server->echoes = true;
50		server->oplocks = false;
51		server->echo_credits = 1;
52		break;
53	default:
54		server->echoes = true;
55		if (enable_oplocks) {
56			server->oplocks = true;
57			server->oplock_credits = 1;
58		} else
59			server->oplocks = false;
60
61		server->echo_credits = 1;
62	}
63	server->credits -= server->echo_credits + server->oplock_credits;
64	return server->credits + server->echo_credits + server->oplock_credits;
65}
66
67static void
68smb2_add_credits(struct TCP_Server_Info *server,
69		 const struct cifs_credits *credits, const int optype)
70{
71	int *val, rc = -1;
72	int scredits, in_flight;
73	unsigned int add = credits->value;
74	unsigned int instance = credits->instance;
75	bool reconnect_detected = false;
76	bool reconnect_with_invalid_credits = false;
77
78	spin_lock(&server->req_lock);
79	val = server->ops->get_credits_field(server, optype);
80
81	/* eg found case where write overlapping reconnect messed up credits */
82	if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
83		reconnect_with_invalid_credits = true;
84
85	if ((instance == 0) || (instance == server->reconnect_instance))
86		*val += add;
87	else
88		reconnect_detected = true;
89
90	if (*val > 65000) {
91		*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
92		pr_warn_once("server overflowed SMB3 credits\n");
93		trace_smb3_overflow_credits(server->CurrentMid,
94					    server->conn_id, server->hostname, *val,
95					    add, server->in_flight);
96	}
97	WARN_ON_ONCE(server->in_flight == 0);
98	server->in_flight--;
99	if (server->in_flight == 0 &&
100	   ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) &&
101	   ((optype & CIFS_OP_MASK) != CIFS_SESS_OP))
102		rc = change_conf(server);
103	/*
104	 * Sometimes server returns 0 credits on oplock break ack - we need to
105	 * rebalance credits in this case.
106	 */
107	else if (server->in_flight > 0 && server->oplock_credits == 0 &&
108		 server->oplocks) {
109		if (server->credits > 1) {
110			server->credits--;
111			server->oplock_credits++;
112		}
113	} else if ((server->in_flight > 0) && (server->oplock_credits > 3) &&
114		   ((optype & CIFS_OP_MASK) == CIFS_OBREAK_OP))
115		/* if now have too many oplock credits, rebalance so don't starve normal ops */
116		change_conf(server);
117
118	scredits = *val;
119	in_flight = server->in_flight;
120	spin_unlock(&server->req_lock);
121	wake_up(&server->request_q);
122
123	if (reconnect_detected) {
124		trace_smb3_reconnect_detected(server->CurrentMid,
125			server->conn_id, server->hostname, scredits, add, in_flight);
126
127		cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
128			 add, instance);
129	}
130
131	if (reconnect_with_invalid_credits) {
132		trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
133			server->conn_id, server->hostname, scredits, add, in_flight);
134		cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
135			 optype, scredits, add);
136	}
137
138	spin_lock(&server->srv_lock);
139	if (server->tcpStatus == CifsNeedReconnect
140	    || server->tcpStatus == CifsExiting) {
141		spin_unlock(&server->srv_lock);
142		return;
143	}
144	spin_unlock(&server->srv_lock);
145
146	switch (rc) {
147	case -1:
148		/* change_conf hasn't been executed */
149		break;
150	case 0:
151		cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n");
152		break;
153	case 1:
154		cifs_server_dbg(VFS, "disabling echoes and oplocks\n");
155		break;
156	case 2:
157		cifs_dbg(FYI, "disabling oplocks\n");
158		break;
159	default:
160		/* change_conf rebalanced credits for different types */
161		break;
162	}
163
164	trace_smb3_add_credits(server->CurrentMid,
165			server->conn_id, server->hostname, scredits, add, in_flight);
166	cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
167}
168
169static void
170smb2_set_credits(struct TCP_Server_Info *server, const int val)
171{
172	int scredits, in_flight;
173
174	spin_lock(&server->req_lock);
175	server->credits = val;
176	if (val == 1) {
177		server->reconnect_instance++;
178		/*
179		 * ChannelSequence updated for all channels in primary channel so that consistent
180		 * across SMB3 requests sent on any channel. See MS-SMB2 3.2.4.1 and 3.2.7.1
181		 */
182		if (SERVER_IS_CHAN(server))
183			server->primary_server->channel_sequence_num++;
184		else
185			server->channel_sequence_num++;
186	}
187	scredits = server->credits;
188	in_flight = server->in_flight;
189	spin_unlock(&server->req_lock);
190
191	trace_smb3_set_credits(server->CurrentMid,
192			server->conn_id, server->hostname, scredits, val, in_flight);
193	cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);
194
195	/* don't log while holding the lock */
196	if (val == 1)
197		cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
198}
199
200static int *
201smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
202{
203	switch (optype) {
204	case CIFS_ECHO_OP:
205		return &server->echo_credits;
206	case CIFS_OBREAK_OP:
207		return &server->oplock_credits;
208	default:
209		return &server->credits;
210	}
211}
212
213static unsigned int
214smb2_get_credits(struct mid_q_entry *mid)
215{
216	return mid->credits_received;
217}
218
219static int
220smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
221		      unsigned int *num, struct cifs_credits *credits)
222{
223	int rc = 0;
224	unsigned int scredits, in_flight;
225
226	spin_lock(&server->req_lock);
227	while (1) {
228		spin_unlock(&server->req_lock);
229
230		spin_lock(&server->srv_lock);
231		if (server->tcpStatus == CifsExiting) {
232			spin_unlock(&server->srv_lock);
233			return -ENOENT;
234		}
235		spin_unlock(&server->srv_lock);
236
237		spin_lock(&server->req_lock);
238		if (server->credits <= 0) {
239			spin_unlock(&server->req_lock);
240			cifs_num_waiters_inc(server);
241			rc = wait_event_killable(server->request_q,
242				has_credits(server, &server->credits, 1));
243			cifs_num_waiters_dec(server);
244			if (rc)
245				return rc;
246			spin_lock(&server->req_lock);
247		} else {
248			scredits = server->credits;
249			/* can deadlock with reopen */
250			if (scredits <= 8) {
251				*num = SMB2_MAX_BUFFER_SIZE;
252				credits->value = 0;
253				credits->instance = 0;
254				break;
255			}
256
257			/* leave some credits for reopen and other ops */
258			scredits -= 8;
259			*num = min_t(unsigned int, size,
260				     scredits * SMB2_MAX_BUFFER_SIZE);
261
262			credits->value =
263				DIV_ROUND_UP(*num, SMB2_MAX_BUFFER_SIZE);
264			credits->instance = server->reconnect_instance;
265			server->credits -= credits->value;
266			server->in_flight++;
267			if (server->in_flight > server->max_in_flight)
268				server->max_in_flight = server->in_flight;
269			break;
270		}
271	}
272	scredits = server->credits;
273	in_flight = server->in_flight;
274	spin_unlock(&server->req_lock);
275
276	trace_smb3_wait_credits(server->CurrentMid,
277			server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
278	cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
279			__func__, credits->value, scredits);
280
281	return rc;
282}
283
284static int
285smb2_adjust_credits(struct TCP_Server_Info *server,
286		    struct cifs_credits *credits,
287		    const unsigned int payload_size)
288{
289	int new_val = DIV_ROUND_UP(payload_size, SMB2_MAX_BUFFER_SIZE);
290	int scredits, in_flight;
291
292	if (!credits->value || credits->value == new_val)
293		return 0;
294
295	if (credits->value < new_val) {
296		trace_smb3_too_many_credits(server->CurrentMid,
297				server->conn_id, server->hostname, 0, credits->value - new_val, 0);
298		cifs_server_dbg(VFS, "request has less credits (%d) than required (%d)",
299				credits->value, new_val);
300
301		return -EOPNOTSUPP;
302	}
303
304	spin_lock(&server->req_lock);
305
306	if (server->reconnect_instance != credits->instance) {
307		scredits = server->credits;
308		in_flight = server->in_flight;
309		spin_unlock(&server->req_lock);
310
311		trace_smb3_reconnect_detected(server->CurrentMid,
312			server->conn_id, server->hostname, scredits,
313			credits->value - new_val, in_flight);
314		cifs_server_dbg(VFS, "trying to return %d credits to old session\n",
315			 credits->value - new_val);
316		return -EAGAIN;
317	}
318
319	server->credits += credits->value - new_val;
320	scredits = server->credits;
321	in_flight = server->in_flight;
322	spin_unlock(&server->req_lock);
323	wake_up(&server->request_q);
324
325	trace_smb3_adj_credits(server->CurrentMid,
326			server->conn_id, server->hostname, scredits,
327			credits->value - new_val, in_flight);
328	cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
329			__func__, credits->value - new_val, scredits);
330
331	credits->value = new_val;
332
333	return 0;
334}
335
336static __u64
337smb2_get_next_mid(struct TCP_Server_Info *server)
338{
339	__u64 mid;
340	/* for SMB2 we need the current value */
341	spin_lock(&server->mid_lock);
342	mid = server->CurrentMid++;
343	spin_unlock(&server->mid_lock);
344	return mid;
345}
346
347static void
348smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
349{
350	spin_lock(&server->mid_lock);
351	if (server->CurrentMid >= val)
352		server->CurrentMid -= val;
353	spin_unlock(&server->mid_lock);
354}
355
356static struct mid_q_entry *
357__smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
358{
359	struct mid_q_entry *mid;
360	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
361	__u64 wire_mid = le64_to_cpu(shdr->MessageId);
362
363	if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
364		cifs_server_dbg(VFS, "Encrypted frame parsing not supported yet\n");
365		return NULL;
366	}
367
368	spin_lock(&server->mid_lock);
369	list_for_each_entry(mid, &server->pending_mid_q, qhead) {
370		if ((mid->mid == wire_mid) &&
371		    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
372		    (mid->command == shdr->Command)) {
373			kref_get(&mid->refcount);
374			if (dequeue) {
375				list_del_init(&mid->qhead);
376				mid->mid_flags |= MID_DELETED;
377			}
378			spin_unlock(&server->mid_lock);
379			return mid;
380		}
381	}
382	spin_unlock(&server->mid_lock);
383	return NULL;
384}
385
386static struct mid_q_entry *
387smb2_find_mid(struct TCP_Server_Info *server, char *buf)
388{
389	return __smb2_find_mid(server, buf, false);
390}
391
392static struct mid_q_entry *
393smb2_find_dequeue_mid(struct TCP_Server_Info *server, char *buf)
394{
395	return __smb2_find_mid(server, buf, true);
396}
397
398static void
399smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
400{
401#ifdef CONFIG_CIFS_DEBUG2
402	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
403
404	cifs_server_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d\n",
405		 shdr->Command, shdr->Status, shdr->Flags, shdr->MessageId,
406		 shdr->Id.SyncId.ProcessId);
407	if (!server->ops->check_message(buf, server->total_read, server)) {
408		cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
409				server->ops->calc_smb_size(buf));
410	}
411#endif
412}
413
414static bool
415smb2_need_neg(struct TCP_Server_Info *server)
416{
417	return server->max_read == 0;
418}
419
420static int
421smb2_negotiate(const unsigned int xid,
422	       struct cifs_ses *ses,
423	       struct TCP_Server_Info *server)
424{
425	int rc;
426
427	spin_lock(&server->mid_lock);
428	server->CurrentMid = 0;
429	spin_unlock(&server->mid_lock);
430	rc = SMB2_negotiate(xid, ses, server);
431	/* BB we probably don't need to retry with modern servers */
432	if (rc == -EAGAIN)
433		rc = -EHOSTDOWN;
434	return rc;
435}
436
437static unsigned int
438smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
439{
440	struct TCP_Server_Info *server = tcon->ses->server;
441	unsigned int wsize;
442
443	/* start with specified wsize, or default */
444	wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE;
445	wsize = min_t(unsigned int, wsize, server->max_write);
446	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
447		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
448
449	return wsize;
450}
451
452static unsigned int
453smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
454{
455	struct TCP_Server_Info *server = tcon->ses->server;
456	unsigned int wsize;
457
458	/* start with specified wsize, or default */
459	wsize = ctx->wsize ? ctx->wsize : SMB3_DEFAULT_IOSIZE;
460	wsize = min_t(unsigned int, wsize, server->max_write);
461#ifdef CONFIG_CIFS_SMB_DIRECT
462	if (server->rdma) {
463		if (server->sign)
464			/*
465			 * Account for SMB2 data transfer packet header and
466			 * possible encryption header
467			 */
468			wsize = min_t(unsigned int,
469				wsize,
470				server->smbd_conn->max_fragmented_send_size -
471					SMB2_READWRITE_PDU_HEADER_SIZE -
472					sizeof(struct smb2_transform_hdr));
473		else
474			wsize = min_t(unsigned int,
475				wsize, server->smbd_conn->max_readwrite_size);
476	}
477#endif
478	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
479		wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
480
481	return wsize;
482}
483
484static unsigned int
485smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
486{
487	struct TCP_Server_Info *server = tcon->ses->server;
488	unsigned int rsize;
489
490	/* start with specified rsize, or default */
491	rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE;
492	rsize = min_t(unsigned int, rsize, server->max_read);
493
494	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
495		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
496
497	return rsize;
498}
499
500static unsigned int
501smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
502{
503	struct TCP_Server_Info *server = tcon->ses->server;
504	unsigned int rsize;
505
506	/* start with specified rsize, or default */
507	rsize = ctx->rsize ? ctx->rsize : SMB3_DEFAULT_IOSIZE;
508	rsize = min_t(unsigned int, rsize, server->max_read);
509#ifdef CONFIG_CIFS_SMB_DIRECT
510	if (server->rdma) {
511		if (server->sign)
512			/*
513			 * Account for SMB2 data transfer packet header and
514			 * possible encryption header
515			 */
516			rsize = min_t(unsigned int,
517				rsize,
518				server->smbd_conn->max_fragmented_recv_size -
519					SMB2_READWRITE_PDU_HEADER_SIZE -
520					sizeof(struct smb2_transform_hdr));
521		else
522			rsize = min_t(unsigned int,
523				rsize, server->smbd_conn->max_readwrite_size);
524	}
525#endif
526
527	if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
528		rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
529
530	return rsize;
531}
532
533/*
534 * compare two interfaces a and b
535 * return 0 if everything matches.
536 * return 1 if a is rdma capable, or rss capable, or has higher link speed
537 * return -1 otherwise.
538 */
539static int
540iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b)
541{
542	int cmp_ret = 0;
543
544	WARN_ON(!a || !b);
545	if (a->rdma_capable == b->rdma_capable) {
546		if (a->rss_capable == b->rss_capable) {
547			if (a->speed == b->speed) {
548				cmp_ret = cifs_ipaddr_cmp((struct sockaddr *) &a->sockaddr,
549							  (struct sockaddr *) &b->sockaddr);
550				if (!cmp_ret)
551					return 0;
552				else if (cmp_ret > 0)
553					return 1;
554				else
555					return -1;
556			} else if (a->speed > b->speed)
557				return 1;
558			else
559				return -1;
560		} else if (a->rss_capable > b->rss_capable)
561			return 1;
562		else
563			return -1;
564	} else if (a->rdma_capable > b->rdma_capable)
565		return 1;
566	else
567		return -1;
568}
569
570static int
571parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
572			size_t buf_len, struct cifs_ses *ses, bool in_mount)
573{
574	struct network_interface_info_ioctl_rsp *p;
575	struct sockaddr_in *addr4;
576	struct sockaddr_in6 *addr6;
577	struct iface_info_ipv4 *p4;
578	struct iface_info_ipv6 *p6;
579	struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL;
580	struct cifs_server_iface tmp_iface;
581	ssize_t bytes_left;
582	size_t next = 0;
583	int nb_iface = 0;
584	int rc = 0, ret = 0;
585
586	bytes_left = buf_len;
587	p = buf;
588
589	spin_lock(&ses->iface_lock);
590	/* do not query too frequently, this time with lock held */
591	if (ses->iface_last_update &&
592	    time_before(jiffies, ses->iface_last_update +
593			(SMB_INTERFACE_POLL_INTERVAL * HZ))) {
594		spin_unlock(&ses->iface_lock);
595		return 0;
596	}
597
598	/*
599	 * Go through iface_list and mark them as inactive
600	 */
601	list_for_each_entry_safe(iface, niface, &ses->iface_list,
602				 iface_head)
603		iface->is_active = 0;
604
605	spin_unlock(&ses->iface_lock);
606
607	/*
608	 * Samba server e.g. can return an empty interface list in some cases,
609	 * which would only be a problem if we were requesting multichannel
610	 */
611	if (bytes_left == 0) {
612		/* avoid spamming logs every 10 minutes, so log only in mount */
613		if ((ses->chan_max > 1) && in_mount)
614			cifs_dbg(VFS,
615				 "multichannel not available\n"
616				 "Empty network interface list returned by server %s\n",
617				 ses->server->hostname);
618		rc = -EOPNOTSUPP;
619		ses->iface_last_update = jiffies;
620		goto out;
621	}
622
623	while (bytes_left >= (ssize_t)sizeof(*p)) {
624		memset(&tmp_iface, 0, sizeof(tmp_iface));
625		tmp_iface.speed = le64_to_cpu(p->LinkSpeed);
626		tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
627		tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
628
629		switch (p->Family) {
630		/*
631		 * The kernel and wire socket structures have the same
632		 * layout and use network byte order but make the
633		 * conversion explicit in case either one changes.
634		 */
635		case INTERNETWORK:
636			addr4 = (struct sockaddr_in *)&tmp_iface.sockaddr;
637			p4 = (struct iface_info_ipv4 *)p->Buffer;
638			addr4->sin_family = AF_INET;
639			memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
640
641			/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
642			addr4->sin_port = cpu_to_be16(CIFS_PORT);
643
644			cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
645				 &addr4->sin_addr);
646			break;
647		case INTERNETWORKV6:
648			addr6 =	(struct sockaddr_in6 *)&tmp_iface.sockaddr;
649			p6 = (struct iface_info_ipv6 *)p->Buffer;
650			addr6->sin6_family = AF_INET6;
651			memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
652
653			/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
654			addr6->sin6_flowinfo = 0;
655			addr6->sin6_scope_id = 0;
656			addr6->sin6_port = cpu_to_be16(CIFS_PORT);
657
658			cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
659				 &addr6->sin6_addr);
660			break;
661		default:
662			cifs_dbg(VFS,
663				 "%s: skipping unsupported socket family\n",
664				 __func__);
665			goto next_iface;
666		}
667
668		/*
669		 * The iface_list is assumed to be sorted by speed.
670		 * Check if the new interface exists in that list.
671		 * NEVER change iface. it could be in use.
672		 * Add a new one instead
673		 */
674		spin_lock(&ses->iface_lock);
675		list_for_each_entry_safe(iface, niface, &ses->iface_list,
676					 iface_head) {
677			ret = iface_cmp(iface, &tmp_iface);
678			if (!ret) {
679				iface->is_active = 1;
680				spin_unlock(&ses->iface_lock);
681				goto next_iface;
682			} else if (ret < 0) {
683				/* all remaining ifaces are slower */
684				kref_get(&iface->refcount);
685				break;
686			}
687		}
688		spin_unlock(&ses->iface_lock);
689
690		/* no match. insert the entry in the list */
691		info = kmalloc(sizeof(struct cifs_server_iface),
692			       GFP_KERNEL);
693		if (!info) {
694			rc = -ENOMEM;
695			goto out;
696		}
697		memcpy(info, &tmp_iface, sizeof(tmp_iface));
698
699		/* add this new entry to the list */
700		kref_init(&info->refcount);
701		info->is_active = 1;
702
703		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, ses->iface_count);
704		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
705		cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
706			 le32_to_cpu(p->Capability));
707
708		spin_lock(&ses->iface_lock);
709		if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) {
710			list_add_tail(&info->iface_head, &iface->iface_head);
711			kref_put(&iface->refcount, release_iface);
712		} else
713			list_add_tail(&info->iface_head, &ses->iface_list);
714
715		ses->iface_count++;
716		spin_unlock(&ses->iface_lock);
717next_iface:
718		nb_iface++;
719		next = le32_to_cpu(p->Next);
720		if (!next) {
721			bytes_left -= sizeof(*p);
722			break;
723		}
724		p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
725		bytes_left -= next;
726	}
727
728	if (!nb_iface) {
729		cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
730		rc = -EINVAL;
731		goto out;
732	}
733
734	/* Azure rounds the buffer size up 8, to a 16 byte boundary */
735	if ((bytes_left > 8) || p->Next)
736		cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
737
738	ses->iface_last_update = jiffies;
739
740out:
741	/*
742	 * Go through the list again and put the inactive entries
743	 */
744	spin_lock(&ses->iface_lock);
745	list_for_each_entry_safe(iface, niface, &ses->iface_list,
746				 iface_head) {
747		if (!iface->is_active) {
748			list_del(&iface->iface_head);
749			kref_put(&iface->refcount, release_iface);
750			ses->iface_count--;
751		}
752	}
753	spin_unlock(&ses->iface_lock);
754
755	return rc;
756}
757
758int
759SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount)
760{
761	int rc;
762	unsigned int ret_data_len = 0;
763	struct network_interface_info_ioctl_rsp *out_buf = NULL;
764	struct cifs_ses *ses = tcon->ses;
765	struct TCP_Server_Info *pserver;
766
767	/* do not query too frequently */
768	if (ses->iface_last_update &&
769	    time_before(jiffies, ses->iface_last_update +
770			(SMB_INTERFACE_POLL_INTERVAL * HZ)))
771		return 0;
772
773	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
774			FSCTL_QUERY_NETWORK_INTERFACE_INFO,
775			NULL /* no data input */, 0 /* no data input */,
776			CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
777	if (rc == -EOPNOTSUPP) {
778		cifs_dbg(FYI,
779			 "server does not support query network interfaces\n");
780		ret_data_len = 0;
781	} else if (rc != 0) {
782		cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
783		goto out;
784	}
785
786	rc = parse_server_interfaces(out_buf, ret_data_len, ses, in_mount);
787	if (rc)
788		goto out;
789
790	/* check if iface is still active */
791	spin_lock(&ses->chan_lock);
792	pserver = ses->chans[0].server;
793	if (pserver && !cifs_chan_is_iface_active(ses, pserver)) {
794		spin_unlock(&ses->chan_lock);
795		cifs_chan_update_iface(ses, pserver);
796		spin_lock(&ses->chan_lock);
797	}
798	spin_unlock(&ses->chan_lock);
799
800out:
801	kfree(out_buf);
802	return rc;
803}
804
805static void
806smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
807	      struct cifs_sb_info *cifs_sb)
808{
809	int rc;
810	__le16 srch_path = 0; /* Null - open root of share */
811	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
812	struct cifs_open_parms oparms;
813	struct cifs_fid fid;
814	struct cached_fid *cfid = NULL;
815
816	oparms = (struct cifs_open_parms) {
817		.tcon = tcon,
818		.path = "",
819		.desired_access = FILE_READ_ATTRIBUTES,
820		.disposition = FILE_OPEN,
821		.create_options = cifs_create_options(cifs_sb, 0),
822		.fid = &fid,
823	};
824
825	rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
826	if (rc == 0)
827		memcpy(&fid, &cfid->fid, sizeof(struct cifs_fid));
828	else
829		rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
830			       NULL, NULL);
831	if (rc)
832		return;
833
834	SMB3_request_interfaces(xid, tcon, true /* called during  mount */);
835
836	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
837			FS_ATTRIBUTE_INFORMATION);
838	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
839			FS_DEVICE_INFORMATION);
840	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
841			FS_VOLUME_INFORMATION);
842	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
843			FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
844	if (cfid == NULL)
845		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
846	else
847		close_cached_dir(cfid);
848}
849
850static void
851smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
852	      struct cifs_sb_info *cifs_sb)
853{
854	int rc;
855	__le16 srch_path = 0; /* Null - open root of share */
856	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
857	struct cifs_open_parms oparms;
858	struct cifs_fid fid;
859
860	oparms = (struct cifs_open_parms) {
861		.tcon = tcon,
862		.path = "",
863		.desired_access = FILE_READ_ATTRIBUTES,
864		.disposition = FILE_OPEN,
865		.create_options = cifs_create_options(cifs_sb, 0),
866		.fid = &fid,
867	};
868
869	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
870		       NULL, NULL);
871	if (rc)
872		return;
873
874	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
875			FS_ATTRIBUTE_INFORMATION);
876	SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
877			FS_DEVICE_INFORMATION);
878	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
879}
880
881static int
882smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
883			struct cifs_sb_info *cifs_sb, const char *full_path)
884{
885	__le16 *utf16_path;
886	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
887	int err_buftype = CIFS_NO_BUFFER;
888	struct cifs_open_parms oparms;
889	struct kvec err_iov = {};
890	struct cifs_fid fid;
891	struct cached_fid *cfid;
892	bool islink;
893	int rc, rc2;
894
895	rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
896	if (!rc) {
897		if (cfid->has_lease) {
898			close_cached_dir(cfid);
899			return 0;
900		}
901		close_cached_dir(cfid);
902	}
903
904	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
905	if (!utf16_path)
906		return -ENOMEM;
907
908	oparms = (struct cifs_open_parms) {
909		.tcon = tcon,
910		.path = full_path,
911		.desired_access = FILE_READ_ATTRIBUTES,
912		.disposition = FILE_OPEN,
913		.create_options = cifs_create_options(cifs_sb, 0),
914		.fid = &fid,
915	};
916
917	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
918		       &err_iov, &err_buftype);
919	if (rc) {
920		struct smb2_hdr *hdr = err_iov.iov_base;
921
922		if (unlikely(!hdr || err_buftype == CIFS_NO_BUFFER))
923			goto out;
924
925		if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) {
926			rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb,
927							     full_path, &islink);
928			if (rc2) {
929				rc = rc2;
930				goto out;
931			}
932			if (islink)
933				rc = -EREMOTE;
934		}
935		if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
936		    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
937			rc = -EOPNOTSUPP;
938		goto out;
939	}
940
941	rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
942
943out:
944	free_rsp_buf(err_buftype, err_iov.iov_base);
945	kfree(utf16_path);
946	return rc;
947}
948
949static int smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
950			     struct cifs_sb_info *cifs_sb, const char *full_path,
951			     u64 *uniqueid, struct cifs_open_info_data *data)
952{
953	*uniqueid = le64_to_cpu(data->fi.IndexNumber);
954	return 0;
955}
956
957static int smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
958				struct cifsFileInfo *cfile, struct cifs_open_info_data *data)
959{
960	struct cifs_fid *fid = &cfile->fid;
961
962	if (cfile->symlink_target) {
963		data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
964		if (!data->symlink_target)
965			return -ENOMEM;
966	}
967	return SMB2_query_info(xid, tcon, fid->persistent_fid, fid->volatile_fid, &data->fi);
968}
969
970#ifdef CONFIG_CIFS_XATTR
971static ssize_t
972move_smb2_ea_to_cifs(char *dst, size_t dst_size,
973		     struct smb2_file_full_ea_info *src, size_t src_size,
974		     const unsigned char *ea_name)
975{
976	int rc = 0;
977	unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
978	char *name, *value;
979	size_t buf_size = dst_size;
980	size_t name_len, value_len, user_name_len;
981
982	while (src_size > 0) {
983		name_len = (size_t)src->ea_name_length;
984		value_len = (size_t)le16_to_cpu(src->ea_value_length);
985
986		if (name_len == 0)
987			break;
988
989		if (src_size < 8 + name_len + 1 + value_len) {
990			cifs_dbg(FYI, "EA entry goes beyond length of list\n");
991			rc = -EIO;
992			goto out;
993		}
994
995		name = &src->ea_data[0];
996		value = &src->ea_data[src->ea_name_length + 1];
997
998		if (ea_name) {
999			if (ea_name_len == name_len &&
1000			    memcmp(ea_name, name, name_len) == 0) {
1001				rc = value_len;
1002				if (dst_size == 0)
1003					goto out;
1004				if (dst_size < value_len) {
1005					rc = -ERANGE;
1006					goto out;
1007				}
1008				memcpy(dst, value, value_len);
1009				goto out;
1010			}
1011		} else {
1012			/* 'user.' plus a terminating null */
1013			user_name_len = 5 + 1 + name_len;
1014
1015			if (buf_size == 0) {
1016				/* skip copy - calc size only */
1017				rc += user_name_len;
1018			} else if (dst_size >= user_name_len) {
1019				dst_size -= user_name_len;
1020				memcpy(dst, "user.", 5);
1021				dst += 5;
1022				memcpy(dst, src->ea_data, name_len);
1023				dst += name_len;
1024				*dst = 0;
1025				++dst;
1026				rc += user_name_len;
1027			} else {
1028				/* stop before overrun buffer */
1029				rc = -ERANGE;
1030				break;
1031			}
1032		}
1033
1034		if (!src->next_entry_offset)
1035			break;
1036
1037		if (src_size < le32_to_cpu(src->next_entry_offset)) {
1038			/* stop before overrun buffer */
1039			rc = -ERANGE;
1040			break;
1041		}
1042		src_size -= le32_to_cpu(src->next_entry_offset);
1043		src = (void *)((char *)src +
1044			       le32_to_cpu(src->next_entry_offset));
1045	}
1046
1047	/* didn't find the named attribute */
1048	if (ea_name)
1049		rc = -ENODATA;
1050
1051out:
1052	return (ssize_t)rc;
1053}
1054
1055static ssize_t
1056smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
1057	       const unsigned char *path, const unsigned char *ea_name,
1058	       char *ea_data, size_t buf_size,
1059	       struct cifs_sb_info *cifs_sb)
1060{
1061	int rc;
1062	struct kvec rsp_iov = {NULL, 0};
1063	int buftype = CIFS_NO_BUFFER;
1064	struct smb2_query_info_rsp *rsp;
1065	struct smb2_file_full_ea_info *info = NULL;
1066
1067	rc = smb2_query_info_compound(xid, tcon, path,
1068				      FILE_READ_EA,
1069				      FILE_FULL_EA_INFORMATION,
1070				      SMB2_O_INFO_FILE,
1071				      CIFSMaxBufSize -
1072				      MAX_SMB2_CREATE_RESPONSE_SIZE -
1073				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
1074				      &rsp_iov, &buftype, cifs_sb);
1075	if (rc) {
1076		/*
1077		 * If ea_name is NULL (listxattr) and there are no EAs,
1078		 * return 0 as it's not an error. Otherwise, the specified
1079		 * ea_name was not found.
1080		 */
1081		if (!ea_name && rc == -ENODATA)
1082			rc = 0;
1083		goto qeas_exit;
1084	}
1085
1086	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
1087	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
1088			       le32_to_cpu(rsp->OutputBufferLength),
1089			       &rsp_iov,
1090			       sizeof(struct smb2_file_full_ea_info));
1091	if (rc)
1092		goto qeas_exit;
1093
1094	info = (struct smb2_file_full_ea_info *)(
1095			le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
1096	rc = move_smb2_ea_to_cifs(ea_data, buf_size, info,
1097			le32_to_cpu(rsp->OutputBufferLength), ea_name);
1098
1099 qeas_exit:
1100	free_rsp_buf(buftype, rsp_iov.iov_base);
1101	return rc;
1102}
1103
1104static int
1105smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
1106	    const char *path, const char *ea_name, const void *ea_value,
1107	    const __u16 ea_value_len, const struct nls_table *nls_codepage,
1108	    struct cifs_sb_info *cifs_sb)
1109{
1110	struct smb2_compound_vars *vars;
1111	struct cifs_ses *ses = tcon->ses;
1112	struct TCP_Server_Info *server;
1113	struct smb_rqst *rqst;
1114	struct kvec *rsp_iov;
1115	__le16 *utf16_path = NULL;
1116	int ea_name_len = strlen(ea_name);
1117	int flags = CIFS_CP_CREATE_CLOSE_OP;
1118	int len;
1119	int resp_buftype[3];
1120	struct cifs_open_parms oparms;
1121	__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1122	struct cifs_fid fid;
1123	unsigned int size[1];
1124	void *data[1];
1125	struct smb2_file_full_ea_info *ea = NULL;
1126	struct smb2_query_info_rsp *rsp;
1127	int rc, used_len = 0;
1128	int retries = 0, cur_sleep = 1;
1129
1130replay_again:
1131	/* reinitialize for possible replay */
1132	flags = CIFS_CP_CREATE_CLOSE_OP;
1133	oplock = SMB2_OPLOCK_LEVEL_NONE;
1134	server = cifs_pick_channel(ses);
1135
1136	if (smb3_encryption_required(tcon))
1137		flags |= CIFS_TRANSFORM_REQ;
1138
1139	if (ea_name_len > 255)
1140		return -EINVAL;
1141
1142	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
1143	if (!utf16_path)
1144		return -ENOMEM;
1145
1146	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1147	vars = kzalloc(sizeof(*vars), GFP_KERNEL);
1148	if (!vars) {
1149		rc = -ENOMEM;
1150		goto out_free_path;
1151	}
1152	rqst = vars->rqst;
1153	rsp_iov = vars->rsp_iov;
1154
1155	if (ses->server->ops->query_all_EAs) {
1156		if (!ea_value) {
1157			rc = ses->server->ops->query_all_EAs(xid, tcon, path,
1158							     ea_name, NULL, 0,
1159							     cifs_sb);
1160			if (rc == -ENODATA)
1161				goto sea_exit;
1162		} else {
1163			/* If we are adding a attribute we should first check
1164			 * if there will be enough space available to store
1165			 * the new EA. If not we should not add it since we
1166			 * would not be able to even read the EAs back.
1167			 */
1168			rc = smb2_query_info_compound(xid, tcon, path,
1169				      FILE_READ_EA,
1170				      FILE_FULL_EA_INFORMATION,
1171				      SMB2_O_INFO_FILE,
1172				      CIFSMaxBufSize -
1173				      MAX_SMB2_CREATE_RESPONSE_SIZE -
1174				      MAX_SMB2_CLOSE_RESPONSE_SIZE,
1175				      &rsp_iov[1], &resp_buftype[1], cifs_sb);
1176			if (rc == 0) {
1177				rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1178				used_len = le32_to_cpu(rsp->OutputBufferLength);
1179			}
1180			free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1181			resp_buftype[1] = CIFS_NO_BUFFER;
1182			memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
1183			rc = 0;
1184
1185			/* Use a fudge factor of 256 bytes in case we collide
1186			 * with a different set_EAs command.
1187			 */
1188			if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1189			   MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
1190			   used_len + ea_name_len + ea_value_len + 1) {
1191				rc = -ENOSPC;
1192				goto sea_exit;
1193			}
1194		}
1195	}
1196
1197	/* Open */
1198	rqst[0].rq_iov = vars->open_iov;
1199	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1200
1201	oparms = (struct cifs_open_parms) {
1202		.tcon = tcon,
1203		.path = path,
1204		.desired_access = FILE_WRITE_EA,
1205		.disposition = FILE_OPEN,
1206		.create_options = cifs_create_options(cifs_sb, 0),
1207		.fid = &fid,
1208		.replay = !!(retries),
1209	};
1210
1211	rc = SMB2_open_init(tcon, server,
1212			    &rqst[0], &oplock, &oparms, utf16_path);
1213	if (rc)
1214		goto sea_exit;
1215	smb2_set_next_command(tcon, &rqst[0]);
1216
1217
1218	/* Set Info */
1219	rqst[1].rq_iov = vars->si_iov;
1220	rqst[1].rq_nvec = 1;
1221
1222	len = sizeof(*ea) + ea_name_len + ea_value_len + 1;
1223	ea = kzalloc(len, GFP_KERNEL);
1224	if (ea == NULL) {
1225		rc = -ENOMEM;
1226		goto sea_exit;
1227	}
1228
1229	ea->ea_name_length = ea_name_len;
1230	ea->ea_value_length = cpu_to_le16(ea_value_len);
1231	memcpy(ea->ea_data, ea_name, ea_name_len + 1);
1232	memcpy(ea->ea_data + ea_name_len + 1, ea_value, ea_value_len);
1233
1234	size[0] = len;
1235	data[0] = ea;
1236
1237	rc = SMB2_set_info_init(tcon, server,
1238				&rqst[1], COMPOUND_FID,
1239				COMPOUND_FID, current->tgid,
1240				FILE_FULL_EA_INFORMATION,
1241				SMB2_O_INFO_FILE, 0, data, size);
1242	if (rc)
1243		goto sea_exit;
1244	smb2_set_next_command(tcon, &rqst[1]);
1245	smb2_set_related(&rqst[1]);
1246
1247	/* Close */
1248	rqst[2].rq_iov = &vars->close_iov;
1249	rqst[2].rq_nvec = 1;
1250	rc = SMB2_close_init(tcon, server,
1251			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
1252	if (rc)
1253		goto sea_exit;
1254	smb2_set_related(&rqst[2]);
1255
1256	if (retries) {
1257		smb2_set_replay(server, &rqst[0]);
1258		smb2_set_replay(server, &rqst[1]);
1259		smb2_set_replay(server, &rqst[2]);
1260	}
1261
1262	rc = compound_send_recv(xid, ses, server,
1263				flags, 3, rqst,
1264				resp_buftype, rsp_iov);
1265	/* no need to bump num_remote_opens because handle immediately closed */
1266
1267 sea_exit:
1268	kfree(ea);
1269	SMB2_open_free(&rqst[0]);
1270	SMB2_set_info_free(&rqst[1]);
1271	SMB2_close_free(&rqst[2]);
1272	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1273	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1274	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
1275	kfree(vars);
1276out_free_path:
1277	kfree(utf16_path);
1278
1279	if (is_replayable_error(rc) &&
1280	    smb2_should_replay(tcon, &retries, &cur_sleep))
1281		goto replay_again;
1282
1283	return rc;
1284}
1285#endif
1286
1287static bool
1288smb2_can_echo(struct TCP_Server_Info *server)
1289{
1290	return server->echoes;
1291}
1292
1293static void
1294smb2_clear_stats(struct cifs_tcon *tcon)
1295{
1296	int i;
1297
1298	for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
1299		atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
1300		atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
1301	}
1302}
1303
1304static void
1305smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
1306{
1307	seq_puts(m, "\n\tShare Capabilities:");
1308	if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
1309		seq_puts(m, " DFS,");
1310	if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
1311		seq_puts(m, " CONTINUOUS AVAILABILITY,");
1312	if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
1313		seq_puts(m, " SCALEOUT,");
1314	if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
1315		seq_puts(m, " CLUSTER,");
1316	if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
1317		seq_puts(m, " ASYMMETRIC,");
1318	if (tcon->capabilities == 0)
1319		seq_puts(m, " None");
1320	if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
1321		seq_puts(m, " Aligned,");
1322	if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
1323		seq_puts(m, " Partition Aligned,");
1324	if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
1325		seq_puts(m, " SSD,");
1326	if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
1327		seq_puts(m, " TRIM-support,");
1328
1329	seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
1330	seq_printf(m, "\n\ttid: 0x%x", tcon->tid);
1331	if (tcon->perf_sector_size)
1332		seq_printf(m, "\tOptimal sector size: 0x%x",
1333			   tcon->perf_sector_size);
1334	seq_printf(m, "\tMaximal Access: 0x%x", tcon->maximal_access);
1335}
1336
1337static void
1338smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
1339{
1340	atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
1341	atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
1342
1343	/*
1344	 *  Can't display SMB2_NEGOTIATE, SESSION_SETUP, LOGOFF, CANCEL and ECHO
1345	 *  totals (requests sent) since those SMBs are per-session not per tcon
1346	 */
1347	seq_printf(m, "\nBytes read: %llu  Bytes written: %llu",
1348		   (long long)(tcon->bytes_read),
1349		   (long long)(tcon->bytes_written));
1350	seq_printf(m, "\nOpen files: %d total (local), %d open on server",
1351		   atomic_read(&tcon->num_local_opens),
1352		   atomic_read(&tcon->num_remote_opens));
1353	seq_printf(m, "\nTreeConnects: %d total %d failed",
1354		   atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
1355		   atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
1356	seq_printf(m, "\nTreeDisconnects: %d total %d failed",
1357		   atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
1358		   atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
1359	seq_printf(m, "\nCreates: %d total %d failed",
1360		   atomic_read(&sent[SMB2_CREATE_HE]),
1361		   atomic_read(&failed[SMB2_CREATE_HE]));
1362	seq_printf(m, "\nCloses: %d total %d failed",
1363		   atomic_read(&sent[SMB2_CLOSE_HE]),
1364		   atomic_read(&failed[SMB2_CLOSE_HE]));
1365	seq_printf(m, "\nFlushes: %d total %d failed",
1366		   atomic_read(&sent[SMB2_FLUSH_HE]),
1367		   atomic_read(&failed[SMB2_FLUSH_HE]));
1368	seq_printf(m, "\nReads: %d total %d failed",
1369		   atomic_read(&sent[SMB2_READ_HE]),
1370		   atomic_read(&failed[SMB2_READ_HE]));
1371	seq_printf(m, "\nWrites: %d total %d failed",
1372		   atomic_read(&sent[SMB2_WRITE_HE]),
1373		   atomic_read(&failed[SMB2_WRITE_HE]));
1374	seq_printf(m, "\nLocks: %d total %d failed",
1375		   atomic_read(&sent[SMB2_LOCK_HE]),
1376		   atomic_read(&failed[SMB2_LOCK_HE]));
1377	seq_printf(m, "\nIOCTLs: %d total %d failed",
1378		   atomic_read(&sent[SMB2_IOCTL_HE]),
1379		   atomic_read(&failed[SMB2_IOCTL_HE]));
1380	seq_printf(m, "\nQueryDirectories: %d total %d failed",
1381		   atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
1382		   atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
1383	seq_printf(m, "\nChangeNotifies: %d total %d failed",
1384		   atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
1385		   atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
1386	seq_printf(m, "\nQueryInfos: %d total %d failed",
1387		   atomic_read(&sent[SMB2_QUERY_INFO_HE]),
1388		   atomic_read(&failed[SMB2_QUERY_INFO_HE]));
1389	seq_printf(m, "\nSetInfos: %d total %d failed",
1390		   atomic_read(&sent[SMB2_SET_INFO_HE]),
1391		   atomic_read(&failed[SMB2_SET_INFO_HE]));
1392	seq_printf(m, "\nOplockBreaks: %d sent %d failed",
1393		   atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
1394		   atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
1395}
1396
1397static void
1398smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
1399{
1400	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
1401	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
1402
1403	cfile->fid.persistent_fid = fid->persistent_fid;
1404	cfile->fid.volatile_fid = fid->volatile_fid;
1405	cfile->fid.access = fid->access;
1406#ifdef CONFIG_CIFS_DEBUG2
1407	cfile->fid.mid = fid->mid;
1408#endif /* CIFS_DEBUG2 */
1409	server->ops->set_oplock_level(cinode, oplock, fid->epoch,
1410				      &fid->purge_cache);
1411	cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
1412	memcpy(cfile->fid.create_guid, fid->create_guid, 16);
1413}
1414
1415static int
1416smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
1417		struct cifs_fid *fid)
1418{
1419	return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1420}
1421
1422static int
1423smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
1424		   struct cifsFileInfo *cfile)
1425{
1426	struct smb2_file_network_open_info file_inf;
1427	struct inode *inode;
1428	int rc;
1429
1430	rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
1431		   cfile->fid.volatile_fid, &file_inf);
1432	if (rc)
1433		return rc;
1434
1435	inode = d_inode(cfile->dentry);
1436
1437	spin_lock(&inode->i_lock);
1438	CIFS_I(inode)->time = jiffies;
1439
1440	/* Creation time should not need to be updated on close */
1441	if (file_inf.LastWriteTime)
1442		inode_set_mtime_to_ts(inode,
1443				      cifs_NTtimeToUnix(file_inf.LastWriteTime));
1444	if (file_inf.ChangeTime)
1445		inode_set_ctime_to_ts(inode,
1446				      cifs_NTtimeToUnix(file_inf.ChangeTime));
1447	if (file_inf.LastAccessTime)
1448		inode_set_atime_to_ts(inode,
1449				      cifs_NTtimeToUnix(file_inf.LastAccessTime));
1450
1451	/*
1452	 * i_blocks is not related to (i_size / i_blksize),
1453	 * but instead 512 byte (2**9) size is required for
1454	 * calculating num blocks.
1455	 */
1456	if (le64_to_cpu(file_inf.AllocationSize) > 4096)
1457		inode->i_blocks =
1458			(512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;
1459
1460	/* End of file and Attributes should not have to be updated on close */
1461	spin_unlock(&inode->i_lock);
1462	return rc;
1463}
1464
1465static int
1466SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
1467		     u64 persistent_fid, u64 volatile_fid,
1468		     struct copychunk_ioctl *pcchunk)
1469{
1470	int rc;
1471	unsigned int ret_data_len;
1472	struct resume_key_req *res_key;
1473
1474	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1475			FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */,
1476			CIFSMaxBufSize, (char **)&res_key, &ret_data_len);
1477
1478	if (rc == -EOPNOTSUPP) {
1479		pr_warn_once("Server share %s does not support copy range\n", tcon->tree_name);
1480		goto req_res_key_exit;
1481	} else if (rc) {
1482		cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
1483		goto req_res_key_exit;
1484	}
1485	if (ret_data_len < sizeof(struct resume_key_req)) {
1486		cifs_tcon_dbg(VFS, "Invalid refcopy resume key length\n");
1487		rc = -EINVAL;
1488		goto req_res_key_exit;
1489	}
1490	memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
1491
1492req_res_key_exit:
1493	kfree(res_key);
1494	return rc;
1495}
1496
1497static int
1498smb2_ioctl_query_info(const unsigned int xid,
1499		      struct cifs_tcon *tcon,
1500		      struct cifs_sb_info *cifs_sb,
1501		      __le16 *path, int is_dir,
1502		      unsigned long p)
1503{
1504	struct smb2_compound_vars *vars;
1505	struct smb_rqst *rqst;
1506	struct kvec *rsp_iov;
1507	struct cifs_ses *ses = tcon->ses;
1508	struct TCP_Server_Info *server;
1509	char __user *arg = (char __user *)p;
1510	struct smb_query_info qi;
1511	struct smb_query_info __user *pqi;
1512	int rc = 0;
1513	int flags = CIFS_CP_CREATE_CLOSE_OP;
1514	struct smb2_query_info_rsp *qi_rsp = NULL;
1515	struct smb2_ioctl_rsp *io_rsp = NULL;
1516	void *buffer = NULL;
1517	int resp_buftype[3];
1518	struct cifs_open_parms oparms;
1519	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
1520	struct cifs_fid fid;
1521	unsigned int size[2];
1522	void *data[2];
1523	int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
1524	void (*free_req1_func)(struct smb_rqst *r);
1525	int retries = 0, cur_sleep = 1;
1526
1527replay_again:
1528	/* reinitialize for possible replay */
1529	flags = CIFS_CP_CREATE_CLOSE_OP;
1530	oplock = SMB2_OPLOCK_LEVEL_NONE;
1531	server = cifs_pick_channel(ses);
1532
1533	vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
1534	if (vars == NULL)
1535		return -ENOMEM;
1536	rqst = &vars->rqst[0];
1537	rsp_iov = &vars->rsp_iov[0];
1538
1539	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
1540
1541	if (copy_from_user(&qi, arg, sizeof(struct smb_query_info))) {
1542		rc = -EFAULT;
1543		goto free_vars;
1544	}
1545	if (qi.output_buffer_length > 1024) {
1546		rc = -EINVAL;
1547		goto free_vars;
1548	}
1549
1550	if (!ses || !server) {
1551		rc = -EIO;
1552		goto free_vars;
1553	}
1554
1555	if (smb3_encryption_required(tcon))
1556		flags |= CIFS_TRANSFORM_REQ;
1557
1558	if (qi.output_buffer_length) {
1559		buffer = memdup_user(arg + sizeof(struct smb_query_info), qi.output_buffer_length);
1560		if (IS_ERR(buffer)) {
1561			rc = PTR_ERR(buffer);
1562			goto free_vars;
1563		}
1564	}
1565
1566	/* Open */
1567	rqst[0].rq_iov = &vars->open_iov[0];
1568	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
1569
1570	oparms = (struct cifs_open_parms) {
1571		.tcon = tcon,
1572		.disposition = FILE_OPEN,
1573		.create_options = cifs_create_options(cifs_sb, create_options),
1574		.fid = &fid,
1575		.replay = !!(retries),
1576	};
1577
1578	if (qi.flags & PASSTHRU_FSCTL) {
1579		switch (qi.info_type & FSCTL_DEVICE_ACCESS_MASK) {
1580		case FSCTL_DEVICE_ACCESS_FILE_READ_WRITE_ACCESS:
1581			oparms.desired_access = FILE_READ_DATA | FILE_WRITE_DATA | FILE_READ_ATTRIBUTES | SYNCHRONIZE;
1582			break;
1583		case FSCTL_DEVICE_ACCESS_FILE_ANY_ACCESS:
1584			oparms.desired_access = GENERIC_ALL;
1585			break;
1586		case FSCTL_DEVICE_ACCESS_FILE_READ_ACCESS:
1587			oparms.desired_access = GENERIC_READ;
1588			break;
1589		case FSCTL_DEVICE_ACCESS_FILE_WRITE_ACCESS:
1590			oparms.desired_access = GENERIC_WRITE;
1591			break;
1592		}
1593	} else if (qi.flags & PASSTHRU_SET_INFO) {
1594		oparms.desired_access = GENERIC_WRITE;
1595	} else {
1596		oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
1597	}
1598
1599	rc = SMB2_open_init(tcon, server,
1600			    &rqst[0], &oplock, &oparms, path);
1601	if (rc)
1602		goto free_output_buffer;
1603	smb2_set_next_command(tcon, &rqst[0]);
1604
1605	/* Query */
1606	if (qi.flags & PASSTHRU_FSCTL) {
1607		/* Can eventually relax perm check since server enforces too */
1608		if (!capable(CAP_SYS_ADMIN)) {
1609			rc = -EPERM;
1610			goto free_open_req;
1611		}
1612		rqst[1].rq_iov = &vars->io_iov[0];
1613		rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
1614
1615		rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
1616				     qi.info_type, buffer, qi.output_buffer_length,
1617				     CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
1618				     MAX_SMB2_CLOSE_RESPONSE_SIZE);
1619		free_req1_func = SMB2_ioctl_free;
1620	} else if (qi.flags == PASSTHRU_SET_INFO) {
1621		/* Can eventually relax perm check since server enforces too */
1622		if (!capable(CAP_SYS_ADMIN)) {
1623			rc = -EPERM;
1624			goto free_open_req;
1625		}
1626		if (qi.output_buffer_length < 8) {
1627			rc = -EINVAL;
1628			goto free_open_req;
1629		}
1630		rqst[1].rq_iov = vars->si_iov;
1631		rqst[1].rq_nvec = 1;
1632
1633		/* MS-FSCC 2.4.13 FileEndOfFileInformation */
1634		size[0] = 8;
1635		data[0] = buffer;
1636
1637		rc = SMB2_set_info_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
1638					current->tgid, FILE_END_OF_FILE_INFORMATION,
1639					SMB2_O_INFO_FILE, 0, data, size);
1640		free_req1_func = SMB2_set_info_free;
1641	} else if (qi.flags == PASSTHRU_QUERY_INFO) {
1642		rqst[1].rq_iov = &vars->qi_iov;
1643		rqst[1].rq_nvec = 1;
1644
1645		rc = SMB2_query_info_init(tcon, server,
1646				  &rqst[1], COMPOUND_FID,
1647				  COMPOUND_FID, qi.file_info_class,
1648				  qi.info_type, qi.additional_information,
1649				  qi.input_buffer_length,
1650				  qi.output_buffer_length, buffer);
1651		free_req1_func = SMB2_query_info_free;
1652	} else { /* unknown flags */
1653		cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
1654			      qi.flags);
1655		rc = -EINVAL;
1656	}
1657
1658	if (rc)
1659		goto free_open_req;
1660	smb2_set_next_command(tcon, &rqst[1]);
1661	smb2_set_related(&rqst[1]);
1662
1663	/* Close */
1664	rqst[2].rq_iov = &vars->close_iov;
1665	rqst[2].rq_nvec = 1;
1666
1667	rc = SMB2_close_init(tcon, server,
1668			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
1669	if (rc)
1670		goto free_req_1;
1671	smb2_set_related(&rqst[2]);
1672
1673	if (retries) {
1674		smb2_set_replay(server, &rqst[0]);
1675		smb2_set_replay(server, &rqst[1]);
1676		smb2_set_replay(server, &rqst[2]);
1677	}
1678
1679	rc = compound_send_recv(xid, ses, server,
1680				flags, 3, rqst,
1681				resp_buftype, rsp_iov);
1682	if (rc)
1683		goto out;
1684
1685	/* No need to bump num_remote_opens since handle immediately closed */
1686	if (qi.flags & PASSTHRU_FSCTL) {
1687		pqi = (struct smb_query_info __user *)arg;
1688		io_rsp = (struct smb2_ioctl_rsp *)rsp_iov[1].iov_base;
1689		if (le32_to_cpu(io_rsp->OutputCount) < qi.input_buffer_length)
1690			qi.input_buffer_length = le32_to_cpu(io_rsp->OutputCount);
1691		if (qi.input_buffer_length > 0 &&
1692		    le32_to_cpu(io_rsp->OutputOffset) + qi.input_buffer_length
1693		    > rsp_iov[1].iov_len) {
1694			rc = -EFAULT;
1695			goto out;
1696		}
1697
1698		if (copy_to_user(&pqi->input_buffer_length,
1699				 &qi.input_buffer_length,
1700				 sizeof(qi.input_buffer_length))) {
1701			rc = -EFAULT;
1702			goto out;
1703		}
1704
1705		if (copy_to_user((void __user *)pqi + sizeof(struct smb_query_info),
1706				 (const void *)io_rsp + le32_to_cpu(io_rsp->OutputOffset),
1707				 qi.input_buffer_length))
1708			rc = -EFAULT;
1709	} else {
1710		pqi = (struct smb_query_info __user *)arg;
1711		qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
1712		if (le32_to_cpu(qi_rsp->OutputBufferLength) < qi.input_buffer_length)
1713			qi.input_buffer_length = le32_to_cpu(qi_rsp->OutputBufferLength);
1714		if (copy_to_user(&pqi->input_buffer_length,
1715				 &qi.input_buffer_length,
1716				 sizeof(qi.input_buffer_length))) {
1717			rc = -EFAULT;
1718			goto out;
1719		}
1720
1721		if (copy_to_user(pqi + 1, qi_rsp->Buffer,
1722				 qi.input_buffer_length))
1723			rc = -EFAULT;
1724	}
1725
1726out:
1727	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
1728	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
1729	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
1730	SMB2_close_free(&rqst[2]);
1731free_req_1:
1732	free_req1_func(&rqst[1]);
1733free_open_req:
1734	SMB2_open_free(&rqst[0]);
1735free_output_buffer:
1736	kfree(buffer);
1737free_vars:
1738	kfree(vars);
1739
1740	if (is_replayable_error(rc) &&
1741	    smb2_should_replay(tcon, &retries, &cur_sleep))
1742		goto replay_again;
1743
1744	return rc;
1745}
1746
1747static ssize_t
1748smb2_copychunk_range(const unsigned int xid,
1749			struct cifsFileInfo *srcfile,
1750			struct cifsFileInfo *trgtfile, u64 src_off,
1751			u64 len, u64 dest_off)
1752{
1753	int rc;
1754	unsigned int ret_data_len;
1755	struct copychunk_ioctl *pcchunk;
1756	struct copychunk_ioctl_rsp *retbuf = NULL;
1757	struct cifs_tcon *tcon;
1758	int chunks_copied = 0;
1759	bool chunk_sizes_updated = false;
1760	ssize_t bytes_written, total_bytes_written = 0;
1761
1762	pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
1763	if (pcchunk == NULL)
1764		return -ENOMEM;
1765
1766	cifs_dbg(FYI, "%s: about to call request res key\n", __func__);
1767	/* Request a key from the server to identify the source of the copy */
1768	rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
1769				srcfile->fid.persistent_fid,
1770				srcfile->fid.volatile_fid, pcchunk);
1771
1772	/* Note: request_res_key sets res_key null only if rc !=0 */
1773	if (rc)
1774		goto cchunk_out;
1775
1776	/* For now array only one chunk long, will make more flexible later */
1777	pcchunk->ChunkCount = cpu_to_le32(1);
1778	pcchunk->Reserved = 0;
1779	pcchunk->Reserved2 = 0;
1780
1781	tcon = tlink_tcon(trgtfile->tlink);
1782
1783	while (len > 0) {
1784		pcchunk->SourceOffset = cpu_to_le64(src_off);
1785		pcchunk->TargetOffset = cpu_to_le64(dest_off);
1786		pcchunk->Length =
1787			cpu_to_le32(min_t(u64, len, tcon->max_bytes_chunk));
1788
1789		/* Request server copy to target from src identified by key */
1790		kfree(retbuf);
1791		retbuf = NULL;
1792		rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1793			trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
1794			(char *)pcchunk, sizeof(struct copychunk_ioctl),
1795			CIFSMaxBufSize, (char **)&retbuf, &ret_data_len);
1796		if (rc == 0) {
1797			if (ret_data_len !=
1798					sizeof(struct copychunk_ioctl_rsp)) {
1799				cifs_tcon_dbg(VFS, "Invalid cchunk response size\n");
1800				rc = -EIO;
1801				goto cchunk_out;
1802			}
1803			if (retbuf->TotalBytesWritten == 0) {
1804				cifs_dbg(FYI, "no bytes copied\n");
1805				rc = -EIO;
1806				goto cchunk_out;
1807			}
1808			/*
1809			 * Check if server claimed to write more than we asked
1810			 */
1811			if (le32_to_cpu(retbuf->TotalBytesWritten) >
1812			    le32_to_cpu(pcchunk->Length)) {
1813				cifs_tcon_dbg(VFS, "Invalid copy chunk response\n");
1814				rc = -EIO;
1815				goto cchunk_out;
1816			}
1817			if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
1818				cifs_tcon_dbg(VFS, "Invalid num chunks written\n");
1819				rc = -EIO;
1820				goto cchunk_out;
1821			}
1822			chunks_copied++;
1823
1824			bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
1825			src_off += bytes_written;
1826			dest_off += bytes_written;
1827			len -= bytes_written;
1828			total_bytes_written += bytes_written;
1829
1830			cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
1831				le32_to_cpu(retbuf->ChunksWritten),
1832				le32_to_cpu(retbuf->ChunkBytesWritten),
1833				bytes_written);
1834		} else if (rc == -EINVAL) {
1835			if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
1836				goto cchunk_out;
1837
1838			cifs_dbg(FYI, "MaxChunks %d BytesChunk %d MaxCopy %d\n",
1839				le32_to_cpu(retbuf->ChunksWritten),
1840				le32_to_cpu(retbuf->ChunkBytesWritten),
1841				le32_to_cpu(retbuf->TotalBytesWritten));
1842
			/*
			 * Check whether this is the first request using these
			 * sizes (ie whether a copy already succeeded with the
			 * original sizes, or whether the server gave us
			 * different sizes after we already updated the max
			 * sizes on a previous request).  If not, give up -
			 * the server should not be failing now with sizes it
			 * previously accepted or supplied.
			 */
1850			if ((chunks_copied != 0) || chunk_sizes_updated)
1851				goto cchunk_out;
1852
1853			/* Check that server is not asking us to grow size */
1854			if (le32_to_cpu(retbuf->ChunkBytesWritten) <
1855					tcon->max_bytes_chunk)
1856				tcon->max_bytes_chunk =
1857					le32_to_cpu(retbuf->ChunkBytesWritten);
1858			else
1859				goto cchunk_out; /* server gave us bogus size */
1860
1861			/* No need to change MaxChunks since already set to 1 */
1862			chunk_sizes_updated = true;
1863		} else
1864			goto cchunk_out;
1865	}
1866
1867cchunk_out:
1868	kfree(pcchunk);
1869	kfree(retbuf);
1870	if (rc)
1871		return rc;
1872	else
1873		return total_bytes_written;
1874}
1875
1876static int
1877smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
1878		struct cifs_fid *fid)
1879{
1880	return SMB2_flush(xid, tcon, fid->persistent_fid, fid->volatile_fid);
1881}
1882
1883static unsigned int
1884smb2_read_data_offset(char *buf)
1885{
1886	struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
1887
1888	return rsp->DataOffset;
1889}
1890
1891static unsigned int
1892smb2_read_data_length(char *buf, bool in_remaining)
1893{
1894	struct smb2_read_rsp *rsp = (struct smb2_read_rsp *)buf;
1895
1896	if (in_remaining)
1897		return le32_to_cpu(rsp->DataRemaining);
1898
1899	return le32_to_cpu(rsp->DataLength);
1900}
1901
1903static int
1904smb2_sync_read(const unsigned int xid, struct cifs_fid *pfid,
1905	       struct cifs_io_parms *parms, unsigned int *bytes_read,
1906	       char **buf, int *buf_type)
1907{
1908	parms->persistent_fid = pfid->persistent_fid;
1909	parms->volatile_fid = pfid->volatile_fid;
1910	return SMB2_read(xid, parms, bytes_read, buf, buf_type);
1911}
1912
1913static int
1914smb2_sync_write(const unsigned int xid, struct cifs_fid *pfid,
1915		struct cifs_io_parms *parms, unsigned int *written,
1916		struct kvec *iov, unsigned long nr_segs)
1917{
1919	parms->persistent_fid = pfid->persistent_fid;
1920	parms->volatile_fid = pfid->volatile_fid;
1921	return SMB2_write(xid, parms, written, iov, nr_segs);
1922}
1923
1924/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */
1925static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
1926		struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
1927{
1928	struct cifsInodeInfo *cifsi;
1929	int rc;
1930
1931	cifsi = CIFS_I(inode);
1932
1933	/* if file already sparse don't bother setting sparse again */
1934	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse)
1935		return true; /* already sparse */
1936
1937	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse)
1938		return true; /* already not sparse */
1939
1940	/*
1941	 * Can't check for sparse support on share the usual way via the
1942	 * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share
1943	 * since Samba server doesn't set the flag on the share, yet
1944	 * supports the set sparse FSCTL and returns sparse correctly
1945	 * in the file attributes. If we fail setting sparse though we
1946	 * mark that server does not support sparse files for this share
1947	 * to avoid repeatedly sending the unsupported fsctl to server
1948	 * if the file is repeatedly extended.
1949	 */
1950	if (tcon->broken_sparse_sup)
1951		return false;
1952
1953	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1954			cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
1955			&setsparse, 1, CIFSMaxBufSize, NULL, NULL);
1956	if (rc) {
1957		tcon->broken_sparse_sup = true;
1958		cifs_dbg(FYI, "set sparse rc = %d\n", rc);
1959		return false;
1960	}
1961
1962	if (setsparse)
1963		cifsi->cifsAttrs |= FILE_ATTRIBUTE_SPARSE_FILE;
1964	else
1965		cifsi->cifsAttrs &= (~FILE_ATTRIBUTE_SPARSE_FILE);
1966
1967	return true;
1968}
1969
1970static int
1971smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
1972		   struct cifsFileInfo *cfile, __u64 size, bool set_alloc)
1973{
1974	struct inode *inode;
1975
	/*
	 * If extending the file by more than one page, make it sparse. Many
	 * Linux filesystems make files sparse by default when extending via
	 * ftruncate.
	 */
1980	inode = d_inode(cfile->dentry);
1981
1982	if (!set_alloc && (size > inode->i_size + 8192)) {
1983		__u8 set_sparse = 1;
1984
1985		/* whether set sparse succeeds or not, extend the file */
1986		smb2_set_sparse(xid, tcon, cfile, inode, set_sparse);
1987	}
1988
1989	return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
1990			    cfile->fid.volatile_fid, cfile->pid, size);
1991}
1992
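/*
 * Clone a byte range from srcfile into trgtfile (reflink) using
 * FSCTL_DUPLICATE_EXTENTS_TO_FILE.  Only possible when the share
 * advertises FILE_SUPPORTS_BLOCK_REFCOUNTING.  If the clone extends
 * past the target's current EOF, the target is grown (and its cached
 * size updated) before the ioctl is sent.
 */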
1993static int
1994smb2_duplicate_extents(const unsigned int xid,
1995			struct cifsFileInfo *srcfile,
1996			struct cifsFileInfo *trgtfile, u64 src_off,
1997			u64 len, u64 dest_off)
1998{
1999	int rc;
2000	unsigned int ret_data_len;
2001	struct inode *inode;
2002	struct duplicate_extents_to_file dup_ext_buf;
2003	struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
2004
	/* server filesystems advertise duplicate extent support with this flag */
2006	if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
2007	     FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0)
2008		return -EOPNOTSUPP;
2009
2010	dup_ext_buf.VolatileFileHandle = srcfile->fid.volatile_fid;
2011	dup_ext_buf.PersistentFileHandle = srcfile->fid.persistent_fid;
2012	dup_ext_buf.SourceFileOffset = cpu_to_le64(src_off);
2013	dup_ext_buf.TargetFileOffset = cpu_to_le64(dest_off);
2014	dup_ext_buf.ByteCount = cpu_to_le64(len);
2015	cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
2016		src_off, dest_off, len);
2017
2018	inode = d_inode(trgtfile->dentry);
2019	if (inode->i_size < dest_off + len) {
2020		rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
2021		if (rc)
2022			goto duplicate_extents_out;
2023
		/*
		 * Although we could also set a plausible allocation size
		 * (i_blocks) here in addition to the file size, with reflink
		 * the target file is likely sparse. Its allocation size will
		 * be queried on the next revalidate, but it is important to
		 * make sure that the file's cached size is updated immediately.
		 */
2031		cifs_setsize(inode, dest_off + len);
2032	}
2033	rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
2034			trgtfile->fid.volatile_fid,
2035			FSCTL_DUPLICATE_EXTENTS_TO_FILE,
2036			(char *)&dup_ext_buf,
2037			sizeof(struct duplicate_extents_to_file),
2038			CIFSMaxBufSize, NULL,
2039			&ret_data_len);
2040
2041	if (ret_data_len > 0)
2042		cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
2043
2044duplicate_extents_out:
2045	return rc;
2046}
2047
2048static int
2049smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
2050		   struct cifsFileInfo *cfile)
2051{
2052	return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
2053			    cfile->fid.volatile_fid);
2054}
2055
2056static int
2057smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
2058		   struct cifsFileInfo *cfile)
2059{
2060	struct fsctl_set_integrity_information_req integr_info;
2061	unsigned int ret_data_len;
2062
2063	integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
2064	integr_info.Flags = 0;
2065	integr_info.Reserved = 0;
2066
2067	return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2068			cfile->fid.volatile_fid,
2069			FSCTL_SET_INTEGRITY_INFORMATION,
2070			(char *)&integr_info,
2071			sizeof(struct fsctl_set_integrity_information_req),
2072			CIFSMaxBufSize, NULL,
2073			&ret_data_len);
2075}
2076
2077/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
2078#define GMT_TOKEN_SIZE 50
2079
2080#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
2081
2082/*
2083 * Input buffer contains (empty) struct smb_snapshot array with size filled in
2084 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
2085 */
2086static int
2087smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
2088		   struct cifsFileInfo *cfile, void __user *ioc_buf)
2089{
2090	char *retbuf = NULL;
2091	unsigned int ret_data_len = 0;
2092	int rc;
2093	u32 max_response_size;
2094	struct smb_snapshot_array snapshot_in;
2095
2096	/*
2097	 * On the first query to enumerate the list of snapshots available
2098	 * for this volume the buffer begins with 0 (number of snapshots
2099	 * which can be returned is zero since at that point we do not know
2100	 * how big the buffer needs to be). On the second query,
2101	 * it (ret_data_len) is set to number of snapshots so we can
2102	 * know to set the maximum response size larger (see below).
2103	 */
2104	if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
2105		return -EFAULT;
2106
	/*
	 * Note that for snapshot queries, servers like Azure expect the
	 * first query to be minimal size (and just used to get the
	 * number/size of previous versions), so the response size must be
	 * specified as EXACTLY sizeof(struct snapshot_array), which is 16
	 * when rounded up to a multiple of eight bytes.
	 */
2114	if (ret_data_len == 0)
2115		max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
2116	else
2117		max_response_size = CIFSMaxBufSize;
2118
2119	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2120			cfile->fid.volatile_fid,
2121			FSCTL_SRV_ENUMERATE_SNAPSHOTS,
2122			NULL, 0 /* no input data */, max_response_size,
2123			(char **)&retbuf,
2124			&ret_data_len);
	cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
2126			rc, ret_data_len);
2127	if (rc)
2128		return rc;
2129
2130	if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
2131		/* Fixup buffer */
2132		if (copy_from_user(&snapshot_in, ioc_buf,
2133		    sizeof(struct smb_snapshot_array))) {
2134			rc = -EFAULT;
2135			kfree(retbuf);
2136			return rc;
2137		}
2138
2139		/*
2140		 * Check for min size, ie not large enough to fit even one GMT
2141		 * token (snapshot).  On the first ioctl some users may pass in
2142		 * smaller size (or zero) to simply get the size of the array
2143		 * so the user space caller can allocate sufficient memory
2144		 * and retry the ioctl again with larger array size sufficient
2145		 * to hold all of the snapshot GMT tokens on the second try.
2146		 */
2147		if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
2148			ret_data_len = sizeof(struct smb_snapshot_array);
2149
2150		/*
2151		 * We return struct SRV_SNAPSHOT_ARRAY, followed by
2152		 * the snapshot array (of 50 byte GMT tokens) each
2153		 * representing an available previous version of the data
2154		 */
2155		if (ret_data_len > (snapshot_in.snapshot_array_size +
2156					sizeof(struct smb_snapshot_array)))
2157			ret_data_len = snapshot_in.snapshot_array_size +
2158					sizeof(struct smb_snapshot_array);
2159
2160		if (copy_to_user(ioc_buf, retbuf, ret_data_len))
2161			rc = -EFAULT;
2162	}
2163
2164	kfree(retbuf);
2165	return rc;
2166}
2169
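/*
 * Watch for changes on the directory backing pfile: open it, issue an
 * SMB2 CHANGE_NOTIFY with the caller-supplied completion filter, close
 * the handle, and, when return_changes is set, copy up to data_len
 * bytes of the returned notify information back to user space.
 */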
2170static int
2171smb3_notify(const unsigned int xid, struct file *pfile,
2172	    void __user *ioc_buf, bool return_changes)
2173{
2174	struct smb3_notify_info notify;
2175	struct smb3_notify_info __user *pnotify_buf;
2176	struct dentry *dentry = pfile->f_path.dentry;
2177	struct inode *inode = file_inode(pfile);
2178	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2179	struct cifs_open_parms oparms;
2180	struct cifs_fid fid;
2181	struct cifs_tcon *tcon;
2182	const unsigned char *path;
2183	char *returned_ioctl_info = NULL;
2184	void *page = alloc_dentry_path();
2185	__le16 *utf16_path = NULL;
2186	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2187	int rc = 0;
2188	__u32 ret_len = 0;
2189
2190	path = build_path_from_dentry(dentry, page);
2191	if (IS_ERR(path)) {
2192		rc = PTR_ERR(path);
2193		goto notify_exit;
2194	}
2195
2196	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2197	if (utf16_path == NULL) {
2198		rc = -ENOMEM;
2199		goto notify_exit;
2200	}
2201
2202	if (return_changes) {
2203		if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify_info))) {
2204			rc = -EFAULT;
2205			goto notify_exit;
2206		}
2207	} else {
2208		if (copy_from_user(&notify, ioc_buf, sizeof(struct smb3_notify))) {
2209			rc = -EFAULT;
2210			goto notify_exit;
2211		}
2212		notify.data_len = 0;
2213	}
2214
2215	tcon = cifs_sb_master_tcon(cifs_sb);
2216	oparms = (struct cifs_open_parms) {
2217		.tcon = tcon,
2218		.path = path,
2219		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
2220		.disposition = FILE_OPEN,
2221		.create_options = cifs_create_options(cifs_sb, 0),
2222		.fid = &fid,
2223	};
2224
2225	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
2226		       NULL);
2227	if (rc)
2228		goto notify_exit;
2229
2230	rc = SMB2_change_notify(xid, tcon, fid.persistent_fid, fid.volatile_fid,
2231				notify.watch_tree, notify.completion_filter,
2232				notify.data_len, &returned_ioctl_info, &ret_len);
2233
2234	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2235
2236	cifs_dbg(FYI, "change notify for path %s rc %d\n", path, rc);
2237	if (return_changes && (ret_len > 0) && (notify.data_len > 0)) {
2238		if (ret_len > notify.data_len)
2239			ret_len = notify.data_len;
2240		pnotify_buf = (struct smb3_notify_info __user *)ioc_buf;
2241		if (copy_to_user(pnotify_buf->notify_data, returned_ioctl_info, ret_len))
2242			rc = -EFAULT;
2243		else if (copy_to_user(&pnotify_buf->data_len, &ret_len, sizeof(ret_len)))
2244			rc = -EFAULT;
2245	}
2246	kfree(returned_ioctl_info);
2247notify_exit:
2248	free_dentry_path(page);
2249	kfree(utf16_path);
2250	return rc;
2251}
2252
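/*
 * Start a directory listing: compound an SMB2 CREATE of the directory
 * with the first QUERY_DIRECTORY so both go out in a single round trip.
 * STATUS_NO_MORE_FILES (-ENODATA) just means there are no entries to
 * return.  Replayable errors are retried with the replay flag set.
 */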
2253static int
2254smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
2255		     const char *path, struct cifs_sb_info *cifs_sb,
2256		     struct cifs_fid *fid, __u16 search_flags,
2257		     struct cifs_search_info *srch_inf)
2258{
2259	__le16 *utf16_path;
2260	struct smb_rqst rqst[2];
2261	struct kvec rsp_iov[2];
2262	int resp_buftype[2];
2263	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
2264	struct kvec qd_iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
2265	int rc, flags = 0;
2266	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2267	struct cifs_open_parms oparms;
2268	struct smb2_query_directory_rsp *qd_rsp = NULL;
2269	struct smb2_create_rsp *op_rsp = NULL;
2270	struct TCP_Server_Info *server;
2271	int retries = 0, cur_sleep = 1;
2272
2273replay_again:
2274	/* reinitialize for possible replay */
2275	flags = 0;
2276	oplock = SMB2_OPLOCK_LEVEL_NONE;
2277	server = cifs_pick_channel(tcon->ses);
2278
2279	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2280	if (!utf16_path)
2281		return -ENOMEM;
2282
2283	if (smb3_encryption_required(tcon))
2284		flags |= CIFS_TRANSFORM_REQ;
2285
2286	memset(rqst, 0, sizeof(rqst));
2287	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
2288	memset(rsp_iov, 0, sizeof(rsp_iov));
2289
2290	/* Open */
2291	memset(&open_iov, 0, sizeof(open_iov));
2292	rqst[0].rq_iov = open_iov;
2293	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2294
2295	oparms = (struct cifs_open_parms) {
2296		.tcon = tcon,
2297		.path = path,
2298		.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA,
2299		.disposition = FILE_OPEN,
2300		.create_options = cifs_create_options(cifs_sb, 0),
2301		.fid = fid,
2302		.replay = !!(retries),
2303	};
2304
2305	rc = SMB2_open_init(tcon, server,
2306			    &rqst[0], &oplock, &oparms, utf16_path);
2307	if (rc)
2308		goto qdf_free;
2309	smb2_set_next_command(tcon, &rqst[0]);
2310
2311	/* Query directory */
2312	srch_inf->entries_in_buffer = 0;
2313	srch_inf->index_of_last_entry = 2;
2314
2315	memset(&qd_iov, 0, sizeof(qd_iov));
2316	rqst[1].rq_iov = qd_iov;
2317	rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
2318
2319	rc = SMB2_query_directory_init(xid, tcon, server,
2320				       &rqst[1],
2321				       COMPOUND_FID, COMPOUND_FID,
2322				       0, srch_inf->info_level);
2323	if (rc)
2324		goto qdf_free;
2325
2326	smb2_set_related(&rqst[1]);
2327
2328	if (retries) {
2329		smb2_set_replay(server, &rqst[0]);
2330		smb2_set_replay(server, &rqst[1]);
2331	}
2332
2333	rc = compound_send_recv(xid, tcon->ses, server,
2334				flags, 2, rqst,
2335				resp_buftype, rsp_iov);
2336
2337	/* If the open failed there is nothing to do */
2338	op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
2339	if (op_rsp == NULL || op_rsp->hdr.Status != STATUS_SUCCESS) {
2340		cifs_dbg(FYI, "query_dir_first: open failed rc=%d\n", rc);
2341		goto qdf_free;
2342	}
2343	fid->persistent_fid = op_rsp->PersistentFileId;
2344	fid->volatile_fid = op_rsp->VolatileFileId;
2345
	/* Anything other than -ENODATA means a genuine error */
2347	if (rc && rc != -ENODATA) {
2348		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2349		cifs_dbg(FYI, "query_dir_first: query directory failed rc=%d\n", rc);
2350		trace_smb3_query_dir_err(xid, fid->persistent_fid,
2351					 tcon->tid, tcon->ses->Suid, 0, 0, rc);
2352		goto qdf_free;
2353	}
2354
2355	atomic_inc(&tcon->num_remote_opens);
2356
2357	qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
2358	if (qd_rsp->hdr.Status == STATUS_NO_MORE_FILES) {
2359		trace_smb3_query_dir_done(xid, fid->persistent_fid,
2360					  tcon->tid, tcon->ses->Suid, 0, 0);
2361		srch_inf->endOfSearch = true;
2362		rc = 0;
2363		goto qdf_free;
2364	}
2365
2366	rc = smb2_parse_query_directory(tcon, &rsp_iov[1], resp_buftype[1],
2367					srch_inf);
2368	if (rc) {
2369		trace_smb3_query_dir_err(xid, fid->persistent_fid, tcon->tid,
2370			tcon->ses->Suid, 0, 0, rc);
2371		goto qdf_free;
2372	}
2373	resp_buftype[1] = CIFS_NO_BUFFER;
2374
2375	trace_smb3_query_dir_done(xid, fid->persistent_fid, tcon->tid,
2376			tcon->ses->Suid, 0, srch_inf->entries_in_buffer);
2377
2378 qdf_free:
2379	kfree(utf16_path);
2380	SMB2_open_free(&rqst[0]);
2381	SMB2_query_directory_free(&rqst[1]);
2382	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2383	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
2384
2385	if (is_replayable_error(rc) &&
2386	    smb2_should_replay(tcon, &retries, &cur_sleep))
2387		goto replay_again;
2388
2389	return rc;
2390}
2391
2392static int
2393smb2_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon,
2394		    struct cifs_fid *fid, __u16 search_flags,
2395		    struct cifs_search_info *srch_inf)
2396{
2397	return SMB2_query_directory(xid, tcon, fid->persistent_fid,
2398				    fid->volatile_fid, 0, srch_inf);
2399}
2400
2401static int
2402smb2_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
2403	       struct cifs_fid *fid)
2404{
2405	return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
2406}
2407
/*
 * If we negotiated the SMB2 protocol and got STATUS_PENDING, update
 * the number of credits and return true. Otherwise return false.
 */
2412static bool
2413smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
2414{
2415	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2416	int scredits, in_flight;
2417
2418	if (shdr->Status != STATUS_PENDING)
2419		return false;
2420
2421	if (shdr->CreditRequest) {
2422		spin_lock(&server->req_lock);
2423		server->credits += le16_to_cpu(shdr->CreditRequest);
2424		scredits = server->credits;
2425		in_flight = server->in_flight;
2426		spin_unlock(&server->req_lock);
2427		wake_up(&server->request_q);
2428
2429		trace_smb3_pend_credits(server->CurrentMid,
2430				server->conn_id, server->hostname, scredits,
2431				le16_to_cpu(shdr->CreditRequest), in_flight);
2432		cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
2433				__func__, le16_to_cpu(shdr->CreditRequest), scredits);
2434	}
2435
2436	return true;
2437}
2438
2439static bool
2440smb2_is_session_expired(char *buf)
2441{
2442	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2443
2444	if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED &&
2445	    shdr->Status != STATUS_USER_SESSION_DELETED)
2446		return false;
2447
2448	trace_smb3_ses_expired(le32_to_cpu(shdr->Id.SyncId.TreeId),
2449			       le64_to_cpu(shdr->SessionId),
2450			       le16_to_cpu(shdr->Command),
2451			       le64_to_cpu(shdr->MessageId));
2452	cifs_dbg(FYI, "Session expired or deleted\n");
2453
2454	return true;
2455}
2456
2457static bool
2458smb2_is_status_io_timeout(char *buf)
2459{
2460	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2461
2462	if (shdr->Status == STATUS_IO_TIMEOUT)
2463		return true;
2464	else
2465		return false;
2466}
2467
2468static bool
2469smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
2470{
2471	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
2472	struct TCP_Server_Info *pserver;
2473	struct cifs_ses *ses;
2474	struct cifs_tcon *tcon;
2475
2476	if (shdr->Status != STATUS_NETWORK_NAME_DELETED)
2477		return false;
2478
2479	/* If server is a channel, select the primary channel */
2480	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
2481
2482	spin_lock(&cifs_tcp_ses_lock);
2483	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
2484		if (cifs_ses_exiting(ses))
2485			continue;
2486		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
2487			if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
2488				spin_lock(&tcon->tc_lock);
2489				tcon->need_reconnect = true;
2490				spin_unlock(&tcon->tc_lock);
2491				spin_unlock(&cifs_tcp_ses_lock);
2492				pr_warn_once("Server share %s deleted.\n",
2493					     tcon->tree_name);
2494				return true;
2495			}
2496		}
2497	}
2498	spin_unlock(&cifs_tcp_ses_lock);
2499
2500	return false;
2501}
2502
2503static int
2504smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
2505		__u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
2506{
2507	if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
2508		return SMB2_lease_break(0, tcon, cinode->lease_key,
2509					smb2_get_lease_state(cinode));
2510
2511	return SMB2_oplock_break(0, tcon, persistent_fid, volatile_fid,
2512				 CIFS_CACHE_READ(cinode) ? 1 : 0);
2513}
2514
2515void
2516smb2_set_replay(struct TCP_Server_Info *server, struct smb_rqst *rqst)
2517{
2518	struct smb2_hdr *shdr;
2519
2520	if (server->dialect < SMB30_PROT_ID)
2521		return;
2522
2523	shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
2524	if (shdr == NULL) {
		cifs_dbg(FYI, "shdr NULL in smb2_set_replay\n");
2526		return;
2527	}
2528	shdr->Flags |= SMB2_FLAGS_REPLAY_OPERATION;
2529}
2530
2531void
2532smb2_set_related(struct smb_rqst *rqst)
2533{
2534	struct smb2_hdr *shdr;
2535
2536	shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
2537	if (shdr == NULL) {
2538		cifs_dbg(FYI, "shdr NULL in smb2_set_related\n");
2539		return;
2540	}
2541	shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
2542}
2543
2544char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
2545
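/*
 * Set the NextCommand field of this request's SMB2 header to the
 * 8-byte aligned length of the request so that the next PDU in the
 * compound starts on an aligned boundary.  Padding is normally carried
 * in an extra iov; when the request will be encrypted the iovs are
 * flattened into one buffer and the padding appended there instead.
 */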
2546void
2547smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
2548{
2549	struct smb2_hdr *shdr;
2550	struct cifs_ses *ses = tcon->ses;
2551	struct TCP_Server_Info *server = ses->server;
2552	unsigned long len = smb_rqst_len(server, rqst);
2553	int i, num_padding;
2554
2555	shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
2556	if (shdr == NULL) {
2557		cifs_dbg(FYI, "shdr NULL in smb2_set_next_command\n");
2558		return;
2559	}
2560
2561	/* SMB headers in a compound are 8 byte aligned. */
2562
2563	/* No padding needed */
2564	if (!(len & 7))
2565		goto finished;
2566
2567	num_padding = 8 - (len & 7);
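	/* e.g. a 100 byte request needs 4 bytes of padding: NextCommand = 104 */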
2568	if (!smb3_encryption_required(tcon)) {
2569		/*
2570		 * If we do not have encryption then we can just add an extra
2571		 * iov for the padding.
2572		 */
2573		rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
2574		rqst->rq_iov[rqst->rq_nvec].iov_len = num_padding;
2575		rqst->rq_nvec++;
2576		len += num_padding;
2577	} else {
2578		/*
2579		 * We can not add a small padding iov for the encryption case
2580		 * because the encryption framework can not handle the padding
2581		 * iovs.
2582		 * We have to flatten this into a single buffer and add
2583		 * the padding to it.
2584		 */
2585		for (i = 1; i < rqst->rq_nvec; i++) {
2586			memcpy(rqst->rq_iov[0].iov_base +
2587			       rqst->rq_iov[0].iov_len,
2588			       rqst->rq_iov[i].iov_base,
2589			       rqst->rq_iov[i].iov_len);
2590			rqst->rq_iov[0].iov_len += rqst->rq_iov[i].iov_len;
2591		}
2592		memset(rqst->rq_iov[0].iov_base + rqst->rq_iov[0].iov_len,
2593		       0, num_padding);
2594		rqst->rq_iov[0].iov_len += num_padding;
2595		len += num_padding;
2596		rqst->rq_nvec = 1;
2597	}
2598
2599 finished:
2600	shdr->NextCommand = cpu_to_le32(len);
2601}
2602
/*
 * Helper for exponential backoff before replaying a request: sleep for
 * *pcur_sleep ms, double it (capped at CIFS_MAX_SLEEP), and return true
 * if the operation should be retried.
 */
2606bool smb2_should_replay(struct cifs_tcon *tcon,
2607				int *pretries,
2608				int *pcur_sleep)
2609{
2610	if (!pretries || !pcur_sleep)
2611		return false;
2612
2613	if (tcon->retry || (*pretries)++ < tcon->ses->server->retrans) {
2614		msleep(*pcur_sleep);
2615		(*pcur_sleep) = ((*pcur_sleep) << 1);
2616		if ((*pcur_sleep) > CIFS_MAX_SLEEP)
2617			(*pcur_sleep) = CIFS_MAX_SLEEP;
2618		return true;
2619	}
2620
2621	return false;
2622}
2623
/*
 * Passes the query info response back to the caller on success.
 * The caller needs to free it with free_rsp_buf().
 */
2628int
2629smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
2630			 const char *path, u32 desired_access,
2631			 u32 class, u32 type, u32 output_len,
2632			 struct kvec *rsp, int *buftype,
2633			 struct cifs_sb_info *cifs_sb)
2634{
2635	struct smb2_compound_vars *vars;
2636	struct cifs_ses *ses = tcon->ses;
2637	struct TCP_Server_Info *server;
2638	int flags = CIFS_CP_CREATE_CLOSE_OP;
2639	struct smb_rqst *rqst;
2640	int resp_buftype[3];
2641	struct kvec *rsp_iov;
2642	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2643	struct cifs_open_parms oparms;
2644	struct cifs_fid fid;
2645	int rc;
2646	__le16 *utf16_path;
2647	struct cached_fid *cfid = NULL;
2648	int retries = 0, cur_sleep = 1;
2649
2650replay_again:
2651	/* reinitialize for possible replay */
2652	flags = CIFS_CP_CREATE_CLOSE_OP;
2653	oplock = SMB2_OPLOCK_LEVEL_NONE;
2654	server = cifs_pick_channel(ses);
2655
2656	if (!path)
2657		path = "";
2658	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
2659	if (!utf16_path)
2660		return -ENOMEM;
2661
2662	if (smb3_encryption_required(tcon))
2663		flags |= CIFS_TRANSFORM_REQ;
2664
2665	resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
2666	vars = kzalloc(sizeof(*vars), GFP_KERNEL);
2667	if (!vars) {
2668		rc = -ENOMEM;
2669		goto out_free_path;
2670	}
2671	rqst = vars->rqst;
2672	rsp_iov = vars->rsp_iov;
2673
	/*
	 * We can only use a cached directory handle for things we know are
	 * directories, which here is only the root of the share.
	 */
2677	if (!strcmp(path, ""))
2678		open_cached_dir(xid, tcon, path, cifs_sb, false,
2679				&cfid); /* cfid null if open dir failed */
2680
2681	rqst[0].rq_iov = vars->open_iov;
2682	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
2683
2684	oparms = (struct cifs_open_parms) {
2685		.tcon = tcon,
2686		.path = path,
2687		.desired_access = desired_access,
2688		.disposition = FILE_OPEN,
2689		.create_options = cifs_create_options(cifs_sb, 0),
2690		.fid = &fid,
2691		.replay = !!(retries),
2692	};
2693
2694	rc = SMB2_open_init(tcon, server,
2695			    &rqst[0], &oplock, &oparms, utf16_path);
2696	if (rc)
2697		goto qic_exit;
2698	smb2_set_next_command(tcon, &rqst[0]);
2699
2700	rqst[1].rq_iov = &vars->qi_iov;
2701	rqst[1].rq_nvec = 1;
2702
2703	if (cfid) {
2704		rc = SMB2_query_info_init(tcon, server,
2705					  &rqst[1],
2706					  cfid->fid.persistent_fid,
2707					  cfid->fid.volatile_fid,
2708					  class, type, 0,
2709					  output_len, 0,
2710					  NULL);
2711	} else {
2712		rc = SMB2_query_info_init(tcon, server,
2713					  &rqst[1],
2714					  COMPOUND_FID,
2715					  COMPOUND_FID,
2716					  class, type, 0,
2717					  output_len, 0,
2718					  NULL);
2719	}
2720	if (rc)
2721		goto qic_exit;
2722	if (!cfid) {
2723		smb2_set_next_command(tcon, &rqst[1]);
2724		smb2_set_related(&rqst[1]);
2725	}
2726
2727	rqst[2].rq_iov = &vars->close_iov;
2728	rqst[2].rq_nvec = 1;
2729
2730	rc = SMB2_close_init(tcon, server,
2731			     &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
2732	if (rc)
2733		goto qic_exit;
2734	smb2_set_related(&rqst[2]);
2735
2736	if (retries) {
2737		if (!cfid) {
2738			smb2_set_replay(server, &rqst[0]);
2739			smb2_set_replay(server, &rqst[2]);
2740		}
2741		smb2_set_replay(server, &rqst[1]);
2742	}
2743
2744	if (cfid) {
2745		rc = compound_send_recv(xid, ses, server,
2746					flags, 1, &rqst[1],
2747					&resp_buftype[1], &rsp_iov[1]);
2748	} else {
2749		rc = compound_send_recv(xid, ses, server,
2750					flags, 3, rqst,
2751					resp_buftype, rsp_iov);
2752	}
2753	if (rc) {
2754		free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
2755		if (rc == -EREMCHG) {
2756			tcon->need_reconnect = true;
2757			pr_warn_once("server share %s deleted\n",
2758				     tcon->tree_name);
2759		}
2760		goto qic_exit;
2761	}
2762	*rsp = rsp_iov[1];
2763	*buftype = resp_buftype[1];
2764
2765 qic_exit:
2766	SMB2_open_free(&rqst[0]);
2767	SMB2_query_info_free(&rqst[1]);
2768	SMB2_close_free(&rqst[2]);
2769	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
2770	free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
2771	if (cfid)
2772		close_cached_dir(cfid);
2773	kfree(vars);
2774out_free_path:
2775	kfree(utf16_path);
2776
2777	if (is_replayable_error(rc) &&
2778	    smb2_should_replay(tcon, &retries, &cur_sleep))
2779		goto replay_again;
2780
2781	return rc;
2782}
2783
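/*
 * statfs for an SMB2/SMB3 mount: query FS_FULL_SIZE_INFORMATION for the
 * root of the share with a compound open/query/close and convert the
 * result into the kstatfs buffer.
 */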
2784static int
2785smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2786	     struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
2787{
2788	struct smb2_query_info_rsp *rsp;
2789	struct smb2_fs_full_size_info *info = NULL;
2790	struct kvec rsp_iov = {NULL, 0};
2791	int buftype = CIFS_NO_BUFFER;
2792	int rc;
2793
2795	rc = smb2_query_info_compound(xid, tcon, "",
2796				      FILE_READ_ATTRIBUTES,
2797				      FS_FULL_SIZE_INFORMATION,
2798				      SMB2_O_INFO_FILESYSTEM,
2799				      sizeof(struct smb2_fs_full_size_info),
2800				      &rsp_iov, &buftype, cifs_sb);
2801	if (rc)
2802		goto qfs_exit;
2803
2804	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
2805	buf->f_type = SMB2_SUPER_MAGIC;
2806	info = (struct smb2_fs_full_size_info *)(
2807		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
2808	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
2809			       le32_to_cpu(rsp->OutputBufferLength),
2810			       &rsp_iov,
2811			       sizeof(struct smb2_fs_full_size_info));
2812	if (!rc)
2813		smb2_copy_fs_info_to_kstatfs(info, buf);
2814
2815qfs_exit:
2816	trace_smb3_qfs_done(xid, tcon->tid, tcon->ses->Suid, tcon->tree_name, rc);
2817	free_rsp_buf(buftype, rsp_iov.iov_base);
2818	return rc;
2819}
2820
2821static int
2822smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
2823	       struct cifs_sb_info *cifs_sb, struct kstatfs *buf)
2824{
2825	int rc;
2826	__le16 srch_path = 0; /* Null - open root of share */
2827	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
2828	struct cifs_open_parms oparms;
2829	struct cifs_fid fid;
2830
2831	if (!tcon->posix_extensions)
2832		return smb2_queryfs(xid, tcon, cifs_sb, buf);
2833
2834	oparms = (struct cifs_open_parms) {
2835		.tcon = tcon,
2836		.path = "",
2837		.desired_access = FILE_READ_ATTRIBUTES,
2838		.disposition = FILE_OPEN,
2839		.create_options = cifs_create_options(cifs_sb, 0),
2840		.fid = &fid,
2841	};
2842
2843	rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL,
2844		       NULL, NULL);
2845	if (rc)
2846		return rc;
2847
2848	rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
2849				   fid.volatile_fid, buf);
2850	buf->f_type = SMB2_SUPER_MAGIC;
2851	SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
2852	return rc;
2853}
2854
2855static bool
2856smb2_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
2857{
2858	return ob1->fid.persistent_fid == ob2->fid.persistent_fid &&
2859	       ob1->fid.volatile_fid == ob2->fid.volatile_fid;
2860}
2861
2862static int
2863smb2_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
2864	       __u64 length, __u32 type, int lock, int unlock, bool wait)
2865{
2866	if (unlock && !lock)
2867		type = SMB2_LOCKFLAG_UNLOCK;
2868	return SMB2_lock(xid, tlink_tcon(cfile->tlink),
2869			 cfile->fid.persistent_fid, cfile->fid.volatile_fid,
2870			 current->tgid, length, offset, type, wait);
2871}
2872
2873static void
2874smb2_get_lease_key(struct inode *inode, struct cifs_fid *fid)
2875{
2876	memcpy(fid->lease_key, CIFS_I(inode)->lease_key, SMB2_LEASE_KEY_SIZE);
2877}
2878
2879static void
2880smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
2881{
2882	memcpy(CIFS_I(inode)->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
2883}
2884
2885static void
2886smb2_new_lease_key(struct cifs_fid *fid)
2887{
2888	generate_random_uuid(fid->lease_key);
2889}
2890
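/*
 * Resolve a DFS referral for search_name: send FSCTL_DFS_GET_REFERRALS
 * on the IPC tcon (or, failing that, any tcon of the session), retrying
 * transient errors a few times, and parse the response into
 * dfs_info3_param entries for the caller.
 */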
2891static int
2892smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2893		   const char *search_name,
2894		   struct dfs_info3_param **target_nodes,
2895		   unsigned int *num_of_nodes,
2896		   const struct nls_table *nls_codepage, int remap)
2897{
2898	int rc;
2899	__le16 *utf16_path = NULL;
2900	int utf16_path_len = 0;
2901	struct cifs_tcon *tcon;
2902	struct fsctl_get_dfs_referral_req *dfs_req = NULL;
2903	struct get_dfs_referral_rsp *dfs_rsp = NULL;
2904	u32 dfs_req_size = 0, dfs_rsp_size = 0;
2905	int retry_count = 0;
2906
2907	cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
2908
2909	/*
2910	 * Try to use the IPC tcon, otherwise just use any
2911	 */
2912	tcon = ses->tcon_ipc;
2913	if (tcon == NULL) {
2914		spin_lock(&cifs_tcp_ses_lock);
2915		tcon = list_first_entry_or_null(&ses->tcon_list,
2916						struct cifs_tcon,
2917						tcon_list);
2918		if (tcon) {
2919			tcon->tc_count++;
2920			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
2921					    netfs_trace_tcon_ref_get_dfs_refer);
2922		}
2923		spin_unlock(&cifs_tcp_ses_lock);
2924	}
2925
2926	if (tcon == NULL) {
2927		cifs_dbg(VFS, "session %p has no tcon available for a dfs referral request\n",
2928			 ses);
2929		rc = -ENOTCONN;
2930		goto out;
2931	}
2932
2933	utf16_path = cifs_strndup_to_utf16(search_name, PATH_MAX,
2934					   &utf16_path_len,
2935					   nls_codepage, remap);
2936	if (!utf16_path) {
2937		rc = -ENOMEM;
2938		goto out;
2939	}
2940
2941	dfs_req_size = sizeof(*dfs_req) + utf16_path_len;
2942	dfs_req = kzalloc(dfs_req_size, GFP_KERNEL);
2943	if (!dfs_req) {
2944		rc = -ENOMEM;
2945		goto out;
2946	}
2947
2948	/* Highest DFS referral version understood */
2949	dfs_req->MaxReferralLevel = DFS_VERSION;
2950
	/* Path to resolve in a UTF-16 null-terminated string */
2952	memcpy(dfs_req->RequestFileName, utf16_path, utf16_path_len);
2953
2954	do {
2955		rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2956				FSCTL_DFS_GET_REFERRALS,
2957				(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
2958				(char **)&dfs_rsp, &dfs_rsp_size);
2959		if (!is_retryable_error(rc))
2960			break;
2961		usleep_range(512, 2048);
2962	} while (++retry_count < 5);
2963
2964	if (!rc && !dfs_rsp)
2965		rc = -EIO;
2966	if (rc) {
2967		if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
2968			cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
2969		goto out;
2970	}
2971
2972	rc = parse_dfs_referrals(dfs_rsp, dfs_rsp_size,
2973				 num_of_nodes, target_nodes,
2974				 nls_codepage, remap, search_name,
2975				 true /* is_unicode */);
2976	if (rc) {
2977		cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc);
2978		goto out;
2979	}
2980
2981 out:
2982	if (tcon && !tcon->ipc) {
2983		/* ipc tcons are not refcounted */
2984		spin_lock(&cifs_tcp_ses_lock);
2985		tcon->tc_count--;
2986		trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
2987				    netfs_trace_tcon_ref_dec_dfs_refer);
2988		/* tc_count can never go negative */
2989		WARN_ON(tcon->tc_count < 0);
2990		spin_unlock(&cifs_tcp_ses_lock);
2991	}
2992	kfree(utf16_path);
2993	kfree(dfs_req);
2994	kfree(dfs_rsp);
2995	return rc;
2996}
2997
2998static struct cifs_ntsd *
2999get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
3000		    const struct cifs_fid *cifsfid, u32 *pacllen, u32 info)
3001{
3002	struct cifs_ntsd *pntsd = NULL;
3003	unsigned int xid;
3004	int rc = -EOPNOTSUPP;
3005	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3006
3007	if (IS_ERR(tlink))
3008		return ERR_CAST(tlink);
3009
3010	xid = get_xid();
3011	cifs_dbg(FYI, "trying to get acl\n");
3012
3013	rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
3014			    cifsfid->volatile_fid, (void **)&pntsd, pacllen,
3015			    info);
3016	free_xid(xid);
3017
3018	cifs_put_tlink(tlink);
3019
3020	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3021	if (rc)
3022		return ERR_PTR(rc);
3023	return pntsd;
3024
3025}
3026
3027static struct cifs_ntsd *
3028get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
3029		     const char *path, u32 *pacllen, u32 info)
3030{
3031	struct cifs_ntsd *pntsd = NULL;
3032	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3033	unsigned int xid;
3034	int rc;
3035	struct cifs_tcon *tcon;
3036	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3037	struct cifs_fid fid;
3038	struct cifs_open_parms oparms;
3039	__le16 *utf16_path;
3040
3041	cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
3042	if (IS_ERR(tlink))
3043		return ERR_CAST(tlink);
3044
3045	tcon = tlink_tcon(tlink);
3046	xid = get_xid();
3047
3048	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
3049	if (!utf16_path) {
3050		rc = -ENOMEM;
3051		free_xid(xid);
3052		return ERR_PTR(rc);
3053	}
3054
3055	oparms = (struct cifs_open_parms) {
3056		.tcon = tcon,
3057		.path = path,
3058		.desired_access = READ_CONTROL,
3059		.disposition = FILE_OPEN,
3060		/*
3061		 * When querying an ACL, even if the file is a symlink
3062		 * we want to open the source not the target, and so
3063		 * the protocol requires that the client specify this
3064		 * flag when opening a reparse point
3065		 */
3066		.create_options = cifs_create_options(cifs_sb, 0) |
3067				  OPEN_REPARSE_POINT,
3068		.fid = &fid,
3069	};
3070
3071	if (info & SACL_SECINFO)
3072		oparms.desired_access |= SYSTEM_SECURITY;
3073
3074	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL, NULL,
3075		       NULL);
3076	kfree(utf16_path);
3077	if (!rc) {
3078		rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
3079				    fid.volatile_fid, (void **)&pntsd, pacllen,
3080				    info);
3081		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3082	}
3083
3084	cifs_put_tlink(tlink);
3085	free_xid(xid);
3086
3087	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
3088	if (rc)
3089		return ERR_PTR(rc);
3090	return pntsd;
3091}
3092
3093static int
3094set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
3095		struct inode *inode, const char *path, int aclflag)
3096{
3097	u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
3098	unsigned int xid;
3099	int rc, access_flags = 0;
3100	struct cifs_tcon *tcon;
3101	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3102	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
3103	struct cifs_fid fid;
3104	struct cifs_open_parms oparms;
3105	__le16 *utf16_path;
3106
3107	cifs_dbg(FYI, "set smb3 acl for path %s\n", path);
3108	if (IS_ERR(tlink))
3109		return PTR_ERR(tlink);
3110
3111	tcon = tlink_tcon(tlink);
3112	xid = get_xid();
3113
3114	if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP)
3115		access_flags |= WRITE_OWNER;
3116	if (aclflag & CIFS_ACL_SACL)
3117		access_flags |= SYSTEM_SECURITY;
3118	if (aclflag & CIFS_ACL_DACL)
3119		access_flags |= WRITE_DAC;
3120
3121	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
3122	if (!utf16_path) {
3123		rc = -ENOMEM;
3124		free_xid(xid);
3125		return rc;
3126	}
3127
3128	oparms = (struct cifs_open_parms) {
3129		.tcon = tcon,
3130		.desired_access = access_flags,
3131		.create_options = cifs_create_options(cifs_sb, 0),
3132		.disposition = FILE_OPEN,
3133		.path = path,
3134		.fid = &fid,
3135	};
3136
3137	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
3138		       NULL, NULL);
3139	kfree(utf16_path);
3140	if (!rc) {
3141		rc = SMB2_set_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
3142			    fid.volatile_fid, pnntsd, acllen, aclflag);
3143		SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
3144	}
3145
3146	cifs_put_tlink(tlink);
3147	free_xid(xid);
3148	return rc;
3149}
3150
3151/* Retrieve an ACL from the server */
3152static struct cifs_ntsd *
3153get_smb2_acl(struct cifs_sb_info *cifs_sb,
3154	     struct inode *inode, const char *path,
3155	     u32 *pacllen, u32 info)
3156{
3157	struct cifs_ntsd *pntsd = NULL;
3158	struct cifsFileInfo *open_file = NULL;
3159
3160	if (inode && !(info & SACL_SECINFO))
3161		open_file = find_readable_file(CIFS_I(inode), true);
3162	if (!open_file || (info & SACL_SECINFO))
3163		return get_smb2_acl_by_path(cifs_sb, path, pacllen, info);
3164
3165	pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info);
3166	cifsFileInfo_put(open_file);
3167	return pntsd;
3168}
3169
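/*
 * Ask the server to zero the byte range [offset, offset + len) with
 * FSCTL_SET_ZERO_DATA.  The fsctl does not change the file size;
 * callers that may be extending the file adjust EOF separately.
 */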
3170static long smb3_zero_data(struct file *file, struct cifs_tcon *tcon,
3171			     loff_t offset, loff_t len, unsigned int xid)
3172{
3173	struct cifsFileInfo *cfile = file->private_data;
3174	struct file_zero_data_information fsctl_buf;
3175
3176	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
3177
3178	fsctl_buf.FileOffset = cpu_to_le64(offset);
3179	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3180
3181	return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3182			  cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
3183			  (char *)&fsctl_buf,
3184			  sizeof(struct file_zero_data_information),
3185			  0, NULL, NULL);
3186}
3187
3188static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
3189			    loff_t offset, loff_t len, bool keep_size)
3190{
3191	struct cifs_ses *ses = tcon->ses;
3192	struct inode *inode = file_inode(file);
3193	struct cifsInodeInfo *cifsi = CIFS_I(inode);
3194	struct cifsFileInfo *cfile = file->private_data;
3195	unsigned long long new_size;
3196	long rc;
3197	unsigned int xid;
3198
3199	xid = get_xid();
3200
3201	trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
3202			      ses->Suid, offset, len);
3203
3204	inode_lock(inode);
3205	filemap_invalidate_lock(inode->i_mapping);
3206
	/*
	 * We zero the range via an ioctl, so we need to remove the page cache
	 * first, otherwise the data may be inconsistent with the server.
	 */
3211	truncate_pagecache_range(inode, offset, offset + len - 1);
3212
3213	/* if file not oplocked can't be sure whether asking to extend size */
3214	rc = -EOPNOTSUPP;
3215	if (keep_size == false && !CIFS_CACHE_READ(cifsi))
3216		goto zero_range_exit;
3217
3218	rc = smb3_zero_data(file, tcon, offset, len, xid);
3219	if (rc < 0)
3220		goto zero_range_exit;
3221
3222	/*
3223	 * do we also need to change the size of the file?
3224	 */
3225	new_size = offset + len;
3226	if (keep_size == false && (unsigned long long)i_size_read(inode) < new_size) {
3227		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3228				  cfile->fid.volatile_fid, cfile->pid, new_size);
3229		if (rc >= 0) {
3230			truncate_setsize(inode, new_size);
3231			netfs_resize_file(&cifsi->netfs, new_size, true);
3232			if (offset < cifsi->netfs.zero_point)
3233				cifsi->netfs.zero_point = offset;
3234			fscache_resize_cookie(cifs_inode_cookie(inode), new_size);
3235		}
3236	}
3237
3238 zero_range_exit:
3239	filemap_invalidate_unlock(inode->i_mapping);
3240	inode_unlock(inode);
3241	free_xid(xid);
3242	if (rc)
3243		trace_smb3_zero_err(xid, cfile->fid.persistent_fid, tcon->tid,
3244			      ses->Suid, offset, len, rc);
3245	else
3246		trace_smb3_zero_done(xid, cfile->fid.persistent_fid, tcon->tid,
3247			      ses->Suid, offset, len);
3248	return rc;
3249}
3250
3251static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
3252			    loff_t offset, loff_t len)
3253{
3254	struct inode *inode = file_inode(file);
3255	struct cifsFileInfo *cfile = file->private_data;
3256	struct file_zero_data_information fsctl_buf;
3257	long rc;
3258	unsigned int xid;
3259	__u8 set_sparse = 1;
3260
3261	xid = get_xid();
3262
3263	inode_lock(inode);
3264	/* Need to make file sparse, if not already, before freeing range. */
3265	/* Consider adding equivalent for compressed since it could also work */
3266	if (!smb2_set_sparse(xid, tcon, cfile, inode, set_sparse)) {
3267		rc = -EOPNOTSUPP;
3268		goto out;
3269	}
3270
3271	filemap_invalidate_lock(inode->i_mapping);
	/*
	 * We implement the punch hole via an ioctl, so we need to remove the
	 * page cache first, otherwise the data may be inconsistent with the
	 * server.
	 */
3276	truncate_pagecache_range(inode, offset, offset + len - 1);
3277
3278	cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
3279
3280	fsctl_buf.FileOffset = cpu_to_le64(offset);
3281	fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
3282
3283	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3284			cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
3285			(char *)&fsctl_buf,
3286			sizeof(struct file_zero_data_information),
3287			CIFSMaxBufSize, NULL, NULL);
3288	filemap_invalidate_unlock(inode->i_mapping);
3289out:
3290	inode_unlock(inode);
3291	free_xid(xid);
3292	return rc;
3293}
3294
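/*
 * Overwrite the range [off, off + len) with zeroes from the caller's
 * prezeroed buffer, issuing SMB2 writes of at most SMB2_MAX_BUFFER_SIZE
 * bytes at a time.
 */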
3295static int smb3_simple_fallocate_write_range(unsigned int xid,
3296					     struct cifs_tcon *tcon,
3297					     struct cifsFileInfo *cfile,
3298					     loff_t off, loff_t len,
3299					     char *buf)
3300{
3301	struct cifs_io_parms io_parms = {0};
3302	int nbytes;
3303	int rc = 0;
3304	struct kvec iov[2];
3305
3306	io_parms.netfid = cfile->fid.netfid;
3307	io_parms.pid = current->tgid;
3308	io_parms.tcon = tcon;
3309	io_parms.persistent_fid = cfile->fid.persistent_fid;
3310	io_parms.volatile_fid = cfile->fid.volatile_fid;
3311
3312	while (len) {
3313		io_parms.offset = off;
3314		io_parms.length = len;
3315		if (io_parms.length > SMB2_MAX_BUFFER_SIZE)
3316			io_parms.length = SMB2_MAX_BUFFER_SIZE;
3317		/* iov[0] is reserved for smb header */
3318		iov[1].iov_base = buf;
3319		iov[1].iov_len = io_parms.length;
3320		rc = SMB2_write(xid, &io_parms, &nbytes, iov, 1);
3321		if (rc)
3322			break;
3323		if (nbytes > len)
3324			return -EINVAL;
3325		buf += nbytes;
3326		off += nbytes;
3327		len -= nbytes;
3328	}
3329	return rc;
3330}
3331
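/*
 * Emulate fallocate of an interior range of a sparse file: query the
 * server for the allocated ranges overlapping [off, off + len) and
 * write zeroes only over the holes, skipping regions that are already
 * allocated.
 */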
3332static int smb3_simple_fallocate_range(unsigned int xid,
3333				       struct cifs_tcon *tcon,
3334				       struct cifsFileInfo *cfile,
3335				       loff_t off, loff_t len)
3336{
3337	struct file_allocated_range_buffer in_data, *out_data = NULL, *tmp_data;
3338	u32 out_data_len;
3339	char *buf = NULL;
3340	loff_t l;
3341	int rc;
3342
3343	in_data.file_offset = cpu_to_le64(off);
3344	in_data.length = cpu_to_le64(len);
3345	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3346			cfile->fid.volatile_fid,
3347			FSCTL_QUERY_ALLOCATED_RANGES,
3348			(char *)&in_data, sizeof(in_data),
3349			1024 * sizeof(struct file_allocated_range_buffer),
3350			(char **)&out_data, &out_data_len);
3351	if (rc)
3352		goto out;
3353
3354	buf = kzalloc(1024 * 1024, GFP_KERNEL);
3355	if (buf == NULL) {
3356		rc = -ENOMEM;
3357		goto out;
3358	}
3359
3360	tmp_data = out_data;
3361	while (len) {
3362		/*
3363		 * The rest of the region is unmapped so write it all.
3364		 */
3365		if (out_data_len == 0) {
3366			rc = smb3_simple_fallocate_write_range(xid, tcon,
3367					       cfile, off, len, buf);
3368			goto out;
3369		}
3370
3371		if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3372			rc = -EINVAL;
3373			goto out;
3374		}
3375
3376		if (off < le64_to_cpu(tmp_data->file_offset)) {
3377			/*
3378			 * We are at a hole. Write until the end of the region
3379			 * or until the next allocated data,
3380			 * whichever comes next.
3381			 */
3382			l = le64_to_cpu(tmp_data->file_offset) - off;
3383			if (len < l)
3384				l = len;
3385			rc = smb3_simple_fallocate_write_range(xid, tcon,
3386					       cfile, off, l, buf);
3387			if (rc)
3388				goto out;
3389			off = off + l;
3390			len = len - l;
3391			if (len == 0)
3392				goto out;
3393		}
3394		/*
3395		 * We are at a section of allocated data, just skip forward
3396		 * until the end of the data or the end of the region
3397		 * we are supposed to fallocate, whichever comes first.
3398		 */
3399		l = le64_to_cpu(tmp_data->length);
3400		if (len < l)
3401			l = len;
3402		off += l;
3403		len -= l;
3404
3405		tmp_data = &tmp_data[1];
3406		out_data_len -= sizeof(struct file_allocated_range_buffer);
3407	}
3408
3409 out:
3410	kfree(out_data);
3411	kfree(buf);
3412	return rc;
3413}
3415
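/*
 * Emulate a simple fallocate: extend EOF when the request goes past the
 * current size (unless keep_size), do nothing for non-sparse files
 * (already fully allocated), write zeroes over small interior regions
 * of sparse files, or make the whole file non-sparse when the request
 * covers virtually all of it.
 */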
3416static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
3417			    loff_t off, loff_t len, bool keep_size)
3418{
3419	struct inode *inode;
3420	struct cifsInodeInfo *cifsi;
3421	struct cifsFileInfo *cfile = file->private_data;
3422	long rc = -EOPNOTSUPP;
3423	unsigned int xid;
3424	loff_t new_eof;
3425
3426	xid = get_xid();
3427
3428	inode = d_inode(cfile->dentry);
3429	cifsi = CIFS_I(inode);
3430
3431	trace_smb3_falloc_enter(xid, cfile->fid.persistent_fid, tcon->tid,
3432				tcon->ses->Suid, off, len);
3433	/* if file not oplocked can't be sure whether asking to extend size */
3434	if (!CIFS_CACHE_READ(cifsi))
3435		if (keep_size == false) {
3436			trace_smb3_falloc_err(xid, cfile->fid.persistent_fid,
3437				tcon->tid, tcon->ses->Suid, off, len, rc);
3438			free_xid(xid);
3439			return rc;
3440		}
3441
3442	/*
3443	 * Extending the file
3444	 */
3445	if ((keep_size == false) && i_size_read(inode) < off + len) {
3446		rc = inode_newsize_ok(inode, off + len);
3447		if (rc)
3448			goto out;
3449
3450		if (cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)
3451			smb2_set_sparse(xid, tcon, cfile, inode, false);
3452
3453		new_eof = off + len;
3454		rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3455				  cfile->fid.volatile_fid, cfile->pid, new_eof);
3456		if (rc == 0) {
3457			netfs_resize_file(&cifsi->netfs, new_eof, true);
3458			cifs_setsize(inode, new_eof);
3459			cifs_truncate_page(inode->i_mapping, inode->i_size);
3460			truncate_setsize(inode, new_eof);
3461		}
3462		goto out;
3463	}
3464
	/*
	 * Files are non-sparse by default, so falloc may be a no-op.
	 * Must check whether the file is sparse. If it is not sparse, and
	 * since we are not extending, there is nothing to do: the range is
	 * already allocated.
	 */
3470	if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) == 0) {
3471		rc = 0;
3472		goto out;
3473	}
3474
3475	if (keep_size == true) {
3476		/*
3477		 * We can not preallocate pages beyond the end of the file
3478		 * in SMB2
3479		 */
3480		if (off >= i_size_read(inode)) {
3481			rc = 0;
3482			goto out;
3483		}
3484		/*
3485		 * For fallocates that are partially beyond the end of file,
3486		 * clamp len so we only fallocate up to the end of file.
3487		 */
3488		if (off + len > i_size_read(inode)) {
3489			len = i_size_read(inode) - off;
3490		}
3491	}
3492
3493	if ((keep_size == true) || (i_size_read(inode) >= off + len)) {
		/*
		 * At this point, we are trying to fallocate an internal
		 * region of a sparse file. Since smb2 does not have a
		 * fallocate command we have two options for emulating this.
		 * We can either turn the entire file non-sparse, which we
		 * only do if the fallocate covers virtually the whole file,
		 * or we can overwrite the region with zeroes using SMB2_write,
		 * which could be prohibitively expensive if len is large.
		 */
		/*
		 * We are only trying to fallocate a small region, so just
		 * write it with zeroes.
		 */
3508		if (len <= 1024 * 1024) {
3509			rc = smb3_simple_fallocate_range(xid, tcon, cfile,
3510							 off, len);
3511			goto out;
3512		}
3513
3514		/*
3515		 * Check if falloc starts within first few pages of file
3516		 * and ends within a few pages of the end of file to
3517		 * ensure that most of file is being forced to be
3518		 * fallocated now. If so then setting whole file sparse
3519		 * ie potentially making a few extra pages at the beginning
3520		 * or end of the file non-sparse via set_sparse is harmless.
3521		 */
3522		if ((off > 8192) || (off + len + 8192 < i_size_read(inode))) {
3523			rc = -EOPNOTSUPP;
3524			goto out;
3525		}
3526	}
3527
3528	smb2_set_sparse(xid, tcon, cfile, inode, false);
3529	rc = 0;
3530
3531out:
3532	if (rc)
3533		trace_smb3_falloc_err(xid, cfile->fid.persistent_fid, tcon->tid,
3534				tcon->ses->Suid, off, len, rc);
3535	else
3536		trace_smb3_falloc_done(xid, cfile->fid.persistent_fid, tcon->tid,
3537				tcon->ses->Suid, off, len);
3538
3539	free_xid(xid);
3540	return rc;
3541}
3542
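/*
 * Emulate FALLOC_FL_COLLAPSE_RANGE: server-side copy the data that
 * follows the removed range down over it, then shrink EOF by len.
 * The affected page cache is dropped first.
 */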
3543static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
3544			    loff_t off, loff_t len)
3545{
3546	int rc;
3547	unsigned int xid;
3548	struct inode *inode = file_inode(file);
3549	struct cifsInodeInfo *cifsi = CIFS_I(inode);
3550	struct cifsFileInfo *cfile = file->private_data;
3551	struct netfs_inode *ictx = &cifsi->netfs;
3552	loff_t old_eof, new_eof;
3553
3554	xid = get_xid();
3555
3556	inode_lock(inode);
3557
3558	old_eof = i_size_read(inode);
3559	if ((off >= old_eof) ||
3560	    off + len >= old_eof) {
3561		rc = -EINVAL;
3562		goto out;
3563	}
3564
3565	filemap_invalidate_lock(inode->i_mapping);
3566	rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof - 1);
3567	if (rc < 0)
3568		goto out_2;
3569
3570	truncate_pagecache_range(inode, off, old_eof);
3571	ictx->zero_point = old_eof;
3572
3573	rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
3574				  old_eof - off - len, off);
3575	if (rc < 0)
3576		goto out_2;
3577
3578	new_eof = old_eof - len;
3579	rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3580			  cfile->fid.volatile_fid, cfile->pid, new_eof);
3581	if (rc < 0)
3582		goto out_2;
3583
3584	rc = 0;
3585
3586	truncate_setsize(inode, new_eof);
3587	netfs_resize_file(&cifsi->netfs, new_eof, true);
3588	ictx->zero_point = new_eof;
3589	fscache_resize_cookie(cifs_inode_cookie(inode), new_eof);
3590out_2:
3591	filemap_invalidate_unlock(inode->i_mapping);
3592 out:
3593	inode_unlock(inode);
3594	free_xid(xid);
3595	return rc;
3596}
3597
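/*
 * Emulate FALLOC_FL_INSERT_RANGE: grow EOF by len, server-side copy the
 * old tail up by len, then zero the newly created gap at [off, off + len).
 */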
3598static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
3599			      loff_t off, loff_t len)
3600{
3601	int rc;
3602	unsigned int xid;
3603	struct cifsFileInfo *cfile = file->private_data;
3604	struct inode *inode = file_inode(file);
3605	struct cifsInodeInfo *cifsi = CIFS_I(inode);
3606	__u64 count, old_eof, new_eof;
3607
3608	xid = get_xid();
3609
3610	inode_lock(inode);
3611
3612	old_eof = i_size_read(inode);
3613	if (off >= old_eof) {
3614		rc = -EINVAL;
3615		goto out;
3616	}
3617
3618	count = old_eof - off;
3619	new_eof = old_eof + len;
3620
3621	filemap_invalidate_lock(inode->i_mapping);
3622	rc = filemap_write_and_wait_range(inode->i_mapping, off, new_eof - 1);
3623	if (rc < 0)
3624		goto out_2;
3625	truncate_pagecache_range(inode, off, old_eof);
3626
3627	rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
3628			  cfile->fid.volatile_fid, cfile->pid, new_eof);
3629	if (rc < 0)
3630		goto out_2;
3631
3632	truncate_setsize(inode, new_eof);
3633	netfs_resize_file(&cifsi->netfs, i_size_read(inode), true);
3634	fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode));
3635
3636	rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
3637	if (rc < 0)
3638		goto out_2;
3639
3640	rc = smb3_zero_data(file, tcon, off, len, xid);
3641	if (rc < 0)
3642		goto out_2;
3643
3644	rc = 0;
3645out_2:
3646	filemap_invalidate_unlock(inode->i_mapping);
3647 out:
3648	inode_unlock(inode);
3649	free_xid(xid);
3650	return rc;
3651}
3652
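/*
 * SEEK_DATA/SEEK_HOLE support via FSCTL_QUERY_ALLOCATED_RANGES.  Dirty
 * pages are flushed first since some servers do not reflect recent
 * writes in the allocated ranges until a flush.  Files that are not
 * sparse are treated as fully allocated.
 */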
3653static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offset, int whence)
3654{
3655	struct cifsFileInfo *wrcfile, *cfile = file->private_data;
3656	struct cifsInodeInfo *cifsi;
3657	struct inode *inode;
3658	int rc = 0;
3659	struct file_allocated_range_buffer in_data, *out_data = NULL;
3660	u32 out_data_len;
3661	unsigned int xid;
3662
3663	if (whence != SEEK_HOLE && whence != SEEK_DATA)
3664		return generic_file_llseek(file, offset, whence);
3665
3666	inode = d_inode(cfile->dentry);
3667	cifsi = CIFS_I(inode);
3668
3669	if (offset < 0 || offset >= i_size_read(inode))
3670		return -ENXIO;
3671
3672	xid = get_xid();
3673	/*
3674	 * We need to be sure that all dirty pages are written as they
3675	 * might fill holes on the server.
3676	 * Note that we also MUST flush any written pages since at least
3677	 * some servers (Windows2016) will not reflect recent writes in
3678	 * QUERY_ALLOCATED_RANGES until SMB2_flush is called.
3679	 */
3680	wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
3681	if (wrcfile) {
3682		filemap_write_and_wait(inode->i_mapping);
3683		smb2_flush_file(xid, tcon, &wrcfile->fid);
3684		cifsFileInfo_put(wrcfile);
3685	}
3686
3687	if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE)) {
3688		if (whence == SEEK_HOLE)
3689			offset = i_size_read(inode);
3690		goto lseek_exit;
3691	}
3692
3693	in_data.file_offset = cpu_to_le64(offset);
3694	in_data.length = cpu_to_le64(i_size_read(inode));
3695
3696	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3697			cfile->fid.volatile_fid,
3698			FSCTL_QUERY_ALLOCATED_RANGES,
3699			(char *)&in_data, sizeof(in_data),
3700			sizeof(struct file_allocated_range_buffer),
3701			(char **)&out_data, &out_data_len);
3702	if (rc == -E2BIG)
3703		rc = 0;
3704	if (rc)
3705		goto lseek_exit;
3706
3707	if (whence == SEEK_HOLE && out_data_len == 0)
3708		goto lseek_exit;
3709
3710	if (whence == SEEK_DATA && out_data_len == 0) {
3711		rc = -ENXIO;
3712		goto lseek_exit;
3713	}
3714
3715	if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
3716		rc = -EINVAL;
3717		goto lseek_exit;
3718	}
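	/*
	 * For SEEK_DATA, the start of the first allocated range returned by
	 * the server is the answer.  For SEEK_HOLE, offset is already in a
	 * hole if it lies before that range; otherwise the hole begins where
	 * the range ends.
	 */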
3719	if (whence == SEEK_DATA) {
3720		offset = le64_to_cpu(out_data->file_offset);
3721		goto lseek_exit;
3722	}
3723	if (offset < le64_to_cpu(out_data->file_offset))
3724		goto lseek_exit;
3725
3726	offset = le64_to_cpu(out_data->file_offset) + le64_to_cpu(out_data->length);
3727
3728 lseek_exit:
3729	free_xid(xid);
3730	kfree(out_data);
3731	if (!rc)
3732		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3733	else
3734		return rc;
3735}
3736
3737static int smb3_fiemap(struct cifs_tcon *tcon,
3738		       struct cifsFileInfo *cfile,
3739		       struct fiemap_extent_info *fei, u64 start, u64 len)
3740{
3741	unsigned int xid;
3742	struct file_allocated_range_buffer in_data, *out_data;
3743	u32 out_data_len;
3744	int i, num, rc, flags, last_blob;
3745	u64 next;
3746
3747	rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0);
3748	if (rc)
3749		return rc;
3750
3751	xid = get_xid();
3752 again:
3753	in_data.file_offset = cpu_to_le64(start);
3754	in_data.length = cpu_to_le64(len);
3755
3756	rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
3757			cfile->fid.volatile_fid,
3758			FSCTL_QUERY_ALLOCATED_RANGES,
3759			(char *)&in_data, sizeof(in_data),
3760			1024 * sizeof(struct file_allocated_range_buffer),
3761			(char **)&out_data, &out_data_len);
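	/*
	 * -E2BIG means the reply did not fit in the 1024-range output buffer
	 * we offered, so more ranges remain; fetch them in the loop below.
	 */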
3762	if (rc == -E2BIG) {
3763		last_blob = 0;
3764		rc = 0;
3765	} else
3766		last_blob = 1;
3767	if (rc)
3768		goto out;
3769
3770	if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
3771		rc = -EINVAL;
3772		goto out;
3773	}
3774	if (out_data_len % sizeof(struct file_allocated_range_buffer)) {
3775		rc = -EINVAL;
3776		goto out;
3777	}
3778
3779	num = out_data_len / sizeof(struct file_allocated_range_buffer);
3780	for (i = 0; i < num; i++) {
3781		flags = 0;
3782		if (i == num - 1 && last_blob)
3783			flags |= FIEMAP_EXTENT_LAST;
3784
3785		rc = fiemap_fill_next_extent(fei,
3786				le64_to_cpu(out_data[i].file_offset),
3787				le64_to_cpu(out_data[i].file_offset),
3788				le64_to_cpu(out_data[i].length),
3789				flags);
3790		if (rc < 0)
3791			goto out;
3792		if (rc == 1) {
3793			rc = 0;
3794			goto out;
3795		}
3796	}
3797
3798	if (!last_blob) {
3799		next = le64_to_cpu(out_data[num - 1].file_offset) +
3800		  le64_to_cpu(out_data[num - 1].length);
3801		len = len - (next - start);
3802		start = next;
3803		goto again;
3804	}
3805
3806 out:
3807	free_xid(xid);
3808	kfree(out_data);
3809	return rc;
3810}
3811
3812static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
3813			   loff_t off, loff_t len)
3814{
3815	/* KEEP_SIZE already checked for by do_fallocate */
3816	if (mode & FALLOC_FL_PUNCH_HOLE)
3817		return smb3_punch_hole(file, tcon, off, len);
3818	else if (mode & FALLOC_FL_ZERO_RANGE) {
3819		if (mode & FALLOC_FL_KEEP_SIZE)
3820			return smb3_zero_range(file, tcon, off, len, true);
3821		return smb3_zero_range(file, tcon, off, len, false);
3822	} else if (mode == FALLOC_FL_KEEP_SIZE)
3823		return smb3_simple_falloc(file, tcon, off, len, true);
3824	else if (mode == FALLOC_FL_COLLAPSE_RANGE)
3825		return smb3_collapse_range(file, tcon, off, len);
3826	else if (mode == FALLOC_FL_INSERT_RANGE)
3827		return smb3_insert_range(file, tcon, off, len);
3828	else if (mode == 0)
3829		return smb3_simple_falloc(file, tcon, off, len, false);
3830
3831	return -EOPNOTSUPP;
3832}
3833
3834static void
3835smb2_downgrade_oplock(struct TCP_Server_Info *server,
3836		      struct cifsInodeInfo *cinode, __u32 oplock,
3837		      unsigned int epoch, bool *purge_cache)
3838{
3839	server->ops->set_oplock_level(cinode, oplock, 0, NULL);
3840}
3841
3842static void
3843smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3844		       unsigned int epoch, bool *purge_cache);
3845
3846static void
3847smb3_downgrade_oplock(struct TCP_Server_Info *server,
3848		       struct cifsInodeInfo *cinode, __u32 oplock,
3849		       unsigned int epoch, bool *purge_cache)
3850{
3851	unsigned int old_state = cinode->oplock;
3852	unsigned int old_epoch = cinode->epoch;
3853	unsigned int new_state;
3854
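	/* Only apply the new lease state if the server advanced the epoch */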
3855	if (epoch > old_epoch) {
3856		smb21_set_oplock_level(cinode, oplock, 0, NULL);
3857		cinode->epoch = epoch;
3858	}
3859
3860	new_state = cinode->oplock;
3861	*purge_cache = false;
3862
3863	if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
3864	    (new_state & CIFS_CACHE_READ_FLG) == 0)
3865		*purge_cache = true;
3866	else if (old_state == new_state && (epoch - old_epoch > 1))
3867		*purge_cache = true;
3868}
3869
3870static void
3871smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3872		      unsigned int epoch, bool *purge_cache)
3873{
3874	oplock &= 0xFF;
3875	cinode->lease_granted = false;
3876	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3877		return;
3878	if (oplock == SMB2_OPLOCK_LEVEL_BATCH) {
3879		cinode->oplock = CIFS_CACHE_RHW_FLG;
3880		cifs_dbg(FYI, "Batch Oplock granted on inode %p\n",
3881			 &cinode->netfs.inode);
3882	} else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
3883		cinode->oplock = CIFS_CACHE_RW_FLG;
3884		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
3885			 &cinode->netfs.inode);
3886	} else if (oplock == SMB2_OPLOCK_LEVEL_II) {
3887		cinode->oplock = CIFS_CACHE_READ_FLG;
3888		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
3889			 &cinode->netfs.inode);
3890	} else
3891		cinode->oplock = 0;
3892}
3893
3894static void
3895smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3896		       unsigned int epoch, bool *purge_cache)
3897{
3898	char message[5] = {0};
3899	unsigned int new_oplock = 0;
3900
3901	oplock &= 0xFF;
3902	cinode->lease_granted = true;
3903	if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
3904		return;
3905
3906	/* Check if the server granted an oplock rather than a lease */
3907	if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3908		return smb2_set_oplock_level(cinode, oplock, epoch,
3909					     purge_cache);
3910
3911	if (oplock & SMB2_LEASE_READ_CACHING_HE) {
3912		new_oplock |= CIFS_CACHE_READ_FLG;
3913		strcat(message, "R");
3914	}
3915	if (oplock & SMB2_LEASE_HANDLE_CACHING_HE) {
3916		new_oplock |= CIFS_CACHE_HANDLE_FLG;
3917		strcat(message, "H");
3918	}
3919	if (oplock & SMB2_LEASE_WRITE_CACHING_HE) {
3920		new_oplock |= CIFS_CACHE_WRITE_FLG;
3921		strcat(message, "W");
3922	}
3923	if (!new_oplock)
3924		strscpy(message, "None");
3925
3926	cinode->oplock = new_oplock;
3927	cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
3928		 &cinode->netfs.inode);
3929}
3930
3931static void
3932smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
3933		      unsigned int epoch, bool *purge_cache)
3934{
3935	unsigned int old_oplock = cinode->oplock;
3936
3937	smb21_set_oplock_level(cinode, oplock, epoch, purge_cache);
3938
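	/*
	 * Decide whether cached data must be purged based on how far the
	 * epoch advanced relative to the change in lease state.
	 */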
3939	if (purge_cache) {
3940		*purge_cache = false;
3941		if (old_oplock == CIFS_CACHE_READ_FLG) {
3942			if (cinode->oplock == CIFS_CACHE_READ_FLG &&
3943			    (epoch - cinode->epoch > 0))
3944				*purge_cache = true;
3945			else if (cinode->oplock == CIFS_CACHE_RH_FLG &&
3946				 (epoch - cinode->epoch > 1))
3947				*purge_cache = true;
3948			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
3949				 (epoch - cinode->epoch > 1))
3950				*purge_cache = true;
3951			else if (cinode->oplock == 0 &&
3952				 (epoch - cinode->epoch > 0))
3953				*purge_cache = true;
3954		} else if (old_oplock == CIFS_CACHE_RH_FLG) {
3955			if (cinode->oplock == CIFS_CACHE_RH_FLG &&
3956			    (epoch - cinode->epoch > 0))
3957				*purge_cache = true;
3958			else if (cinode->oplock == CIFS_CACHE_RHW_FLG &&
3959				 (epoch - cinode->epoch > 1))
3960				*purge_cache = true;
3961		}
3962		cinode->epoch = epoch;
3963	}
3964}
3965
3966#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
3967static bool
3968smb2_is_read_op(__u32 oplock)
3969{
3970	return oplock == SMB2_OPLOCK_LEVEL_II;
3971}
3972#endif /* CIFS_ALLOW_INSECURE_LEGACY */
3973
3974static bool
3975smb21_is_read_op(__u32 oplock)
3976{
3977	return (oplock & SMB2_LEASE_READ_CACHING_HE) &&
3978	       !(oplock & SMB2_LEASE_WRITE_CACHING_HE);
3979}
3980
3981static __le32
3982map_oplock_to_lease(u8 oplock)
3983{
3984	if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE)
3985		return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE;
3986	else if (oplock == SMB2_OPLOCK_LEVEL_II)
3987		return SMB2_LEASE_READ_CACHING_LE;
3988	else if (oplock == SMB2_OPLOCK_LEVEL_BATCH)
3989		return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE |
3990		       SMB2_LEASE_WRITE_CACHING_LE;
3991	return 0;
3992}
3993
3994static char *
3995smb2_create_lease_buf(u8 *lease_key, u8 oplock)
3996{
3997	struct create_lease *buf;
3998
3999	buf = kzalloc(sizeof(struct create_lease), GFP_KERNEL);
4000	if (!buf)
4001		return NULL;
4002
4003	memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
4004	buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
4005
4006	buf->ccontext.DataOffset = cpu_to_le16(offsetof
4007					(struct create_lease, lcontext));
4008	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context));
4009	buf->ccontext.NameOffset = cpu_to_le16(offsetof
4010				(struct create_lease, Name));
4011	buf->ccontext.NameLength = cpu_to_le16(4);
4012	/* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
4013	buf->Name[0] = 'R';
4014	buf->Name[1] = 'q';
4015	buf->Name[2] = 'L';
4016	buf->Name[3] = 's';
4017	return (char *)buf;
4018}
4019
4020static char *
4021smb3_create_lease_buf(u8 *lease_key, u8 oplock)
4022{
4023	struct create_lease_v2 *buf;
4024
4025	buf = kzalloc(sizeof(struct create_lease_v2), GFP_KERNEL);
4026	if (!buf)
4027		return NULL;
4028
4029	memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
4030	buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
4031
4032	buf->ccontext.DataOffset = cpu_to_le16(offsetof
4033					(struct create_lease_v2, lcontext));
4034	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
4035	buf->ccontext.NameOffset = cpu_to_le16(offsetof
4036				(struct create_lease_v2, Name));
4037	buf->ccontext.NameLength = cpu_to_le16(4);
4038	/* SMB2_CREATE_REQUEST_LEASE is "RqLs" */
4039	buf->Name[0] = 'R';
4040	buf->Name[1] = 'q';
4041	buf->Name[2] = 'L';
4042	buf->Name[3] = 's';
4043	return (char *)buf;
4044}
4045
4046static __u8
4047smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
4048{
4049	struct create_lease *lc = (struct create_lease *)buf;
4050
4051	*epoch = 0; /* not used */
4052	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
4053		return SMB2_OPLOCK_LEVEL_NOCHANGE;
4054	return le32_to_cpu(lc->lcontext.LeaseState);
4055}
4056
4057static __u8
4058smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
4059{
4060	struct create_lease_v2 *lc = (struct create_lease_v2 *)buf;
4061
4062	*epoch = le16_to_cpu(lc->lcontext.Epoch);
4063	if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE)
4064		return SMB2_OPLOCK_LEVEL_NOCHANGE;
4065	if (lease_key)
4066		memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
4067	return le32_to_cpu(lc->lcontext.LeaseState);
4068}
4069
4070static unsigned int
4071smb2_wp_retry_size(struct inode *inode)
4072{
4073	return min_t(unsigned int, CIFS_SB(inode->i_sb)->ctx->wsize,
4074		     SMB2_MAX_BUFFER_SIZE);
4075}
4076
4077static bool
4078smb2_dir_needs_close(struct cifsFileInfo *cfile)
4079{
4080	return !cfile->invalidHandle;
4081}
4082
4083static void
4084fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
4085		   struct smb_rqst *old_rq, __le16 cipher_type)
4086{
4087	struct smb2_hdr *shdr =
4088			(struct smb2_hdr *)old_rq->rq_iov[0].iov_base;
4089
4090	memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
4091	tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
4092	tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
4093	tr_hdr->Flags = cpu_to_le16(0x01);
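	/* GCM ciphers use a 12-byte nonce, CCM ciphers an 11-byte one */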
4094	if ((cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
4095	    (cipher_type == SMB2_ENCRYPTION_AES256_GCM))
4096		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
4097	else
4098		get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
4099	memcpy(&tr_hdr->SessionId, &shdr->SessionId, 8);
4100}
4101
4102static void *smb2_aead_req_alloc(struct crypto_aead *tfm, const struct smb_rqst *rqst,
4103				 int num_rqst, const u8 *sig, u8 **iv,
4104				 struct aead_request **req, struct sg_table *sgt,
4105				 unsigned int *num_sgs, size_t *sensitive_size)
4106{
4107	unsigned int req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
4108	unsigned int iv_size = crypto_aead_ivsize(tfm);
4109	unsigned int len;
4110	u8 *p;
4111
4112	*num_sgs = cifs_get_num_sgs(rqst, num_rqst, sig);
4113	if (IS_ERR_VALUE((long)(int)*num_sgs))
4114		return ERR_PTR(*num_sgs);
4115
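	/*
	 * Lay the IV, the aead_request and the scatterlist array out in a
	 * single allocation, keeping each part suitably aligned.
	 */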
4116	len = iv_size;
4117	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
4118	len = ALIGN(len, crypto_tfm_ctx_alignment());
4119	len += req_size;
4120	len = ALIGN(len, __alignof__(struct scatterlist));
4121	len += array_size(*num_sgs, sizeof(struct scatterlist));
4122	*sensitive_size = len;
4123
4124	p = kvzalloc(len, GFP_NOFS);
4125	if (!p)
4126		return ERR_PTR(-ENOMEM);
4127
4128	*iv = (u8 *)PTR_ALIGN(p, crypto_aead_alignmask(tfm) + 1);
4129	*req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
4130						crypto_tfm_ctx_alignment());
4131	sgt->sgl = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
4132						   __alignof__(struct scatterlist));
4133	return p;
4134}
4135
4136static void *smb2_get_aead_req(struct crypto_aead *tfm, struct smb_rqst *rqst,
4137			       int num_rqst, const u8 *sig, u8 **iv,
4138			       struct aead_request **req, struct scatterlist **sgl,
4139			       size_t *sensitive_size)
4140{
4141	struct sg_table sgtable = {};
4142	unsigned int skip, num_sgs, i, j;
4143	ssize_t rc;
4144	void *p;
4145
4146	p = smb2_aead_req_alloc(tfm, rqst, num_rqst, sig, iv, req, &sgtable,
4147				&num_sgs, sensitive_size);
4148	if (IS_ERR(p))
4149		return ERR_CAST(p);
4150
4151	sg_init_marker(sgtable.sgl, num_sgs);
4152
	/*
	 * The first rqst has a transform header where the first 20 bytes
	 * (ProtocolId + Signature) are not part of the encrypted blob.
	 */
4157	skip = 20;
4158
4159	for (i = 0; i < num_rqst; i++) {
4160		struct iov_iter *iter = &rqst[i].rq_iter;
4161		size_t count = iov_iter_count(iter);
4162
4163		for (j = 0; j < rqst[i].rq_nvec; j++) {
4164			cifs_sg_set_buf(&sgtable,
4165					rqst[i].rq_iov[j].iov_base + skip,
4166					rqst[i].rq_iov[j].iov_len - skip);
4167
4168			/* See the above comment on the 'skip' assignment */
4169			skip = 0;
4170		}
4171		sgtable.orig_nents = sgtable.nents;
4172
4173		rc = extract_iter_to_sg(iter, count, &sgtable,
4174					num_sgs - sgtable.nents, 0);
4175		iov_iter_revert(iter, rc);
4176		sgtable.orig_nents = sgtable.nents;
4177	}
4178
4179	cifs_sg_set_buf(&sgtable, sig, SMB2_SIGNATURE_SIZE);
4180	sg_mark_end(&sgtable.sgl[sgtable.nents - 1]);
4181	*sgl = sgtable.sgl;
4182	return p;
4183}
4184
4185static int
4186smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
4187{
4188	struct TCP_Server_Info *pserver;
4189	struct cifs_ses *ses;
4190	u8 *ses_enc_key;
4191
4192	/* If server is a channel, select the primary channel */
4193	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
4194
4195	spin_lock(&cifs_tcp_ses_lock);
4196	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
4197		if (ses->Suid == ses_id) {
4198			spin_lock(&ses->ses_lock);
4199			ses_enc_key = enc ? ses->smb3encryptionkey :
4200				ses->smb3decryptionkey;
4201			memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
4202			spin_unlock(&ses->ses_lock);
4203			spin_unlock(&cifs_tcp_ses_lock);
4204			return 0;
4205		}
4206	}
4207	spin_unlock(&cifs_tcp_ses_lock);
4208
4209	trace_smb3_ses_not_found(ses_id);
4210
4211	return -EAGAIN;
4212}
4213/*
4214 * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
 * iov[0]   - transform header (associated data),
4216 * iov[1-N] - SMB2 header and pages - data to encrypt.
4217 * On success return encrypted data in iov[1-N] and pages, leave iov[0]
4218 * untouched.
4219 */
4220static int
4221crypt_message(struct TCP_Server_Info *server, int num_rqst,
4222	      struct smb_rqst *rqst, int enc)
4223{
4224	struct smb2_transform_hdr *tr_hdr =
4225		(struct smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base;
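	/* AAD is the transform header minus ProtocolId and Signature (20 bytes) */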
4226	unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
4227	int rc = 0;
4228	struct scatterlist *sg;
4229	u8 sign[SMB2_SIGNATURE_SIZE] = {};
4230	u8 key[SMB3_ENC_DEC_KEY_SIZE];
4231	struct aead_request *req;
4232	u8 *iv;
4233	DECLARE_CRYPTO_WAIT(wait);
4234	struct crypto_aead *tfm;
4235	unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4236	void *creq;
4237	size_t sensitive_size;
4238
4239	rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
4240	if (rc) {
4241		cifs_server_dbg(FYI, "%s: Could not get %scryption key. sid: 0x%llx\n", __func__,
4242			 enc ? "en" : "de", le64_to_cpu(tr_hdr->SessionId));
4243		return rc;
4244	}
4245
4246	rc = smb3_crypto_aead_allocate(server);
4247	if (rc) {
4248		cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__);
4249		return rc;
4250	}
4251
4252	tfm = enc ? server->secmech.enc : server->secmech.dec;
4253
4254	if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
4255		(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
4256		rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
4257	else
4258		rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
4259
4260	if (rc) {
4261		cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
4262		return rc;
4263	}
4264
4265	rc = crypto_aead_setauthsize(tfm, SMB2_SIGNATURE_SIZE);
4266	if (rc) {
4267		cifs_server_dbg(VFS, "%s: Failed to set authsize %d\n", __func__, rc);
4268		return rc;
4269	}
4270
4271	creq = smb2_get_aead_req(tfm, rqst, num_rqst, sign, &iv, &req, &sg,
4272				 &sensitive_size);
4273	if (IS_ERR(creq))
4274		return PTR_ERR(creq);
4275
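	/*
	 * For decryption, the signature from the transform header is the
	 * AEAD tag and must be verified along with the ciphertext.
	 */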
4276	if (!enc) {
4277		memcpy(sign, &tr_hdr->Signature, SMB2_SIGNATURE_SIZE);
4278		crypt_len += SMB2_SIGNATURE_SIZE;
4279	}
4280
4281	if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) ||
4282	    (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
4283		memcpy(iv, (char *)tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
4284	else {
4285		iv[0] = 3;
4286		memcpy(iv + 1, (char *)tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
4287	}
4288
4289	aead_request_set_tfm(req, tfm);
4290	aead_request_set_crypt(req, sg, sg, crypt_len, iv);
4291	aead_request_set_ad(req, assoc_data_len);
4292
4293	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
4294				  crypto_req_done, &wait);
4295
4296	rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
4297				: crypto_aead_decrypt(req), &wait);
4298
4299	if (!rc && enc)
4300		memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
4301
4302	kvfree_sensitive(creq, sensitive_size);
4303	return rc;
4304}
4305
4306/*
4307 * Clear a read buffer, discarding the folios which have XA_MARK_0 set.
4308 */
4309static void cifs_clear_xarray_buffer(struct xarray *buffer)
4310{
4311	struct folio *folio;
4312
4313	XA_STATE(xas, buffer, 0);
4314
4315	rcu_read_lock();
4316	xas_for_each_marked(&xas, folio, ULONG_MAX, XA_MARK_0) {
4317		folio_put(folio);
4318	}
4319	rcu_read_unlock();
4320	xa_destroy(buffer);
4321}
4322
4323void
4324smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
4325{
4326	int i;
4327
4328	for (i = 0; i < num_rqst; i++)
4329		if (!xa_empty(&rqst[i].rq_buffer))
4330			cifs_clear_xarray_buffer(&rqst[i].rq_buffer);
4331}
4332
4333/*
4334 * This function will initialize new_rq and encrypt the content.
4335 * The first entry, new_rq[0], only contains a single iov which contains
4336 * a smb2_transform_hdr and is pre-allocated by the caller.
 * This function then populates new_rq[1+] with the content from old_rq[0+].
4338 *
4339 * The end result is an array of smb_rqst structures where the first structure
4340 * only contains a single iov for the transform header which we then can pass
4341 * to crypt_message().
4342 *
4343 * new_rq[0].rq_iov[0] :  smb2_transform_hdr pre-allocated by the caller
4344 * new_rq[1+].rq_iov[*] == old_rq[0+].rq_iov[*] : SMB2/3 requests
4345 */
4346static int
4347smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
4348		       struct smb_rqst *new_rq, struct smb_rqst *old_rq)
4349{
4350	struct smb2_transform_hdr *tr_hdr = new_rq[0].rq_iov[0].iov_base;
4351	struct page *page;
4352	unsigned int orig_len = 0;
4353	int i, j;
4354	int rc = -ENOMEM;
4355
4356	for (i = 1; i < num_rqst; i++) {
4357		struct smb_rqst *old = &old_rq[i - 1];
4358		struct smb_rqst *new = &new_rq[i];
4359		struct xarray *buffer = &new->rq_buffer;
4360		size_t size = iov_iter_count(&old->rq_iter), seg, copied = 0;
4361
4362		orig_len += smb_rqst_len(server, old);
4363		new->rq_iov = old->rq_iov;
4364		new->rq_nvec = old->rq_nvec;
4365
4366		xa_init(buffer);
4367
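		/*
		 * Copy the payload of the original request into freshly
		 * allocated pages so that encryption does not scribble over
		 * the caller's buffers.
		 */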
4368		if (size > 0) {
4369			unsigned int npages = DIV_ROUND_UP(size, PAGE_SIZE);
4370
4371			for (j = 0; j < npages; j++) {
4372				void *o;
4373
4374				rc = -ENOMEM;
4375				page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
4376				if (!page)
4377					goto err_free;
4378				page->index = j;
4379				o = xa_store(buffer, j, page, GFP_KERNEL);
4380				if (xa_is_err(o)) {
4381					rc = xa_err(o);
4382					put_page(page);
4383					goto err_free;
4384				}
4385
4386				xa_set_mark(buffer, j, XA_MARK_0);
4387
4388				seg = min_t(size_t, size - copied, PAGE_SIZE);
4389				if (copy_page_from_iter(page, 0, seg, &old->rq_iter) != seg) {
4390					rc = -EFAULT;
4391					goto err_free;
4392				}
4393				copied += seg;
4394			}
4395			iov_iter_xarray(&new->rq_iter, ITER_SOURCE,
4396					buffer, 0, size);
4397			new->rq_iter_size = size;
4398		}
4399	}
4400
4401	/* fill the 1st iov with a transform header */
4402	fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type);
4403
4404	rc = crypt_message(server, num_rqst, new_rq, 1);
4405	cifs_dbg(FYI, "Encrypt message returned %d\n", rc);
4406	if (rc)
4407		goto err_free;
4408
4409	return rc;
4410
4411err_free:
4412	smb3_free_compound_rqst(num_rqst - 1, &new_rq[1]);
4413	return rc;
4414}
4415
4416static int
4417smb3_is_transform_hdr(void *buf)
4418{
4419	struct smb2_transform_hdr *trhdr = buf;
4420
4421	return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
4422}
4423
4424static int
4425decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
4426		 unsigned int buf_data_size, struct iov_iter *iter,
4427		 bool is_offloaded)
4428{
4429	struct kvec iov[2];
4430	struct smb_rqst rqst = {NULL};
4431	size_t iter_size = 0;
4432	int rc;
4433
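	/*
	 * iov[0] covers the transform header (left untouched), iov[1] the
	 * encrypted SMB2 message that follows it in buf.
	 */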
4434	iov[0].iov_base = buf;
4435	iov[0].iov_len = sizeof(struct smb2_transform_hdr);
4436	iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
4437	iov[1].iov_len = buf_data_size;
4438
4439	rqst.rq_iov = iov;
4440	rqst.rq_nvec = 2;
4441	if (iter) {
4442		rqst.rq_iter = *iter;
4443		rqst.rq_iter_size = iov_iter_count(iter);
4444		iter_size = iov_iter_count(iter);
4445	}
4446
4447	rc = crypt_message(server, 1, &rqst, 0);
4448	cifs_dbg(FYI, "Decrypt message returned %d\n", rc);
4449
4450	if (rc)
4451		return rc;
4452
4453	memmove(buf, iov[1].iov_base, buf_data_size);
4454
4455	if (!is_offloaded)
4456		server->total_read = buf_data_size + iter_size;
4457
4458	return rc;
4459}
4460
4461static int
4462cifs_copy_pages_to_iter(struct xarray *pages, unsigned int data_size,
4463			unsigned int skip, struct iov_iter *iter)
4464{
4465	struct page *page;
4466	unsigned long index;
4467
4468	xa_for_each(pages, index, page) {
4469		size_t n, len = min_t(unsigned int, PAGE_SIZE - skip, data_size);
4470
4471		n = copy_page_to_iter(page, skip, len, iter);
4472		if (n != len) {
4473			cifs_dbg(VFS, "%s: something went wrong\n", __func__);
4474			return -EIO;
4475		}
4476		data_size -= n;
4477		skip = 0;
4478	}
4479
4480	return 0;
4481}
4482
4483static int
4484handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
4485		 char *buf, unsigned int buf_len, struct xarray *pages,
4486		 unsigned int pages_len, bool is_offloaded)
4487{
4488	unsigned int data_offset;
4489	unsigned int data_len;
4490	unsigned int cur_off;
4491	unsigned int cur_page_idx;
4492	unsigned int pad_len;
4493	struct cifs_readdata *rdata = mid->callback_data;
4494	struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
4495	int length;
4496	bool use_rdma_mr = false;
4497
4498	if (shdr->Command != SMB2_READ) {
4499		cifs_server_dbg(VFS, "only big read responses are supported\n");
4500		return -EOPNOTSUPP;
4501	}
4502
4503	if (server->ops->is_session_expired &&
4504	    server->ops->is_session_expired(buf)) {
4505		if (!is_offloaded)
4506			cifs_reconnect(server, true);
4507		return -1;
4508	}
4509
4510	if (server->ops->is_status_pending &&
4511			server->ops->is_status_pending(buf, server))
4512		return -1;
4513
4514	/* set up first two iov to get credits */
4515	rdata->iov[0].iov_base = buf;
4516	rdata->iov[0].iov_len = 0;
4517	rdata->iov[1].iov_base = buf;
4518	rdata->iov[1].iov_len =
4519		min_t(unsigned int, buf_len, server->vals->read_rsp_size);
4520	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
4521		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
4522	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
4523		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);
4524
4525	rdata->result = server->ops->map_error(buf, true);
4526	if (rdata->result != 0) {
4527		cifs_dbg(FYI, "%s: server returned error %d\n",
4528			 __func__, rdata->result);
4529		/* normal error on read response */
4530		if (is_offloaded)
4531			mid->mid_state = MID_RESPONSE_RECEIVED;
4532		else
4533			dequeue_mid(mid, false);
4534		return 0;
4535	}
4536
4537	data_offset = server->ops->read_data_offset(buf);
4538#ifdef CONFIG_CIFS_SMB_DIRECT
4539	use_rdma_mr = rdata->mr;
4540#endif
4541	data_len = server->ops->read_data_length(buf, use_rdma_mr);
4542
4543	if (data_offset < server->vals->read_rsp_size) {
4544		/*
4545		 * win2k8 sometimes sends an offset of 0 when the read
4546		 * is beyond the EOF. Treat it as if the data starts just after
4547		 * the header.
4548		 */
4549		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
4550			 __func__, data_offset);
4551		data_offset = server->vals->read_rsp_size;
4552	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
4553		/* data_offset is beyond the end of smallbuf */
4554		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
4555			 __func__, data_offset);
4556		rdata->result = -EIO;
4557		if (is_offloaded)
4558			mid->mid_state = MID_RESPONSE_MALFORMED;
4559		else
4560			dequeue_mid(mid, rdata->result);
4561		return 0;
4562	}
4563
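	/* Padding between the read response header and the start of the data */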
4564	pad_len = data_offset - server->vals->read_rsp_size;
4565
4566	if (buf_len <= data_offset) {
4567		/* read response payload is in pages */
4568		cur_page_idx = pad_len / PAGE_SIZE;
4569		cur_off = pad_len % PAGE_SIZE;
4570
4571		if (cur_page_idx != 0) {
4572			/* data offset is beyond the 1st page of response */
4573			cifs_dbg(FYI, "%s: data offset (%u) beyond 1st page of response\n",
4574				 __func__, data_offset);
4575			rdata->result = -EIO;
4576			if (is_offloaded)
4577				mid->mid_state = MID_RESPONSE_MALFORMED;
4578			else
4579				dequeue_mid(mid, rdata->result);
4580			return 0;
4581		}
4582
4583		if (data_len > pages_len - pad_len) {
4584			/* data_len is corrupt -- discard frame */
4585			rdata->result = -EIO;
4586			if (is_offloaded)
4587				mid->mid_state = MID_RESPONSE_MALFORMED;
4588			else
4589				dequeue_mid(mid, rdata->result);
4590			return 0;
4591		}
4592
4593		/* Copy the data to the output I/O iterator. */
4594		rdata->result = cifs_copy_pages_to_iter(pages, pages_len,
4595							cur_off, &rdata->iter);
4596		if (rdata->result != 0) {
4597			if (is_offloaded)
4598				mid->mid_state = MID_RESPONSE_MALFORMED;
4599			else
4600				dequeue_mid(mid, rdata->result);
4601			return 0;
4602		}
4603		rdata->got_bytes = pages_len;
4604
4605	} else if (buf_len >= data_offset + data_len) {
4606		/* read response payload is in buf */
4607		WARN_ONCE(pages && !xa_empty(pages),
4608			  "read data can be either in buf or in pages");
4609		length = copy_to_iter(buf + data_offset, data_len, &rdata->iter);
4610		if (length < 0)
4611			return length;
4612		rdata->got_bytes = data_len;
4613	} else {
4614		/* read response payload cannot be in both buf and pages */
4615		WARN_ONCE(1, "buf can not contain only a part of read data");
4616		rdata->result = -EIO;
4617		if (is_offloaded)
4618			mid->mid_state = MID_RESPONSE_MALFORMED;
4619		else
4620			dequeue_mid(mid, rdata->result);
4621		return 0;
4622	}
4623
4624	if (is_offloaded)
4625		mid->mid_state = MID_RESPONSE_RECEIVED;
4626	else
4627		dequeue_mid(mid, false);
4628	return 0;
4629}
4630
4631struct smb2_decrypt_work {
4632	struct work_struct decrypt;
4633	struct TCP_Server_Info *server;
4634	struct xarray buffer;
4635	char *buf;
4636	unsigned int len;
};

4640static void smb2_decrypt_offload(struct work_struct *work)
4641{
4642	struct smb2_decrypt_work *dw = container_of(work,
4643				struct smb2_decrypt_work, decrypt);
4644	int rc;
4645	struct mid_q_entry *mid;
4646	struct iov_iter iter;
4647
4648	iov_iter_xarray(&iter, ITER_DEST, &dw->buffer, 0, dw->len);
4649	rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
4650			      &iter, true);
4651	if (rc) {
4652		cifs_dbg(VFS, "error decrypting rc=%d\n", rc);
4653		goto free_pages;
4654	}
4655
4656	dw->server->lstrp = jiffies;
4657	mid = smb2_find_dequeue_mid(dw->server, dw->buf);
4658	if (mid == NULL)
4659		cifs_dbg(FYI, "mid not found\n");
4660	else {
4661		mid->decrypted = true;
4662		rc = handle_read_data(dw->server, mid, dw->buf,
4663				      dw->server->vals->read_rsp_size,
4664				      &dw->buffer, dw->len,
4665				      true);
4666		if (rc >= 0) {
4667#ifdef CONFIG_CIFS_STATS2
4668			mid->when_received = jiffies;
4669#endif
4670			if (dw->server->ops->is_network_name_deleted)
4671				dw->server->ops->is_network_name_deleted(dw->buf,
4672									 dw->server);
4673
4674			mid->callback(mid);
4675		} else {
4676			spin_lock(&dw->server->srv_lock);
4677			if (dw->server->tcpStatus == CifsNeedReconnect) {
4678				spin_lock(&dw->server->mid_lock);
4679				mid->mid_state = MID_RETRY_NEEDED;
4680				spin_unlock(&dw->server->mid_lock);
4681				spin_unlock(&dw->server->srv_lock);
4682				mid->callback(mid);
4683			} else {
4684				spin_lock(&dw->server->mid_lock);
4685				mid->mid_state = MID_REQUEST_SUBMITTED;
4686				mid->mid_flags &= ~(MID_DELETED);
4687				list_add_tail(&mid->qhead,
4688					&dw->server->pending_mid_q);
4689				spin_unlock(&dw->server->mid_lock);
4690				spin_unlock(&dw->server->srv_lock);
4691			}
4692		}
4693		release_mid(mid);
4694	}
4695
4696free_pages:
4697	cifs_clear_xarray_buffer(&dw->buffer);
4698	cifs_small_buf_release(dw->buf);
4699	kfree(dw);
}

4703static int
4704receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
4705		       int *num_mids)
4706{
4707	struct page *page;
4708	char *buf = server->smallbuf;
4709	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4710	struct iov_iter iter;
4711	unsigned int len, npages;
4712	unsigned int buflen = server->pdu_size;
4713	int rc;
4714	int i = 0;
4715	struct smb2_decrypt_work *dw;
4716
4717	dw = kzalloc(sizeof(struct smb2_decrypt_work), GFP_KERNEL);
4718	if (!dw)
4719		return -ENOMEM;
4720	xa_init(&dw->buffer);
4721	INIT_WORK(&dw->decrypt, smb2_decrypt_offload);
4722	dw->server = server;
4723
4724	*num_mids = 1;
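	/*
	 * Read only the transform header plus the read response header into
	 * the small buffer; the payload is read into dw->buffer pages below.
	 */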
4725	len = min_t(unsigned int, buflen, server->vals->read_rsp_size +
4726		sizeof(struct smb2_transform_hdr)) - HEADER_SIZE(server) + 1;
4727
4728	rc = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1, len);
4729	if (rc < 0)
4730		goto free_dw;
4731	server->total_read += rc;
4732
4733	len = le32_to_cpu(tr_hdr->OriginalMessageSize) -
4734		server->vals->read_rsp_size;
4735	dw->len = len;
4736	npages = DIV_ROUND_UP(len, PAGE_SIZE);
4737
4738	rc = -ENOMEM;
4739	for (; i < npages; i++) {
4740		void *old;
4741
4742		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
4743		if (!page)
4744			goto discard_data;
4745		page->index = i;
4746		old = xa_store(&dw->buffer, i, page, GFP_KERNEL);
4747		if (xa_is_err(old)) {
4748			rc = xa_err(old);
4749			put_page(page);
4750			goto discard_data;
4751		}
4752		xa_set_mark(&dw->buffer, i, XA_MARK_0);
4753	}
4754
4755	iov_iter_xarray(&iter, ITER_DEST, &dw->buffer, 0, npages * PAGE_SIZE);
4756
	/* Read the data into the buffer and zero any excess buffer space. */
4758	rc = cifs_read_iter_from_socket(server, &iter, dw->len);
4759	if (rc < 0)
4760		goto discard_data;
4761
4762	server->total_read += rc;
4763	if (rc < npages * PAGE_SIZE)
4764		iov_iter_zero(npages * PAGE_SIZE - rc, &iter);
4765	iov_iter_revert(&iter, npages * PAGE_SIZE);
4766	iov_iter_truncate(&iter, dw->len);
4767
4768	rc = cifs_discard_remaining_data(server);
4769	if (rc)
4770		goto free_pages;
4771
	/*
	 * For large reads, offload decryption to a separate thread for
	 * better performance, since decryption is expensive and this lets
	 * it use more cores.
	 */
4776
4777	if ((server->min_offload) && (server->in_flight > 1) &&
4778	    (server->pdu_size >= server->min_offload)) {
4779		dw->buf = server->smallbuf;
4780		server->smallbuf = (char *)cifs_small_buf_get();
4781
4782		queue_work(decrypt_wq, &dw->decrypt);
4783		*num_mids = 0; /* worker thread takes care of finding mid */
4784		return -1;
4785	}
4786
4787	rc = decrypt_raw_data(server, buf, server->vals->read_rsp_size,
4788			      &iter, false);
4789	if (rc)
4790		goto free_pages;
4791
4792	*mid = smb2_find_mid(server, buf);
4793	if (*mid == NULL) {
4794		cifs_dbg(FYI, "mid not found\n");
4795	} else {
4796		cifs_dbg(FYI, "mid found\n");
4797		(*mid)->decrypted = true;
4798		rc = handle_read_data(server, *mid, buf,
4799				      server->vals->read_rsp_size,
4800				      &dw->buffer, dw->len, false);
4801		if (rc >= 0) {
4802			if (server->ops->is_network_name_deleted) {
4803				server->ops->is_network_name_deleted(buf,
4804								server);
4805			}
4806		}
4807	}
4808
4809free_pages:
4810	cifs_clear_xarray_buffer(&dw->buffer);
4811free_dw:
4812	kfree(dw);
4813	return rc;
4814discard_data:
4815	cifs_discard_remaining_data(server);
4816	goto free_pages;
4817}
4818
4819static int
4820receive_encrypted_standard(struct TCP_Server_Info *server,
4821			   struct mid_q_entry **mids, char **bufs,
4822			   int *num_mids)
4823{
4824	int ret, length;
4825	char *buf = server->smallbuf;
4826	struct smb2_hdr *shdr;
4827	unsigned int pdu_length = server->pdu_size;
4828	unsigned int buf_size;
4829	unsigned int next_cmd;
4830	struct mid_q_entry *mid_entry;
4831	int next_is_large;
4832	char *next_buffer = NULL;
4833
4834	*num_mids = 0;
4835
4836	/* switch to large buffer if too big for a small one */
4837	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
4838		server->large_buf = true;
4839		memcpy(server->bigbuf, buf, server->total_read);
4840		buf = server->bigbuf;
4841	}
4842
4843	/* now read the rest */
4844	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
4845				pdu_length - HEADER_SIZE(server) + 1);
4846	if (length < 0)
4847		return length;
4848	server->total_read += length;
4849
4850	buf_size = pdu_length - sizeof(struct smb2_transform_hdr);
4851	length = decrypt_raw_data(server, buf, buf_size, NULL, false);
4852	if (length)
4853		return length;
4854
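	/*
	 * The decrypted buffer may hold several compounded SMB2 replies;
	 * walk them via NextCommand and hand each one to its mid.
	 */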
4855	next_is_large = server->large_buf;
4856one_more:
4857	shdr = (struct smb2_hdr *)buf;
4858	next_cmd = le32_to_cpu(shdr->NextCommand);
4859	if (next_cmd) {
4860		if (WARN_ON_ONCE(next_cmd > pdu_length))
4861			return -1;
4862		if (next_is_large)
4863			next_buffer = (char *)cifs_buf_get();
4864		else
4865			next_buffer = (char *)cifs_small_buf_get();
4866		memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd);
4867	}
4868
4869	mid_entry = smb2_find_mid(server, buf);
4870	if (mid_entry == NULL)
4871		cifs_dbg(FYI, "mid not found\n");
4872	else {
4873		cifs_dbg(FYI, "mid found\n");
4874		mid_entry->decrypted = true;
4875		mid_entry->resp_buf_size = server->pdu_size;
4876	}
4877
4878	if (*num_mids >= MAX_COMPOUND) {
4879		cifs_server_dbg(VFS, "too many PDUs in compound\n");
4880		return -1;
4881	}
4882	bufs[*num_mids] = buf;
4883	mids[(*num_mids)++] = mid_entry;
4884
4885	if (mid_entry && mid_entry->handle)
4886		ret = mid_entry->handle(server, mid_entry);
4887	else
4888		ret = cifs_handle_standard(server, mid_entry);
4889
4890	if (ret == 0 && next_cmd) {
4891		pdu_length -= next_cmd;
4892		server->large_buf = next_is_large;
4893		if (next_is_large)
4894			server->bigbuf = buf = next_buffer;
4895		else
4896			server->smallbuf = buf = next_buffer;
4897		goto one_more;
4898	} else if (ret != 0) {
4899		/*
4900		 * ret != 0 here means that we didn't get to handle_mid() thus
4901		 * server->smallbuf and server->bigbuf are still valid. We need
4902		 * to free next_buffer because it is not going to be used
4903		 * anywhere.
4904		 */
4905		if (next_is_large)
4906			free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
4907		else
4908			free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
4909	}
4910
4911	return ret;
4912}
4913
4914static int
4915smb3_receive_transform(struct TCP_Server_Info *server,
4916		       struct mid_q_entry **mids, char **bufs, int *num_mids)
4917{
4918	char *buf = server->smallbuf;
4919	unsigned int pdu_length = server->pdu_size;
4920	struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
4921	unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
4922
4923	if (pdu_length < sizeof(struct smb2_transform_hdr) +
4924						sizeof(struct smb2_hdr)) {
4925		cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
4926			 pdu_length);
4927		cifs_reconnect(server, true);
4928		return -ECONNABORTED;
4929	}
4930
4931	if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
4932		cifs_server_dbg(VFS, "Transform message is broken\n");
4933		cifs_reconnect(server, true);
4934		return -ECONNABORTED;
4935	}
4936
	/* TODO: add support for compounds containing READ. */
	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server))
		return receive_encrypted_read(server, &mids[0], num_mids);
4941
4942	return receive_encrypted_standard(server, mids, bufs, num_mids);
4943}
4944
4945int
4946smb3_handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4947{
4948	char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
4949
4950	return handle_read_data(server, mid, buf, server->pdu_size,
4951				NULL, 0, false);
4952}
4953
4954static int smb2_next_header(struct TCP_Server_Info *server, char *buf,
4955			    unsigned int *noff)
4956{
4957	struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
4958	struct smb2_transform_hdr *t_hdr = (struct smb2_transform_hdr *)buf;
4959
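	/*
	 * An encrypted PDU is followed by the next one after the transform
	 * header plus the original (decrypted) message size; plaintext PDUs
	 * chain via NextCommand.
	 */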
4960	if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
4961		*noff = le32_to_cpu(t_hdr->OriginalMessageSize);
4962		if (unlikely(check_add_overflow(*noff, sizeof(*t_hdr), noff)))
4963			return -EINVAL;
4964	} else {
4965		*noff = le32_to_cpu(hdr->NextCommand);
4966	}
4967	if (unlikely(*noff && *noff < MID_HEADER_SIZE(server)))
4968		return -EINVAL;
4969	return 0;
4970}
4971
4972static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
4973				struct dentry *dentry, struct cifs_tcon *tcon,
4974				const char *full_path, umode_t mode, dev_t dev)
4975{
4976	struct TCP_Server_Info *server = tcon->ses->server;
4977	struct cifs_open_parms oparms;
4978	struct cifs_io_parms io_parms = {};
4979	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
4980	struct cifs_fid fid;
4981	unsigned int bytes_written;
4982	struct win_dev pdev = {};
4983	struct kvec iov[2];
4984	__u32 oplock = server->oplocks ? REQ_OPLOCK : 0;
4985	int rc;
4986
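	/*
	 * SFU emulation encodes the device type and major/minor numbers in
	 * a small win_dev blob written as the file's contents.
	 */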
4987	switch (mode & S_IFMT) {
4988	case S_IFCHR:
4989		strscpy(pdev.type, "IntxCHR");
4990		pdev.major = cpu_to_le64(MAJOR(dev));
4991		pdev.minor = cpu_to_le64(MINOR(dev));
4992		break;
4993	case S_IFBLK:
4994		strscpy(pdev.type, "IntxBLK");
4995		pdev.major = cpu_to_le64(MAJOR(dev));
4996		pdev.minor = cpu_to_le64(MINOR(dev));
4997		break;
4998	case S_IFIFO:
4999		strscpy(pdev.type, "LnxFIFO");
5000		break;
5001	default:
5002		return -EPERM;
5003	}
5004
5005	oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, GENERIC_WRITE,
5006			     FILE_CREATE, CREATE_NOT_DIR |
5007			     CREATE_OPTION_SPECIAL, ACL_NO_MODE);
5008	oparms.fid = &fid;
5009
5010	rc = server->ops->open(xid, &oparms, &oplock, NULL);
5011	if (rc)
5012		return rc;
5013
5014	io_parms.pid = current->tgid;
5015	io_parms.tcon = tcon;
5016	io_parms.length = sizeof(pdev);
5017	iov[1].iov_base = &pdev;
5018	iov[1].iov_len = sizeof(pdev);
5019
5020	rc = server->ops->sync_write(xid, &fid, &io_parms,
5021				     &bytes_written, iov, 1);
5022	server->ops->close(xid, tcon, &fid);
5023	return rc;
5024}
5025
5026int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
5027		       struct dentry *dentry, struct cifs_tcon *tcon,
5028		       const char *full_path, umode_t mode, dev_t dev)
5029{
5030	struct inode *new = NULL;
5031	int rc;
5032
5033	rc = __cifs_sfu_make_node(xid, inode, dentry, tcon,
5034				  full_path, mode, dev);
5035	if (rc)
5036		return rc;
5037
5038	if (tcon->posix_extensions) {
5039		rc = smb311_posix_get_inode_info(&new, full_path, NULL,
5040						 inode->i_sb, xid);
5041	} else if (tcon->unix_ext) {
5042		rc = cifs_get_inode_info_unix(&new, full_path,
5043					      inode->i_sb, xid);
5044	} else {
5045		rc = cifs_get_inode_info(&new, full_path, NULL,
5046					 inode->i_sb, xid, NULL);
5047	}
5048	if (!rc)
5049		d_instantiate(dentry, new);
5050	return rc;
5051}
5052
5053static int smb2_make_node(unsigned int xid, struct inode *inode,
5054			  struct dentry *dentry, struct cifs_tcon *tcon,
5055			  const char *full_path, umode_t mode, dev_t dev)
5056{
5057	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
5058	int rc;
5059
	/*
	 * Check if mounted with the 'sfu' mount option.
	 * SFU emulation should work with all servers, but only
	 * supports block devices, char devices and fifos (no sockets),
	 * and was used by default in earlier versions of Windows.
	 */
5066	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
5067		rc = cifs_sfu_make_node(xid, inode, dentry, tcon,
5068					full_path, mode, dev);
5069	} else {
5070		rc = smb2_mknod_reparse(xid, inode, dentry, tcon,
5071					full_path, mode, dev);
5072	}
5073	return rc;
5074}
5075
5076#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
5077struct smb_version_operations smb20_operations = {
5078	.compare_fids = smb2_compare_fids,
5079	.setup_request = smb2_setup_request,
5080	.setup_async_request = smb2_setup_async_request,
5081	.check_receive = smb2_check_receive,
5082	.add_credits = smb2_add_credits,
5083	.set_credits = smb2_set_credits,
5084	.get_credits_field = smb2_get_credits_field,
5085	.get_credits = smb2_get_credits,
5086	.wait_mtu_credits = cifs_wait_mtu_credits,
5087	.get_next_mid = smb2_get_next_mid,
5088	.revert_current_mid = smb2_revert_current_mid,
5089	.read_data_offset = smb2_read_data_offset,
5090	.read_data_length = smb2_read_data_length,
5091	.map_error = map_smb2_to_linux_error,
5092	.find_mid = smb2_find_mid,
5093	.check_message = smb2_check_message,
5094	.dump_detail = smb2_dump_detail,
5095	.clear_stats = smb2_clear_stats,
5096	.print_stats = smb2_print_stats,
5097	.is_oplock_break = smb2_is_valid_oplock_break,
5098	.handle_cancelled_mid = smb2_handle_cancelled_mid,
5099	.downgrade_oplock = smb2_downgrade_oplock,
5100	.need_neg = smb2_need_neg,
5101	.negotiate = smb2_negotiate,
5102	.negotiate_wsize = smb2_negotiate_wsize,
5103	.negotiate_rsize = smb2_negotiate_rsize,
5104	.sess_setup = SMB2_sess_setup,
5105	.logoff = SMB2_logoff,
5106	.tree_connect = SMB2_tcon,
5107	.tree_disconnect = SMB2_tdis,
5108	.qfs_tcon = smb2_qfs_tcon,
5109	.is_path_accessible = smb2_is_path_accessible,
5110	.can_echo = smb2_can_echo,
5111	.echo = SMB2_echo,
5112	.query_path_info = smb2_query_path_info,
5113	.query_reparse_point = smb2_query_reparse_point,
5114	.get_srv_inum = smb2_get_srv_inum,
5115	.query_file_info = smb2_query_file_info,
5116	.set_path_size = smb2_set_path_size,
5117	.set_file_size = smb2_set_file_size,
5118	.set_file_info = smb2_set_file_info,
5119	.set_compression = smb2_set_compression,
5120	.mkdir = smb2_mkdir,
5121	.mkdir_setinfo = smb2_mkdir_setinfo,
5122	.rmdir = smb2_rmdir,
5123	.unlink = smb2_unlink,
5124	.rename = smb2_rename_path,
5125	.create_hardlink = smb2_create_hardlink,
5126	.parse_reparse_point = smb2_parse_reparse_point,
5127	.query_mf_symlink = smb3_query_mf_symlink,
5128	.create_mf_symlink = smb3_create_mf_symlink,
5129	.create_reparse_symlink = smb2_create_reparse_symlink,
5130	.open = smb2_open_file,
5131	.set_fid = smb2_set_fid,
5132	.close = smb2_close_file,
5133	.flush = smb2_flush_file,
5134	.async_readv = smb2_async_readv,
5135	.async_writev = smb2_async_writev,
5136	.sync_read = smb2_sync_read,
5137	.sync_write = smb2_sync_write,
5138	.query_dir_first = smb2_query_dir_first,
5139	.query_dir_next = smb2_query_dir_next,
5140	.close_dir = smb2_close_dir,
5141	.calc_smb_size = smb2_calc_size,
5142	.is_status_pending = smb2_is_status_pending,
5143	.is_session_expired = smb2_is_session_expired,
5144	.oplock_response = smb2_oplock_response,
5145	.queryfs = smb2_queryfs,
5146	.mand_lock = smb2_mand_lock,
5147	.mand_unlock_range = smb2_unlock_range,
5148	.push_mand_locks = smb2_push_mandatory_locks,
5149	.get_lease_key = smb2_get_lease_key,
5150	.set_lease_key = smb2_set_lease_key,
5151	.new_lease_key = smb2_new_lease_key,
5152	.calc_signature = smb2_calc_signature,
5153	.is_read_op = smb2_is_read_op,
5154	.set_oplock_level = smb2_set_oplock_level,
5155	.create_lease_buf = smb2_create_lease_buf,
5156	.parse_lease_buf = smb2_parse_lease_buf,
5157	.copychunk_range = smb2_copychunk_range,
5158	.wp_retry_size = smb2_wp_retry_size,
5159	.dir_needs_close = smb2_dir_needs_close,
5160	.get_dfs_refer = smb2_get_dfs_refer,
5161	.select_sectype = smb2_select_sectype,
5162#ifdef CONFIG_CIFS_XATTR
5163	.query_all_EAs = smb2_query_eas,
5164	.set_EA = smb2_set_ea,
5165#endif /* CIFS_XATTR */
5166	.get_acl = get_smb2_acl,
5167	.get_acl_by_fid = get_smb2_acl_by_fid,
5168	.set_acl = set_smb2_acl,
5169	.next_header = smb2_next_header,
5170	.ioctl_query_info = smb2_ioctl_query_info,
5171	.make_node = smb2_make_node,
5172	.fiemap = smb3_fiemap,
5173	.llseek = smb3_llseek,
5174	.is_status_io_timeout = smb2_is_status_io_timeout,
5175	.is_network_name_deleted = smb2_is_network_name_deleted,
5176};
5177#endif /* CIFS_ALLOW_INSECURE_LEGACY */
5178
5179struct smb_version_operations smb21_operations = {
5180	.compare_fids = smb2_compare_fids,
5181	.setup_request = smb2_setup_request,
5182	.setup_async_request = smb2_setup_async_request,
5183	.check_receive = smb2_check_receive,
5184	.add_credits = smb2_add_credits,
5185	.set_credits = smb2_set_credits,
5186	.get_credits_field = smb2_get_credits_field,
5187	.get_credits = smb2_get_credits,
5188	.wait_mtu_credits = smb2_wait_mtu_credits,
5189	.adjust_credits = smb2_adjust_credits,
5190	.get_next_mid = smb2_get_next_mid,
5191	.revert_current_mid = smb2_revert_current_mid,
5192	.read_data_offset = smb2_read_data_offset,
5193	.read_data_length = smb2_read_data_length,
5194	.map_error = map_smb2_to_linux_error,
5195	.find_mid = smb2_find_mid,
5196	.check_message = smb2_check_message,
5197	.dump_detail = smb2_dump_detail,
5198	.clear_stats = smb2_clear_stats,
5199	.print_stats = smb2_print_stats,
5200	.is_oplock_break = smb2_is_valid_oplock_break,
5201	.handle_cancelled_mid = smb2_handle_cancelled_mid,
5202	.downgrade_oplock = smb2_downgrade_oplock,
5203	.need_neg = smb2_need_neg,
5204	.negotiate = smb2_negotiate,
5205	.negotiate_wsize = smb2_negotiate_wsize,
5206	.negotiate_rsize = smb2_negotiate_rsize,
5207	.sess_setup = SMB2_sess_setup,
5208	.logoff = SMB2_logoff,
5209	.tree_connect = SMB2_tcon,
5210	.tree_disconnect = SMB2_tdis,
5211	.qfs_tcon = smb2_qfs_tcon,
5212	.is_path_accessible = smb2_is_path_accessible,
5213	.can_echo = smb2_can_echo,
5214	.echo = SMB2_echo,
5215	.query_path_info = smb2_query_path_info,
5216	.query_reparse_point = smb2_query_reparse_point,
5217	.get_srv_inum = smb2_get_srv_inum,
5218	.query_file_info = smb2_query_file_info,
5219	.set_path_size = smb2_set_path_size,
5220	.set_file_size = smb2_set_file_size,
5221	.set_file_info = smb2_set_file_info,
5222	.set_compression = smb2_set_compression,
5223	.mkdir = smb2_mkdir,
5224	.mkdir_setinfo = smb2_mkdir_setinfo,
5225	.rmdir = smb2_rmdir,
5226	.unlink = smb2_unlink,
5227	.rename = smb2_rename_path,
5228	.create_hardlink = smb2_create_hardlink,
5229	.parse_reparse_point = smb2_parse_reparse_point,
5230	.query_mf_symlink = smb3_query_mf_symlink,
5231	.create_mf_symlink = smb3_create_mf_symlink,
5232	.create_reparse_symlink = smb2_create_reparse_symlink,
5233	.open = smb2_open_file,
5234	.set_fid = smb2_set_fid,
5235	.close = smb2_close_file,
5236	.flush = smb2_flush_file,
5237	.async_readv = smb2_async_readv,
5238	.async_writev = smb2_async_writev,
5239	.sync_read = smb2_sync_read,
5240	.sync_write = smb2_sync_write,
5241	.query_dir_first = smb2_query_dir_first,
5242	.query_dir_next = smb2_query_dir_next,
5243	.close_dir = smb2_close_dir,
5244	.calc_smb_size = smb2_calc_size,
5245	.is_status_pending = smb2_is_status_pending,
5246	.is_session_expired = smb2_is_session_expired,
5247	.oplock_response = smb2_oplock_response,
5248	.queryfs = smb2_queryfs,
5249	.mand_lock = smb2_mand_lock,
5250	.mand_unlock_range = smb2_unlock_range,
5251	.push_mand_locks = smb2_push_mandatory_locks,
5252	.get_lease_key = smb2_get_lease_key,
5253	.set_lease_key = smb2_set_lease_key,
5254	.new_lease_key = smb2_new_lease_key,
5255	.calc_signature = smb2_calc_signature,
5256	.is_read_op = smb21_is_read_op,
5257	.set_oplock_level = smb21_set_oplock_level,
5258	.create_lease_buf = smb2_create_lease_buf,
5259	.parse_lease_buf = smb2_parse_lease_buf,
5260	.copychunk_range = smb2_copychunk_range,
5261	.wp_retry_size = smb2_wp_retry_size,
5262	.dir_needs_close = smb2_dir_needs_close,
5263	.enum_snapshots = smb3_enum_snapshots,
5264	.notify = smb3_notify,
5265	.get_dfs_refer = smb2_get_dfs_refer,
5266	.select_sectype = smb2_select_sectype,
5267#ifdef CONFIG_CIFS_XATTR
5268	.query_all_EAs = smb2_query_eas,
5269	.set_EA = smb2_set_ea,
5270#endif /* CIFS_XATTR */
5271	.get_acl = get_smb2_acl,
5272	.get_acl_by_fid = get_smb2_acl_by_fid,
5273	.set_acl = set_smb2_acl,
5274	.next_header = smb2_next_header,
5275	.ioctl_query_info = smb2_ioctl_query_info,
5276	.make_node = smb2_make_node,
5277	.fiemap = smb3_fiemap,
5278	.llseek = smb3_llseek,
5279	.is_status_io_timeout = smb2_is_status_io_timeout,
5280	.is_network_name_deleted = smb2_is_network_name_deleted,
5281};
5282
5283struct smb_version_operations smb30_operations = {
5284	.compare_fids = smb2_compare_fids,
5285	.setup_request = smb2_setup_request,
5286	.setup_async_request = smb2_setup_async_request,
5287	.check_receive = smb2_check_receive,
5288	.add_credits = smb2_add_credits,
5289	.set_credits = smb2_set_credits,
5290	.get_credits_field = smb2_get_credits_field,
5291	.get_credits = smb2_get_credits,
5292	.wait_mtu_credits = smb2_wait_mtu_credits,
5293	.adjust_credits = smb2_adjust_credits,
5294	.get_next_mid = smb2_get_next_mid,
5295	.revert_current_mid = smb2_revert_current_mid,
5296	.read_data_offset = smb2_read_data_offset,
5297	.read_data_length = smb2_read_data_length,
5298	.map_error = map_smb2_to_linux_error,
5299	.find_mid = smb2_find_mid,
5300	.check_message = smb2_check_message,
5301	.dump_detail = smb2_dump_detail,
5302	.clear_stats = smb2_clear_stats,
5303	.print_stats = smb2_print_stats,
5304	.dump_share_caps = smb2_dump_share_caps,
5305	.is_oplock_break = smb2_is_valid_oplock_break,
5306	.handle_cancelled_mid = smb2_handle_cancelled_mid,
5307	.downgrade_oplock = smb3_downgrade_oplock,
5308	.need_neg = smb2_need_neg,
5309	.negotiate = smb2_negotiate,
5310	.negotiate_wsize = smb3_negotiate_wsize,
5311	.negotiate_rsize = smb3_negotiate_rsize,
5312	.sess_setup = SMB2_sess_setup,
5313	.logoff = SMB2_logoff,
5314	.tree_connect = SMB2_tcon,
5315	.tree_disconnect = SMB2_tdis,
5316	.qfs_tcon = smb3_qfs_tcon,
5317	.query_server_interfaces = SMB3_request_interfaces,
5318	.is_path_accessible = smb2_is_path_accessible,
5319	.can_echo = smb2_can_echo,
5320	.echo = SMB2_echo,
5321	.query_path_info = smb2_query_path_info,
5322	/* WSL tags introduced long after smb2.1, enable for SMB3, 3.11 only */
	.query_reparse_point = smb2_query_reparse_point,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.parse_reparse_point = smb2_parse_reparse_point,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.create_reparse_symlink = smb2_create_reparse_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.close_getattr = smb2_close_getattr,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb2_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb30signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity  = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
	.validate_negotiate = smb3_validate_negotiate,
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
	.is_network_name_deleted = smb2_is_network_name_deleted,
};

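/*
 * SMB 3.1.1 dialect operations.  Largely shared with the SMB3 table above,
 * but signing keys are derived with the 3.1.1 KDF, POSIX-extension mkdir is
 * supported, and FSCTL validate negotiate is not needed since 3.1.1 protects
 * the negotiation exchange with pre-auth integrity hashes instead.
 */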
struct smb_version_operations smb311_operations = {
	.compare_fids = smb2_compare_fids,
	.setup_request = smb2_setup_request,
	.setup_async_request = smb2_setup_async_request,
	.check_receive = smb2_check_receive,
	.add_credits = smb2_add_credits,
	.set_credits = smb2_set_credits,
	.get_credits_field = smb2_get_credits_field,
	.get_credits = smb2_get_credits,
	.wait_mtu_credits = smb2_wait_mtu_credits,
	.adjust_credits = smb2_adjust_credits,
	.get_next_mid = smb2_get_next_mid,
	.revert_current_mid = smb2_revert_current_mid,
	.read_data_offset = smb2_read_data_offset,
	.read_data_length = smb2_read_data_length,
	.map_error = map_smb2_to_linux_error,
	.find_mid = smb2_find_mid,
	.check_message = smb2_check_message,
	.dump_detail = smb2_dump_detail,
	.clear_stats = smb2_clear_stats,
	.print_stats = smb2_print_stats,
	.dump_share_caps = smb2_dump_share_caps,
	.is_oplock_break = smb2_is_valid_oplock_break,
	.handle_cancelled_mid = smb2_handle_cancelled_mid,
	.downgrade_oplock = smb3_downgrade_oplock,
	.need_neg = smb2_need_neg,
	.negotiate = smb2_negotiate,
	.negotiate_wsize = smb3_negotiate_wsize,
	.negotiate_rsize = smb3_negotiate_rsize,
	.sess_setup = SMB2_sess_setup,
	.logoff = SMB2_logoff,
	.tree_connect = SMB2_tcon,
	.tree_disconnect = SMB2_tdis,
	.qfs_tcon = smb3_qfs_tcon,
	.query_server_interfaces = SMB3_request_interfaces,
	.is_path_accessible = smb2_is_path_accessible,
	.can_echo = smb2_can_echo,
	.echo = SMB2_echo,
	.query_path_info = smb2_query_path_info,
	.query_reparse_point = smb2_query_reparse_point,
	.get_srv_inum = smb2_get_srv_inum,
	.query_file_info = smb2_query_file_info,
	.set_path_size = smb2_set_path_size,
	.set_file_size = smb2_set_file_size,
	.set_file_info = smb2_set_file_info,
	.set_compression = smb2_set_compression,
	.mkdir = smb2_mkdir,
	.mkdir_setinfo = smb2_mkdir_setinfo,
	.posix_mkdir = smb311_posix_mkdir,
	.rmdir = smb2_rmdir,
	.unlink = smb2_unlink,
	.rename = smb2_rename_path,
	.create_hardlink = smb2_create_hardlink,
	.parse_reparse_point = smb2_parse_reparse_point,
	.query_mf_symlink = smb3_query_mf_symlink,
	.create_mf_symlink = smb3_create_mf_symlink,
	.create_reparse_symlink = smb2_create_reparse_symlink,
	.open = smb2_open_file,
	.set_fid = smb2_set_fid,
	.close = smb2_close_file,
	.close_getattr = smb2_close_getattr,
	.flush = smb2_flush_file,
	.async_readv = smb2_async_readv,
	.async_writev = smb2_async_writev,
	.sync_read = smb2_sync_read,
	.sync_write = smb2_sync_write,
	.query_dir_first = smb2_query_dir_first,
	.query_dir_next = smb2_query_dir_next,
	.close_dir = smb2_close_dir,
	.calc_smb_size = smb2_calc_size,
	.is_status_pending = smb2_is_status_pending,
	.is_session_expired = smb2_is_session_expired,
	.oplock_response = smb2_oplock_response,
	.queryfs = smb311_queryfs,
	.mand_lock = smb2_mand_lock,
	.mand_unlock_range = smb2_unlock_range,
	.push_mand_locks = smb2_push_mandatory_locks,
	.get_lease_key = smb2_get_lease_key,
	.set_lease_key = smb2_set_lease_key,
	.new_lease_key = smb2_new_lease_key,
	.generate_signingkey = generate_smb311signingkey,
	.calc_signature = smb3_calc_signature,
	.set_integrity  = smb3_set_integrity,
	.is_read_op = smb21_is_read_op,
	.set_oplock_level = smb3_set_oplock_level,
	.create_lease_buf = smb3_create_lease_buf,
	.parse_lease_buf = smb3_parse_lease_buf,
	.copychunk_range = smb2_copychunk_range,
	.duplicate_extents = smb2_duplicate_extents,
/*	.validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
	.wp_retry_size = smb2_wp_retry_size,
	.dir_needs_close = smb2_dir_needs_close,
	.fallocate = smb3_fallocate,
	.enum_snapshots = smb3_enum_snapshots,
	.notify = smb3_notify,
	.init_transform_rq = smb3_init_transform_rq,
	.is_transform_hdr = smb3_is_transform_hdr,
	.receive_transform = smb3_receive_transform,
	.get_dfs_refer = smb2_get_dfs_refer,
	.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
	.query_all_EAs = smb2_query_eas,
	.set_EA = smb2_set_ea,
#endif /* CIFS_XATTR */
	.get_acl = get_smb2_acl,
	.get_acl_by_fid = get_smb2_acl_by_fid,
	.set_acl = set_smb2_acl,
	.next_header = smb2_next_header,
	.ioctl_query_info = smb2_ioctl_query_info,
	.make_node = smb2_make_node,
	.fiemap = smb3_fiemap,
	.llseek = smb3_llseek,
	.is_status_io_timeout = smb2_is_status_io_timeout,
	.is_network_name_deleted = smb2_is_network_name_deleted,
};

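/*
 * Per-dialect protocol constants.  Each table supplies the negotiate
 * capabilities to request, the lock flag encodings, header and read
 * response sizes, signing flags and the lease create context size used
 * once the corresponding dialect has been negotiated.
 */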
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
struct smb_version_values smb20_values = {
	.version_string = SMB20_VERSION_STRING,
	.protocol_id = SMB20_PROT_ID,
	.req_capabilities = 0, /* MBZ */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};
#endif /* ALLOW_INSECURE_LEGACY */

struct smb_version_values smb21_values = {
	.version_string = SMB21_VERSION_STRING,
	.protocol_id = SMB21_PROT_ID,
	.req_capabilities = 0, /* MBZ on negotiate req until SMB3 dialect */
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease),
};

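/*
 * "vers=3" and default mounts negotiate by offering an array of SMB3
 * dialects rather than a single protocol_id; the server's choice then
 * determines which dialect-specific values and operations tables the
 * connection uses afterwards.
 */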
struct smb_version_values smb3any_values = {
	.version_string = SMB3ANY_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

struct smb_version_values smbdefault_values = {
	.version_string = SMBDEFAULT_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

struct smb_version_values smb30_values = {
	.version_string = SMB30_VERSION_STRING,
	.protocol_id = SMB30_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

struct smb_version_values smb302_values = {
	.version_string = SMB302_VERSION_STRING,
	.protocol_id = SMB302_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};

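/*
 * SMB 3.1.1 requests the same capability bits and v2 lease context as
 * 3.0/3.0.2; cipher and hash selection is handled separately via
 * negotiate contexts rather than through these constants.
 */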
struct smb_version_values smb311_values = {
	.version_string = SMB311_VERSION_STRING,
	.protocol_id = SMB311_PROT_ID,
	.req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
	.large_lock_type = 0,
	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
	.header_size = sizeof(struct smb2_hdr),
	.header_preamble_size = 0,
	.max_header_size = MAX_SMB2_HDR_SIZE,
	.read_rsp_size = sizeof(struct smb2_read_rsp),
	.lock_cmd = SMB2_LOCK,
	.cap_unix = 0,
	.cap_nt_find = SMB2_NT_FIND,
	.cap_large_files = SMB2_LARGE_FILES,
	.signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
	.create_lease_size = sizeof(struct create_lease_v2),
};