// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

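/*
 * Allocate a mid (multiplex id) entry for a request and take the initial
 * reference on it. The entry is not queued here; callers put it on the
 * server's pending_mid_q under mid_lock once the request is ready to send.
 */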
static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "%s: null TCP session\n", __func__);
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&mid_count);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

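/*
 * Final kref release for a mid: update per-command latency statistics
 * (when CONFIG_CIFS_STATS2 is enabled), let the protocol's
 * handle_cancelled_mid hook see cancelled-but-answered mids, free the
 * response buffer and return the entry to the mempool.
 */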
static void __release_mid(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&mid_count);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * Commands taking longer than one second (default) can be an
	 * indication that something is wrong, unless it is quite a slow link
	 * or a very busy server. Note that this calculation is unlikely or
	 * impossible to wrap as long as slow_rsp_threshold is not set way
	 * above the recommended maximum value (32767, i.e. 9 hours), and it
	 * is generally harmless even if wrong since it only affects debug
	 * counters - so leave it as a simple comparison rather than doing
	 * multiple conversions and overflow checks.
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so can not be negative below
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

void release_mid(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;

	spin_lock(&server->mid_lock);
	kref_put(&mid->refcount, __release_mid);
	spin_unlock(&server->mid_lock);
}

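/*
 * Unlink the mid from the pending queue, if it is still on it, and drop
 * the caller's reference.
 */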
void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&mid->server->mid_lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&mid->server->mid_lock);

	release_mid(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/*
			 * should never happen, letting socket clear before
			 * retrying is our only obvious option here
			 */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

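/*
 * Return the number of bytes @rqst will occupy on the wire, not counting
 * the 4-byte RFC 1002 length field that SMB2+ requests carry in their
 * first iovec.
 */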
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
	    rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	buflen += iov_iter_count(&rqst->rq_iter);
	return buflen;
}

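/*
 * Send one or more request PDUs to the server, over smbdirect when RDMA
 * is in use and over the TCP socket otherwise. Signals are blocked while
 * sending so that a partial PDU is never left on the socket; if a partial
 * send happens anyway, the connection is flagged for reconnect since the
 * server could take the next PDU as the remainder of this one.
 */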
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc = 0;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg = {};
	__be32 rfc1002_marker;

	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	if (ssocket == NULL)
		return -EAGAIN;

	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		return -ERESTARTSYS;
	}

	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (!is_smb1(server)) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, ITER_SOURCE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		if (iov_iter_count(&rqst[j].rq_iter) > 0) {
			smb_msg.msg_iter = rqst[j].rq_iter;
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;
			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		cifs_signal_cifsd_for_reconnect(server, false);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

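/*
 * Wrapper around __smb_send_rqst(): when CIFS_TRANSFORM_REQ is set, an
 * smb2_transform_hdr iovec is prepended and the server's init_transform_rq
 * hook builds the encrypted chain that is actually put on the wire.
 */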
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

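/*
 * Block until @num_credits credits are available on @server, or until
 * @timeout milliseconds have passed (a negative timeout means wait
 * forever). On success the credits are consumed and *instance is set to
 * the reconnect instance they belong to, except for blocking lock ops,
 * which are not counted since the server may hold them indefinitely.
 */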
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_nblk_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			spin_unlock(&server->req_lock);

			spin_lock(&server->srv_lock);
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->srv_lock);
				return -ENOENT;
			}
			spin_unlock(&server->srv_lock);

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits for compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning the CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			spin_lock(&server->req_lock);
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_waitff_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

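/*
 * Allocate a mid for an SMB1 request and queue it on the server's
 * pending_mid_q, refusing requests that make no sense for the current
 * session state (only negotiate and session setup are allowed while the
 * session is still being established, only logoff while it is exiting).
 */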
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	spin_lock(&ses->ses_lock);
	if (ses->ses_status == SES_NEW) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE)) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are setting up session */
	}

	if (ses->ses_status == SES_EXITING) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
			spin_unlock(&ses->ses_lock);
			return -EAGAIN;
		}
		/* else ok - we are shutting down session */
	}
	spin_unlock(&ses->ses_lock);

	*ppmidQ = alloc_mid(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&ses->server->mid_lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&ses->server->mid_lock);
	return 0;
}

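/*
 * Sleep (killable and freezable) until the demultiplex thread moves the
 * mid out of MID_REQUEST_SUBMITTED, whether because a response arrived or
 * because the connection is going down.
 */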
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_state(server->response_q,
				 midQ->mid_state != MID_REQUEST_SUBMITTED,
				 (TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = alloc_mid(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		release_mid(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	cifs_server_lock(server);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		cifs_server_unlock(server);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&server->mid_lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&server->mid_lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, 1, rqst, flags);
	cifs_in_send_dec(server);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		delete_mid(mid);
	}

	cifs_server_unlock(server);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}

/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

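/*
 * Convert the final state of a synchronously waited-for mid into an
 * errno. On MID_RESPONSE_RECEIVED the caller keeps its reference; every
 * other state releases the mid here before returning the error.
 */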
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&server->mid_lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&server->mid_lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&server->mid_lock);

	release_mid(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

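/*
 * Verify the signature on a received SMB1 response (when signing is in
 * use) and map any error code in the response header to a POSIX errno.
 */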
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

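/*
 * Callback used for every link of a compound chain: hand the credits
 * granted by the response back to the server structure. Only the last
 * link additionally wakes up the sending thread; a cancelled link drops
 * the mid reference instead.
 */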
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	release_mid(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;
	unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
	struct TCP_Server_Info *server = NULL;
	int i;

	if (!ses)
		return NULL;

	spin_lock(&ses->chan_lock);
	for (i = 0; i < ses->chan_count; i++) {
		server = ses->chans[i].server;
		if (!server)
			continue;

		/*
		 * strictly speaking, we should pick up req_lock to read
		 * server->in_flight. But it shouldn't matter much here if we
		 * race while reading this data. The worst that can happen is
		 * that we could use a channel that's not least loaded. Avoiding
		 * taking the lock could help reduce wait time, which is
		 * important for this function
		 */
		if (server->in_flight < min_in_flight) {
			min_in_flight = server->in_flight;
			index = i;
		}
		if (server->in_flight > max_in_flight)
			max_in_flight = server->in_flight;
	}

	/* if all channels are equally loaded, fall back to round-robin */
	if (min_in_flight == max_in_flight) {
		index = (uint)atomic_inc_return(&ses->chan_seq);
		index %= ses->chan_count;
	}
	spin_unlock(&ses->chan_lock);

	return ses->chans[index].server;
}

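/*
 * Send @num_rqst requests as one compound chain on @server and wait for
 * all the responses. On success each response is described by
 * resp_iov[i]/resp_buf_type[i]; the caller owns (and must free) the
 * response buffers unless CIFS_NO_RSP_BUF was passed in @flags.
 */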
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	cifs_server_lock(server);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We can not use credits obtained from the previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		cifs_server_unlock(server);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				delete_mid(midQ[j]);
			cifs_server_unlock(server);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, num_rqst, rqst, flags);
	cifs_in_send_dec(server);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	cifs_server_unlock(server);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		spin_unlock(&ses->ses_lock);

		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
		cifs_server_unlock(server);

		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&server->mid_lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&server->mid_lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			HEADER_PREAMBLE_SIZE(server);

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;

	}

	/*
	 * Compounding is never used during session establish.
	 */
	spin_lock(&ses->ses_lock);
	if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		spin_unlock(&ses->ses_lock);
		cifs_server_lock(server);
		smb311_update_preauth_hash(ses, server, &iov, 1);
		cifs_server_unlock(server);
		spin_lock(&ses->ses_lock);
	}
	spin_unlock(&ses->ses_lock);

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

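/*
 * Send a single SMB1 request and wait for the response, which is copied
 * into @out_buf. The RFC 1002 length of the response is returned through
 * @pbytes_returned.
 */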
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_server_unlock(server);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&server->mid_lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = release_mid;
			spin_unlock(&server->mid_lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&server->mid_lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change
	 * the type of lock from LOCKING_ANDX_SHARED_LOCK
	 * or LOCKING_ANDX_EXCLUSIVE_LOCK to
	 * LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

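/*
 * Like SendReceive(), but for blocking lock requests that the server may
 * legitimately hold for a long time: the wait for the response is
 * interruptible, and if a signal arrives first we send the appropriate
 * cancel (NT_CANCEL for POSIX/transaction2 locks, LOCKINGX_CANCEL_LOCK
 * for Windows locks) to unblock the request. If the lock was denied
 * after such a cancel, -EACCES is turned into -ERESTARTSYS so the system
 * call is retried.
 */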
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsExiting) {
		spin_unlock(&server->srv_lock);
		return -ENOENT;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	cifs_server_lock(server);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		cifs_server_unlock(server);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		cifs_server_unlock(server);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(server);
	rc = smb_send(server, in_buf, len);
	cifs_in_send_dec(server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	cifs_server_unlock(server);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	spin_lock(&server->srv_lock);
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {
		spin_unlock(&server->srv_lock);

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send an NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&server->mid_lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = release_mid;
				spin_unlock(&server->mid_lock);
				return rc;
			}
			spin_unlock(&server->mid_lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
		spin_lock(&server->srv_lock);
	}
	spin_unlock(&server->srv_lock);

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}

/*
 * Discard any remaining data in the current SMB. To do this, we borrow the
 * current bigbuf.
 */
int
cifs_discard_remaining_data(struct TCP_Server_Info *server)
{
	unsigned int rfclen = server->pdu_size;
	size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
		server->total_read;

	while (remaining > 0) {
		ssize_t length;

		length = cifs_discard_from_socket(server,
				min_t(size_t, remaining,
				      CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
		if (length < 0)
			return length;
		server->total_read += length;
		remaining -= length;
	}

	return 0;
}

static int
__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
		     bool malformed)
{
	int length;

	length = cifs_discard_remaining_data(server);
	dequeue_mid(mid, malformed);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}

static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;

	return __cifs_readv_discard(server, mid, rdata->result);
}

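/*
 * Receive callback for async reads: pull the rest of the READ_RSP header
 * off the socket, validate it, then read the payload into rdata->iter
 * (unless smbdirect already placed the data via RDMA). Anything left over
 * on the wire is discarded.
 */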
int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length, len;
	unsigned int data_offset, data_len;
	struct cifs_readdata *rdata = mid->callback_data;
	char *buf = server->smallbuf;
	unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
	bool use_rdma_mr = false;

	cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
		 __func__, mid->mid, rdata->offset, rdata->bytes);

	/*
	 * read the rest of READ_RSP header (sans Data array), or whatever we
	 * can if there's not enough data. At this point, we've read down to
	 * the Mid.
	 */
	len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
							HEADER_SIZE(server) + 1;

	length = cifs_read_from_socket(server,
				       buf + HEADER_SIZE(server) - 1, len);
	if (length < 0)
		return length;
	server->total_read += length;

	if (server->ops->is_session_expired &&
	    server->ops->is_session_expired(buf)) {
		cifs_reconnect(server, true);
		return -1;
	}

	if (server->ops->is_status_pending &&
	    server->ops->is_status_pending(buf, server)) {
		cifs_discard_remaining_data(server);
		return -1;
	}

	/* set up first two iov for signature check and to get credits */
	rdata->iov[0].iov_base = buf;
	rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
	rdata->iov[1].iov_len =
		server->total_read - HEADER_PREAMBLE_SIZE(server);
	cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n",
		 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
	cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n",
		 rdata->iov[1].iov_base, rdata->iov[1].iov_len);

	/* Was the SMB read successful? */
	rdata->result = server->ops->map_error(buf, false);
	if (rdata->result != 0) {
		cifs_dbg(FYI, "%s: server returned error %d\n",
			 __func__, rdata->result);
		/* normal error on read response */
		return __cifs_readv_discard(server, mid, false);
	}

	/* Is there enough to get to the rest of the READ_RSP header? */
	if (server->total_read < server->vals->read_rsp_size) {
		cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
			 __func__, server->total_read,
			 server->vals->read_rsp_size);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	data_offset = server->ops->read_data_offset(buf) +
		HEADER_PREAMBLE_SIZE(server);
	if (data_offset < server->total_read) {
		/*
		 * win2k8 sometimes sends an offset of 0 when the read
		 * is beyond the EOF. Treat it as if the data starts just after
		 * the header.
		 */
		cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n",
			 __func__, data_offset);
		data_offset = server->total_read;
	} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
		/* data_offset is beyond the end of smallbuf */
		cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n",
			 __func__, data_offset);
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

	cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n",
		 __func__, server->total_read, data_offset);

	len = data_offset - server->total_read;
	if (len > 0) {
		/* read any junk before data into the rest of smallbuf */
		length = cifs_read_from_socket(server,
					       buf + server->total_read, len);
		if (length < 0)
			return length;
		server->total_read += length;
	}

	/* how much data is in the response? */
#ifdef CONFIG_CIFS_SMB_DIRECT
	use_rdma_mr = rdata->mr;
#endif
	data_len = server->ops->read_data_length(buf, use_rdma_mr);
	if (!use_rdma_mr && (data_offset + data_len > buflen)) {
		/* data_len is corrupt -- discard frame */
		rdata->result = -EIO;
		return cifs_readv_discard(server, mid);
	}

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr)
		length = data_len; /* An RDMA read is already done. */
	else
#endif
		length = cifs_read_iter_from_socket(server, &rdata->iter,
						    data_len);
	if (length > 0)
		rdata->got_bytes += length;
	server->total_read += length;

	cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n",
		 server->total_read, buflen, data_len);

	/* discard anything left over */
	if (server->total_read < buflen)
		return cifs_readv_discard(server, mid);

	dequeue_mid(mid, false);
	mid->resp_buf = server->smallbuf;
	server->smallbuf = NULL;
	return length;
}