/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return temp;
	else {
		memset(temp, 0, sizeof(struct mid_q_entry));
		temp->mid = get_mid(smb_buffer);
		temp->pid = current->pid;
		temp->command = cpu_to_le16(smb_buffer->Command);
		cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
		/* easier to use jiffies than do_gettimeofday() here */
		/* when mid allocated can be before when sent */
		temp->when_alloc = jiffies;
		temp->server = server;

		/*
		 * The default is for the mid to be synchronous, so the
		 * default callback just wakes up the current task.
		 */
		temp->callback = cifs_wake_up_task;
		temp->callback_data = current;
	}

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}
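/*
 * Illustrative sketch (not part of the original file): an asynchronous
 * caller overrides the default synchronous callback before the request
 * goes on the wire, much as cifs_call_async() does later in this file.
 * my_mid_done and my_ctx are hypothetical caller-supplied names:
 *
 *	mid = AllocMidQEntry(hdr, server);
 *	if (mid == NULL)
 *		return -ENOMEM;
 *	mid->callback = my_mid_done;	(run by the demultiplex thread)
 *	mid->callback_data = my_ctx;	(instead of waking this task)
 */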
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/*
	 * Commands taking longer than one second are indications that
	 * something is wrong, unless it is quite a slow link or server.
	 */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
				now - midEntry->when_alloc,
				now - midEntry->when_sent,
				now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
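/*
 * Summary added for clarity (not in the original file): a mid moves
 * from MID_REQUEST_ALLOCATED (AllocMidQEntry) to MID_REQUEST_SUBMITTED
 * once it is on the wire, then to one of MID_RESPONSE_RECEIVED,
 * MID_RETRY_NEEDED, MID_RESPONSE_MALFORMED or MID_SHUTDOWN (see
 * cifs_sync_mid_result below), and finally to MID_FREE in
 * DeleteMidQEntry before being returned to the mempool.
 */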
/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @iov: Pointer to array of kvecs
 * @n_vec: length of kvec array
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
	      size_t *sent)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	unsigned int remaining;
	size_t first_vec = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	remaining = 0;
	for (i = 0; i < n_vec; i++)
		remaining += iov[i].iov_len;

	i = 0;
	while (remaining) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, remaining);
		if (rc == -EAGAIN) {
			i++;
			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}

		if (rc < 0)
			break;

		/* send was at least partially successful */
		*sent += rc;

		if (rc == remaining) {
			remaining = 0;
			break;
		}

		if (rc > remaining) {
			cifs_dbg(VFS, "sent %d requested %d\n", rc, remaining);
			break;
		}

		if (rc == 0) {
			/*
			 * Should never happen; letting the socket clear
			 * before retrying is our only obvious option here.
			 */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		remaining -= rc;

		/* the loop below advances first_vec and clobbers i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}

		i = 0; /* in case we get ENOSPC on the next send */
		rc = 0;
	}
	return rc;
}
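/*
 * Worked example (added for clarity, not in the original file): in the
 * nonblocking case each -EAGAIN retry sleeps msleep(1 << i), so the
 * cumulative sleep before giving up at i == 14 is the sum of 2^i ms for
 * i = 1..13, i.e. 2^14 - 2 ms, roughly 16 seconds; that is where the
 * "stuck for 15 seconds" message above comes from. In the blocking case
 * each kernel_sendmsg() can itself block for about 5 seconds, hence
 * only 3 tries there.
 */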
/**
 * cifs_rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
 * @rqst: pointer to smb_rqst
 * @idx: index into the rqst->rq_pages array
 * @iov: pointer to struct kvec that will hold the result
 *
 * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
 * The page will be kmapped and the address placed into iov_base. The length
 * is then set from rq_pagesz, or from rq_tailsz for the final page.
 */
void
cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
		       struct kvec *iov)
{
	/*
	 * FIXME: We could avoid this kmap altogether if we used
	 * kernel_sendpage instead of kernel_sendmsg. That will only
	 * work if signing is disabled though as sendpage inlines the
	 * page directly into the fraglist. If userspace modifies the
	 * page after we calculate the signature, then the server will
	 * reject it and may break the connection. kernel_sendmsg does
	 * an extra copy of the data and avoids that issue.
	 */
	iov->iov_base = kmap(rqst->rq_pages[idx]);

	/* if last page, don't send beyond this offset into page */
	if (idx == (rqst->rq_npages - 1))
		iov->iov_len = rqst->rq_tailsz;
	else
		iov->iov_len = rqst->rq_pagesz;
}

static unsigned long
rqst_len(struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov = rqst->rq_iov;
	unsigned long buflen = 0;

	/* total up iov array first */
	for (i = 0; i < rqst->rq_nvec; i++)
		buflen += iov[i].iov_len;

	/* add in the page array if there is one */
	if (rqst->rq_npages) {
		buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
		buflen += rqst->rq_tailsz;
	}

	return buflen;
}
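/*
 * Worked example (added for clarity, not in the original file): for a
 * request with rq_npages = 4, rq_pagesz = 4096 and rq_tailsz = 100
 * (three full pages plus a 100-byte tail), rqst_len() returns
 *
 *	(sum of rq_iov lengths) + 4096 * (4 - 1) + 100
 *
 * since only the last page is limited to rq_tailsz bytes, matching the
 * way cifs_rqst_page_to_kvec() sizes each kvec above.
 */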
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent;
	struct socket *ssocket = server->ssocket;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length */
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch (send_length=%lu smb_buf_length=%u)\n",
		     send_length, smb_buf_length);
		return -EIO;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	rc = smb_send_kvec(server, iov, n_vec, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		struct kvec p_iov;

		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
		rc = smb_send_kvec(server, &p_iov, 1, &sent);
		kunmap(rqst->rq_pages[i]);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
			  (char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else
		rc = 0;

	return rc;
}

static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	return smb_send_rqst(server, &rqst);
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}
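/*
 * Illustrative sketch (not part of the original file): callers hand
 * smb_send() the length from the RFC 1002 header and it adds the 4
 * bytes of that header itself, as SendReceive() does below. From a
 * caller already holding srv_mutex:
 *
 *	rc = smb_send(server, in_buf,
 *		      be32_to_cpu(in_buf->smb_buf_length));
 */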
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Cannot count locking commands against the total
			 * as they are allowed to block on the server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
	*num = size;
	*credits = 0;
	return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* only the final LOGOFF may go out during teardown */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}
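/*
 * Note added for clarity (not in the original file): except for
 * CIFS_BLOCKING_OP, a successful wait_for_free_request() consumes one
 * credit and bumps in_flight, so every exit path afterwards must give
 * the credit back via add_credits() or add_credits_and_wake_if(), as
 * the callers below are careful to do; losing that pairing would leave
 * waiters on request_q stuck.
 */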
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);

	if (rc < 0) {
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}
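/*
 * Illustrative sketch (not part of the original file): an asynchronous
 * caller, e.g. an echo or oplock-break sender, might look like the
 * following (my_echo_callback and rqst are hypothetical names here):
 *
 *	rc = cifs_call_async(server, &rqst, NULL, my_echo_callback,
 *			     NULL, CIFS_ASYNC_OP | CIFS_ECHO_OP);
 *
 * The callback runs in the demultiplex thread when the response (or a
 * reconnect) arrives, and is expected to release the mid and return
 * any credit it holds.
 */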
/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it into
 * a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
		server->ops->send_cancel(server, buf, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* verify the signature if the server signs its responses */
	if (server->sign) {
		struct kvec iov;
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = &iov,
					 .rq_nvec = 1 };

		iov.iov_base = mid->resp_buf;
		iov.iov_len = len;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}
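/*
 * Note added for clarity (not in the original file): unlike
 * cifs_setup_async_request() above, cifs_setup_request() queues the
 * new mid on pending_mid_q itself (via allocate_mid), which is why it
 * must unwind with cifs_delete_mid() rather than DeleteMidQEntry()
 * when signing fails.
 */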
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	char *buf = iov[0].iov_base;
	unsigned int credits = 1;
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(buf);
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(buf);
		return -ENOENT;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc) {
		cifs_small_buf_release(buf);
		return rc;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, &rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(buf);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(buf);
		goto out;
	}

	if (timeout == CIFS_ASYNC_OP) {
		cifs_small_buf_release(buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(buf);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(FYI, "Bad MID state?\n");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	iov[0].iov_base = buf;
	iov[0].iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}
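/*
 * Illustrative sketch (not part of the original file): a typical
 * SendReceive2() caller passes ownership of the request buffer in
 * iov[0] (the function releases it itself) and, on success, iov[0]
 * is rewritten to point at the response, which the caller must free
 * according to resp_buf_type. req_buf is a hypothetical name:
 *
 *	iov[0].iov_base = req_buf;
 *	iov[0].iov_len = get_rfc1002_length(req_buf) + 4;
 *	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type,
 *			  CIFS_LOG_ERROR);
 *	if (resp_buf_type == CIFS_LARGE_BUFFER)
 *		cifs_buf_release(iov[0].iov_base);
 *	else if (resp_buf_type == CIFS_SMALL_BUFFER)
 *		cifs_small_buf_release(iov[0].iov_base);
 */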
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/*
	 * We just modify the current in_buf to change
	 * the type of lock from LOCKING_ANDX_SHARED_LOCK
	 * or LOCKING_ANDX_EXCLUSIVE_LOCK to
	 * LOCKING_ANDX_CANCEL_LOCK.
	 */
	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}
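/*
 * Note added for clarity (not in the original file): send_lock_cancel()
 * rewrites the caller's in_buf in place (LockType becomes
 * LOCKING_ANDX_CANCEL_LOCK and a fresh mid is assigned), so after the
 * cancel has been sent, in_buf no longer describes the original lock
 * request.
 */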
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */
	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */
	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/*
			 * POSIX lock. We send an NT_CANCEL SMB to cause the
			 * blocking lock to return.
			 */
			rc = send_cancel(ses->server, in_buf, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/*
			 * Windows lock. We send a LOCKINGX_CANCEL_LOCK
			 * to cause the blocking lock to return.
			 */
			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/*
			 * If we get -ENOLCK back the lock may have
			 * already been removed. Don't exit in this case.
			 */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
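/*
 * Note added for clarity (not in the original file): the signal path
 * above works as follows: cancel the blocked lock on the server
 * (an NT_CANCEL for a POSIX/Transaction2 lock, LOCKING_ANDX_CANCEL_LOCK
 * otherwise), wait again for the server's reply to the original
 * request, and finally convert -EACCES into -ERESTARTSYS so the
 * interrupted system call is restarted cleanly.
 */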