/*
 * Common NFS I/O operations for the pnfs file based
 * layout drivers.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tom Haynes <loghyr@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/addr.h>
#include <linux/module.h>

#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

void pnfs_generic_rw_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	nfs_put_client(hdr->ds_clp);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);

/* Fake up some data that will cause nfs_commit_release to retry the writes. */
void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
{
	struct nfs_page *first = nfs_list_entry(data->pages.next);

	data->task.tk_status = 0;
	memcpy(&data->verf.verifier, &first->wb_verf,
	       sizeof(data->verf.verifier));
	data->verf.verifier.data[0]++; /* ensure verifier mismatch */
}
EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);

void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	/* Note this may cause RPC to be resent */
	wdata->mds_ops->rpc_call_done(task, data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done);

void pnfs_generic_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	pnfs_put_lseg(data->lseg);
	nfs_put_client(data->ds_clp);
	nfs_commitdata_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);

/* The generic layer is about to remove the req from the commit list.
 * If this will make the bucket empty, it will need to put the lseg reference.
 * Note this must be called holding the inode (/cinfo) lock
 */
void
pnfs_generic_clear_request_commit(struct nfs_page *req,
				  struct nfs_commit_info *cinfo)
{
	struct pnfs_layout_segment *freeme = NULL;

	if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
		goto out;
	cinfo->ds->nwritten--;
	if (list_is_singular(&req->wb_list)) {
		struct pnfs_commit_bucket *bucket;

		bucket = list_first_entry(&req->wb_list,
					  struct pnfs_commit_bucket,
					  written);
		freeme = bucket->wlseg;
		bucket->wlseg = NULL;
	}
out:
	nfs_request_remove_commit_list(req, cinfo);
	pnfs_put_lseg_locked(freeme);
}
EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);

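/* Move requests from @src to @dst, locking each one and taking an extra
 * reference as it goes; requests that cannot be locked are skipped.  At most
 * @max requests are moved, unless @max is 0 or this is a direct-I/O commit
 * (cinfo->dreq is set), in which case everything is moved.  Called with
 * cinfo->lock held; cond_resched_lock() may briefly release it, so the
 * iteration cursor is refreshed with list_safe_reset_next().  Returns the
 * number of requests moved.
 */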
static int
pnfs_generic_transfer_commit_list(struct list_head *src, struct list_head *dst,
				  struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
		if (!nfs_lock_request(req))
			continue;
		kref_get(&req->wb_kref);
		if (cond_resched_lock(cinfo->lock))
			list_safe_reset_next(req, tmp, wb_list);
		nfs_request_remove_commit_list(req, cinfo);
		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
	}
	return ret;
}

static int
pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
				 struct nfs_commit_info *cinfo,
				 int max)
{
	struct list_head *src = &bucket->written;
	struct list_head *dst = &bucket->committing;
	int ret;

	lockdep_assert_held(cinfo->lock);
	ret = pnfs_generic_transfer_commit_list(src, dst, cinfo, max);
	if (ret) {
		cinfo->ds->nwritten -= ret;
		cinfo->ds->ncommitting += ret;
		bucket->clseg = bucket->wlseg;
		if (list_empty(src))
			bucket->wlseg = NULL;
		else
			pnfs_get_lseg(bucket->clseg);
	}
	return ret;
}

/* Move reqs from written to committing lists, returning count
 * of number moved.
 */
int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo,
				   int max)
{
	int i, rv = 0, cnt;

	lockdep_assert_held(cinfo->lock);
	for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
		cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
						       cinfo, max);
		max -= cnt;
		rv += cnt;
	}
	return rv;
}
EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);

/* Pull everything off the committing lists and dump into @dst.
 */
void pnfs_generic_recover_commit_reqs(struct list_head *dst,
				      struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *b;
	struct pnfs_layout_segment *freeme;
	int i;

	lockdep_assert_held(cinfo->lock);
restart:
	for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
		if (pnfs_generic_transfer_commit_list(&b->written, dst,
						      cinfo, 0)) {
			freeme = b->wlseg;
			b->wlseg = NULL;
			spin_unlock(cinfo->lock);
			pnfs_put_lseg(freeme);
			spin_lock(cinfo->lock);
			goto restart;
		}
	}
	cinfo->ds->nwritten = 0;
}
EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);

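/* Clean-up path for a failed commit setup: put the committing requests of
 * every bucket at or above @idx back on the appropriate commit lists via
 * nfs_retry_commit() and drop each bucket's committing lseg reference.
 */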
static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct pnfs_commit_bucket *bucket;
	struct pnfs_layout_segment *freeme;
	int i;

	for (i = idx; i < fl_cinfo->nbuckets; i++) {
		bucket = &fl_cinfo->buckets[i];
		if (list_empty(&bucket->committing))
			continue;
		nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo, i);
		spin_lock(cinfo->lock);
		freeme = bucket->clseg;
		bucket->clseg = NULL;
		spin_unlock(cinfo->lock);
		pnfs_put_lseg(freeme);
	}
}

static unsigned int
pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
			      struct list_head *list)
{
	struct pnfs_ds_commit_info *fl_cinfo;
	struct pnfs_commit_bucket *bucket;
	struct nfs_commit_data *data;
	int i;
	unsigned int nreq = 0;

	fl_cinfo = cinfo->ds;
	bucket = fl_cinfo->buckets;
	for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
		if (list_empty(&bucket->committing))
			continue;
		data = nfs_commitdata_alloc();
		if (!data)
			break;
		data->ds_commit_index = i;
		spin_lock(cinfo->lock);
		data->lseg = bucket->clseg;
		bucket->clseg = NULL;
		spin_unlock(cinfo->lock);
		list_add(&data->pages, list);
		nreq++;
	}

	/* Clean up on error */
	pnfs_generic_retry_commit(cinfo, i);
	return nreq;
}

/* This follows nfs_commit_list pretty closely */
int
pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			     int how, struct nfs_commit_info *cinfo,
			     int (*initiate_commit)(struct nfs_commit_data *data,
						    int how))
{
	struct nfs_commit_data *data, *tmp;
	LIST_HEAD(list);
	unsigned int nreq = 0;

	if (!list_empty(mds_pages)) {
		data = nfs_commitdata_alloc();
		if (data != NULL) {
			data->lseg = NULL;
			list_add(&data->pages, &list);
			nreq++;
		} else {
			nfs_retry_commit(mds_pages, NULL, cinfo, 0);
			pnfs_generic_retry_commit(cinfo, 0);
			cinfo->completion_ops->error_cleanup(NFS_I(inode));
			return -ENOMEM;
		}
	}

	nreq += pnfs_generic_alloc_ds_commits(cinfo, &list);

	if (nreq == 0) {
		cinfo->completion_ops->error_cleanup(NFS_I(inode));
		goto out;
	}

	atomic_add(nreq, &cinfo->mds->rpcs_out);

	list_for_each_entry_safe(data, tmp, &list, pages) {
		list_del_init(&data->pages);
		if (!data->lseg) {
			nfs_init_commit(data, mds_pages, NULL, cinfo);
			nfs_initiate_commit(NFS_CLIENT(inode), data,
					    NFS_PROTO(data->inode),
					    data->mds_ops, how, 0);
		} else {
			struct pnfs_commit_bucket *buckets;

			buckets = cinfo->ds->buckets;
			nfs_init_commit(data,
					&buckets[data->ds_commit_index].committing,
					data->lseg,
					cinfo);
			initiate_commit(data, how);
		}
	}
out:
	cinfo->ds->ncommitting = 0;
	return PNFS_ATTEMPTED;
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);

/*
 * Data server cache
 *
 * Data servers can be mapped to different device ids.
 * nfs4_pnfs_ds reference counting
 *   - set to 1 on allocation
 *   - incremented when a device id maps a data server already in the cache.
 *   - decremented when deviceid is removed from the cache.
 */
static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
static LIST_HEAD(nfs4_data_server_cache);

/* Debug routines */
static void
print_ds(struct nfs4_pnfs_ds *ds)
{
	if (ds == NULL) {
		printk(KERN_WARNING "%s NULL device\n", __func__);
		return;
	}
	printk(KERN_WARNING " ds %s\n"
		" ref count %d\n"
		" client %p\n"
		" cl_exchange_flags %x\n",
		ds->ds_remotestr,
		atomic_read(&ds->ds_count), ds->ds_clp,
		ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
}

static bool
same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
{
	struct sockaddr_in *a, *b;
	struct sockaddr_in6 *a6, *b6;

	if (addr1->sa_family != addr2->sa_family)
		return false;

	switch (addr1->sa_family) {
	case AF_INET:
		a = (struct sockaddr_in *)addr1;
		b = (struct sockaddr_in *)addr2;

		if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
		    a->sin_port == b->sin_port)
			return true;
		break;

	case AF_INET6:
		a6 = (struct sockaddr_in6 *)addr1;
		b6 = (struct sockaddr_in6 *)addr2;

		/* LINKLOCAL addresses must have matching scope_id */
		if (ipv6_addr_src_scope(&a6->sin6_addr) ==
		    IPV6_ADDR_SCOPE_LINKLOCAL &&
		    a6->sin6_scope_id != b6->sin6_scope_id)
			return false;

		if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
		    a6->sin6_port == b6->sin6_port)
			return true;
		break;

	default:
		dprintk("%s: unhandled address family: %u\n",
			__func__, addr1->sa_family);
		return false;
	}

	return false;
}

/*
 * Checks if 'dsaddrs1' contains a subset of 'dsaddrs2'. If it does,
 * declare a match.
 */
static bool
_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
			       const struct list_head *dsaddrs2)
{
	struct nfs4_pnfs_ds_addr *da1, *da2;
	struct sockaddr *sa1, *sa2;
	bool match = false;

	list_for_each_entry(da1, dsaddrs1, da_node) {
		sa1 = (struct sockaddr *)&da1->da_addr;
		match = false;
		list_for_each_entry(da2, dsaddrs2, da_node) {
			sa2 = (struct sockaddr *)&da2->da_addr;
			match = same_sockaddr(sa1, sa2);
			if (match)
				break;
		}
		if (!match)
			break;
	}
	return match;
}

/*
 * Lookup DS by addresses. nfs4_ds_cache_lock is held
 */
static struct nfs4_pnfs_ds *
_data_server_lookup_locked(const struct list_head *dsaddrs)
{
	struct nfs4_pnfs_ds *ds;

	list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
		if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
			return ds;
	return NULL;
}

static void destroy_ds(struct nfs4_pnfs_ds *ds)
{
	struct nfs4_pnfs_ds_addr *da;

	dprintk("--> %s\n", __func__);
	ifdebug(FACILITY)
		print_ds(ds);

	nfs_put_client(ds->ds_clp);

	while (!list_empty(&ds->ds_addrs)) {
		da = list_first_entry(&ds->ds_addrs,
				      struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	kfree(ds->ds_remotestr);
	kfree(ds);
}

void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
{
	if (atomic_dec_and_lock(&ds->ds_count,
				&nfs4_ds_cache_lock)) {
		list_del_init(&ds->ds_node);
		spin_unlock(&nfs4_ds_cache_lock);
		destroy_ds(ds);
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put);

/*
 * Create a string with a human readable address and port to avoid
 * complicated setup around many dprintks.
 */
static char *
nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da;
	char *remotestr;
	size_t len;
	char *p;

	len = 3;	/* '{', '}' and eol */
	list_for_each_entry(da, dsaddrs, da_node) {
		len += strlen(da->da_remotestr) + 1;	/* string plus comma */
	}

	remotestr = kzalloc(len, gfp_flags);
	if (!remotestr)
		return NULL;

	p = remotestr;
	*(p++) = '{';
	len--;
	list_for_each_entry(da, dsaddrs, da_node) {
		size_t ll = strlen(da->da_remotestr);

		if (ll > len)
			goto out_err;

		memcpy(p, da->da_remotestr, ll);
		p += ll;
		len -= ll;

		if (len < 1)
			goto out_err;
		(*p++) = ',';
		len--;
	}
	if (len < 2)
		goto out_err;
	*(p++) = '}';
	*p = '\0';
	return remotestr;
out_err:
	kfree(remotestr);
	return NULL;
}

/*
 * Given a list of multipath struct nfs4_pnfs_ds_addr, add it to ds cache if
 * uncached and return cached struct nfs4_pnfs_ds.
 */
struct nfs4_pnfs_ds *
nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
	char *remotestr;

	if (list_empty(dsaddrs)) {
		dprintk("%s: no addresses defined\n", __func__);
		goto out;
	}

	ds = kzalloc(sizeof(*ds), gfp_flags);
	if (!ds)
		goto out;

	/* this is only used for debugging, so it's ok if it's NULL */
	remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);

	spin_lock(&nfs4_ds_cache_lock);
	tmp_ds = _data_server_lookup_locked(dsaddrs);
	if (tmp_ds == NULL) {
		INIT_LIST_HEAD(&ds->ds_addrs);
		list_splice_init(dsaddrs, &ds->ds_addrs);
		ds->ds_remotestr = remotestr;
		atomic_set(&ds->ds_count, 1);
		INIT_LIST_HEAD(&ds->ds_node);
		ds->ds_clp = NULL;
		list_add(&ds->ds_node, &nfs4_data_server_cache);
		dprintk("%s add new data server %s\n", __func__,
			ds->ds_remotestr);
	} else {
		kfree(remotestr);
		kfree(ds);
		atomic_inc(&tmp_ds->ds_count);
		dprintk("%s data server %s found, inc'ed ds_count to %d\n",
			__func__, tmp_ds->ds_remotestr,
			atomic_read(&tmp_ds->ds_count));
		ds = tmp_ds;
	}
	spin_unlock(&nfs4_ds_cache_lock);
out:
	return ds;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);

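/* DS connection attempts are serialized by the NFS4DS_CONNECTING bit in
 * ds->ds_state: the caller that wins test_and_set_bit() in
 * nfs4_pnfs_ds_connect() performs the connect, while everyone else sleeps in
 * nfs4_wait_ds_connect() until nfs4_clear_ds_conn_bit() wakes them up.
 */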
static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
{
	might_sleep();
	wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
		    TASK_KILLABLE);
}

static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
{
	smp_mb__before_atomic();
	clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
	smp_mb__after_atomic();
	wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
}

static struct nfs_client *(*get_v3_ds_connect)(
			struct nfs_client *mds_clp,
			const struct sockaddr *ds_addr,
			int ds_addrlen,
			int ds_proto,
			unsigned int ds_timeo,
			unsigned int ds_retrans,
			rpc_authflavor_t au_flavor);

static bool load_v3_ds_connect(void)
{
	if (!get_v3_ds_connect) {
		get_v3_ds_connect = symbol_request(nfs3_set_ds_client);
		WARN_ON_ONCE(!get_v3_ds_connect);
	}

	return(get_v3_ds_connect != NULL);
}

void nfs4_pnfs_v3_ds_connect_unload(void)
{
	if (get_v3_ds_connect) {
		symbol_put(nfs3_set_ds_client);
		get_v3_ds_connect = NULL;
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_v3_ds_connect_unload);

static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
				 struct nfs4_pnfs_ds *ds,
				 unsigned int timeo,
				 unsigned int retrans,
				 rpc_authflavor_t au_flavor)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs4_pnfs_ds_addr *da;
	int status = 0;

	dprintk("--> %s DS %s au_flavor %d\n", __func__,
		ds->ds_remotestr, au_flavor);

	if (!load_v3_ds_connect())
		goto out;

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		clp = get_v3_ds_connect(mds_srv->nfs_client,
				(struct sockaddr *)&da->da_addr,
				da->da_addrlen, IPPROTO_TCP,
				timeo, retrans, au_flavor);
		if (!IS_ERR(clp))
			break;
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	smp_wmb();
	ds->ds_clp = clp;
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
}

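/* Like the v3 variant above, but calls nfs4_set_ds_client() directly and,
 * once a transport has been established, also sets up the DS session before
 * publishing ds->ds_clp.
 */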
static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
				 struct nfs4_pnfs_ds *ds,
				 unsigned int timeo,
				 unsigned int retrans,
				 u32 minor_version,
				 rpc_authflavor_t au_flavor)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs4_pnfs_ds_addr *da;
	int status = 0;

	dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
		au_flavor);

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		clp = nfs4_set_ds_client(mds_srv->nfs_client,
					(struct sockaddr *)&da->da_addr,
					da->da_addrlen, IPPROTO_TCP,
					timeo, retrans, minor_version,
					au_flavor);
		if (!IS_ERR(clp))
			break;
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
	if (status)
		goto out_put;

	smp_wmb();
	ds->ds_clp = clp;
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
out_put:
	nfs_put_client(clp);
	goto out;
}

/*
 * Create an rpc connection to the nfs4_pnfs_ds data server.
 * Currently only supports IPv4 and IPv6 addresses.
 * If connection fails, make devid unavailable.
 */
void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
			  struct nfs4_deviceid_node *devid, unsigned int timeo,
			  unsigned int retrans, u32 version,
			  u32 minor_version, rpc_authflavor_t au_flavor)
{
	if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
		int err = 0;

		if (version == 3) {
			err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
						       retrans, au_flavor);
		} else if (version == 4) {
			err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo,
						       retrans, minor_version,
						       au_flavor);
		} else {
			dprintk("%s: unsupported DS version %d\n", __func__,
				version);
			err = -EPROTONOSUPPORT;
		}

		if (err)
			nfs4_mark_deviceid_unavailable(devid);
		nfs4_clear_ds_conn_bit(ds);
	} else {
		nfs4_wait_ds_connect(ds);
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);

/*
 * Currently only supports ipv4, ipv6 and one multi-path address.
 */
struct nfs4_pnfs_ds_addr *
nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da = NULL;
	char *buf, *portstr;
	__be16 port;
	int nlen, rlen;
	int tmp[2];
	__be32 *p;
	char *netid, *match_netid;
	size_t len, match_netid_len;
	char *startsep = "";
	char *endsep = "";


	/* r_netid */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_err;
	nlen = be32_to_cpup(p++);

	p = xdr_inline_decode(xdr, nlen);
	if (unlikely(!p))
		goto out_err;

	netid = kmalloc(nlen+1, gfp_flags);
	if (unlikely(!netid))
		goto out_err;

	netid[nlen] = '\0';
	memcpy(netid, p, nlen);

	/* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_free_netid;
	rlen = be32_to_cpup(p);

	p = xdr_inline_decode(xdr, rlen);
	if (unlikely(!p))
		goto out_free_netid;

	/* port is ".ABC.DEF", 8 chars max */
	if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) {
		dprintk("%s: Invalid address, length %d\n", __func__,
			rlen);
		goto out_free_netid;
	}
	buf = kmalloc(rlen + 1, gfp_flags);
	if (!buf) {
		dprintk("%s: Not enough memory\n", __func__);
		goto out_free_netid;
	}
	buf[rlen] = '\0';
	memcpy(buf, p, rlen);

	/* replace port '.' with '-' */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot in port\n",
			__func__);
		goto out_free_buf;
	}
	*portstr = '-';

	/* find '.' between address and port */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot between address and "
			"port\n", __func__);
		goto out_free_buf;
	}
	*portstr = '\0';

	da = kzalloc(sizeof(*da), gfp_flags);
	if (unlikely(!da))
		goto out_free_buf;

	INIT_LIST_HEAD(&da->da_node);

	if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
		      sizeof(da->da_addr))) {
		dprintk("%s: error parsing address %s\n", __func__, buf);
		goto out_free_da;
	}

	portstr++;
	sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
	port = htons((tmp[0] << 8) | (tmp[1]));

	switch (da->da_addr.ss_family) {
	case AF_INET:
		((struct sockaddr_in *)&da->da_addr)->sin_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in);
		match_netid = "tcp";
		match_netid_len = 3;
		break;

	case AF_INET6:
		((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in6);
		match_netid = "tcp6";
		match_netid_len = 4;
		startsep = "[";
		endsep = "]";
		break;

	default:
		dprintk("%s: unsupported address family: %u\n",
			__func__, da->da_addr.ss_family);
		goto out_free_da;
	}

	if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) {
		dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n",
			__func__, netid, match_netid);
		goto out_free_da;
	}

	/* save human readable address */
	len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
	da->da_remotestr = kzalloc(len, gfp_flags);

	/* NULL is ok, only used for dprintk */
	if (da->da_remotestr)
		snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
			 buf, endsep, ntohs(port));

	dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
	kfree(buf);
	kfree(netid);
	return da;

out_free_da:
	kfree(da);
out_free_buf:
	dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
	kfree(buf);
out_free_netid:
	kfree(netid);
out_err:
	return NULL;
}
EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);

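/* Mark @req for commit to the data server bucket selected by @ds_commit_idx:
 * flag it PG_COMMIT_TO_DS, take an lseg reference for the bucket if it was
 * empty, and add the request to that bucket's written list.
 */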
void
pnfs_layout_mark_request_commit(struct nfs_page *req,
				struct pnfs_layout_segment *lseg,
				struct nfs_commit_info *cinfo,
				u32 ds_commit_idx)
{
	struct list_head *list;
	struct pnfs_commit_bucket *buckets;

	spin_lock(cinfo->lock);
	buckets = cinfo->ds->buckets;
	list = &buckets[ds_commit_idx].written;
	if (list_empty(list)) {
		/* Non-empty buckets hold a reference on the lseg.  That ref
		 * is normally transferred to the COMMIT call and released
		 * there.  It could also be released if the last req is pulled
		 * off due to a rewrite, in which case it will be done in
		 * pnfs_generic_clear_request_commit
		 */
		WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
		buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
	}
	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
	cinfo->ds->nwritten++;
	spin_unlock(cinfo->lock);

	nfs_request_add_commit_list(req, list, cinfo);
}
EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);

int
pnfs_nfs_generic_sync(struct inode *inode, bool datasync)
{
	if (datasync)
		return 0;
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_nfs_generic_sync);