/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/crc-t10dif.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"
#include "target_core_alua.h"

static sense_reason_t
sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
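
/*
 * READ CAPACITY (10) returns an 8-byte parameter payload: the last
 * addressable LBA in bytes 0-3 and the logical block size in bytes 4-7,
 * both big-endian. A returned LBA of 0xffffffff directs the initiator
 * to retry with READ CAPACITY (16).
 */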
static sense_reason_t
sbc_emulate_readcapacity(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned long long blocks_long = dev->transport->get_blocks(dev);
	unsigned char *rbuf;
	unsigned char buf[8];
	u32 blocks;

	/*
	 * SBC-2 says:
	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
	 *   ADDRESS field is not set to zero, the device server shall
	 *   terminate the command with CHECK CONDITION status with
	 *   the sense key set to ILLEGAL REQUEST and the additional
	 *   sense code set to INVALID FIELD IN CDB.
	 *
	 * In SBC-3, these fields are obsolete, but some SCSI
	 * compliance tests actually check this, so we might as well
	 * follow SBC-2.
	 */
	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
		return TCM_INVALID_CDB_FIELD;

	if (blocks_long >= 0x00000000ffffffff)
		blocks = 0xffffffff;
	else
		blocks = (u32)blocks_long;

	buf[0] = (blocks >> 24) & 0xff;
	buf[1] = (blocks >> 16) & 0xff;
	buf[2] = (blocks >> 8) & 0xff;
	buf[3] = blocks & 0xff;
	buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[7] = dev->dev_attrib.block_size & 0xff;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8);
	return 0;
}

static sense_reason_t
sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;

	unsigned char *rbuf;
	unsigned char buf[32];
	unsigned long long blocks = dev->transport->get_blocks(dev);

	memset(buf, 0, sizeof(buf));
	buf[0] = (blocks >> 56) & 0xff;
	buf[1] = (blocks >> 48) & 0xff;
	buf[2] = (blocks >> 40) & 0xff;
	buf[3] = (blocks >> 32) & 0xff;
	buf[4] = (blocks >> 24) & 0xff;
	buf[5] = (blocks >> 16) & 0xff;
	buf[6] = (blocks >> 8) & 0xff;
	buf[7] = blocks & 0xff;
	buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff;
	buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
	buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
	buf[11] = dev->dev_attrib.block_size & 0xff;
	/*
	 * Set P_TYPE and PROT_EN bits for DIF support
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		/*
		 * Only override a device's pi_prot_type if no T10-PI is
		 * available, and sess_prot_type has been explicitly enabled.
		 */
		if (!pi_prot_type)
			pi_prot_type = sess->sess_prot_type;

		if (pi_prot_type)
			buf[12] = (pi_prot_type - 1) << 1 | 0x1;
	}

	if (dev->transport->get_lbppbe)
		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;

	if (dev->transport->get_alignment_offset_lbas) {
		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
		buf[14] = (lalba >> 8) & 0x3f;
		buf[15] = lalba & 0xff;
	}

	/*
	 * Set Thin Provisioning Enable bit following sbc3r22 in section
	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		buf[14] |= 0x80;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 32);
	return 0;
}
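
/*
 * Pull the NUMBER OF LOGICAL BLOCKS field out of the WRITE SAME (10),
 * (16) or (32) CDB. A value of zero means "through the last LBA of the
 * medium" and is resolved against ->get_blocks() below.
 */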
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
	u32 num_blocks;

	if (cmd->t_task_cdb[0] == WRITE_SAME)
		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

	/*
	 * Use the explicit range when non-zero is supplied, otherwise calculate
	 * the remaining range based on ->get_blocks() - starting LBA.
	 */
	if (num_blocks)
		return num_blocks;

	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
		cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

static sense_reason_t
sbc_emulate_noop(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
	return cmd->se_dev->dev_attrib.block_size * sectors;
}

static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
	/*
	 * Use 8-bit sector value. SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written. Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
	return cdb[4] ? : 256;
}

static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
	return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		(cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		(cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}
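
/*
 * Common setup for all WRITE SAME variants. The flags byte handed in is
 * CDB byte 1 for WRITE_SAME/WRITE_SAME_16 and byte 10 for WRITE_SAME_32,
 * carrying the WRPROTECT, ANCHOR (0x10), UNMAP (0x08), PBDATA (0x04) and
 * LBDATA (0x02) bits.
 */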
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
	unsigned int sectors = sbc_get_write_same_sectors(cmd);
	sense_reason_t ret;

	if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
		pr_err("WRITE_SAME PBDATA and LBDATA"
			" bits not supported for Block Discard"
			" Emulation\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}
	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Sanity check for LBA wrap and request past end of device.
	 */
	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
	    ((cmd->t_task_lba + sectors) > end_lba)) {
		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
		return TCM_ADDRESS_OUT_OF_RANGE;
	}

	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
	if (flags[0] & 0x10) {
		pr_warn("WRITE SAME with ANCHOR not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
	 * translated into block discard requests within backend code.
	 */
	if (flags[0] & 0x08) {
		if (!ops->execute_write_same_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpws) {
			pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
			       " has emulate_tpws disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		cmd->execute_cmd = ops->execute_write_same_unmap;
		return 0;
	}
	if (!ops->execute_write_same)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
	if (ret)
		return ret;

	cmd->execute_cmd = ops->execute_write_same;
	return 0;
}
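
/*
 * transport_complete_callback for XDWRITEREAD: runs once backend I/O has
 * completed and XORs the transferred data-out payload into the blocks
 * that were read into t_bidi_data_sg, so the initiator receives the XOR
 * result in the data-in buffer.
 */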
static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
					   int *post_ret)
{
	unsigned char *buf, *addr;
	struct scatterlist *sg;
	unsigned int offset;
	sense_reason_t ret = TCM_NO_SENSE;
	int i, count;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate xor_callback buf\n");
		return TCM_OUT_OF_RESOURCES;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
	 * into the locally allocated *buf
	 */
	sg_copy_to_buffer(cmd->t_data_sg,
			  cmd->t_data_nents,
			  buf,
			  cmd->data_length);

	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * cmd->t_bidi_data_sg
	 */

	offset = 0;
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
		addr = kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		for (i = 0; i < sg->length; i++)
			*(addr + sg->offset + i) ^= *(buf + offset + i);

		offset += sg->length;
		kunmap_atomic(addr);
	}

out:
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_execute_rw(struct se_cmd *cmd)
{
	return cmd->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
			       cmd->data_direction);
}

static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
					     int *post_ret)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
	 * within target_complete_ok_work() if the command was successfully
	 * sent to the backend driver.
	 */
	spin_lock_irq(&cmd->t_state_lock);
	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
		*post_ret = 1;
	}
	spin_unlock_irq(&cmd->t_state_lock);

	/*
	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
	 * before the original READ I/O submission.
	 */
	up(&dev->caw_sem);

	return TCM_NO_SENSE;
}
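
/*
 * transport_complete_callback for the READ phase of COMPARE AND WRITE.
 * The verify payload and the write payload arrive back-to-back in
 * t_data_sg (2 * NoLB blocks) while the blocks just read sit in
 * t_bidi_data_sg. On a match the write half is wrapped in a fresh SGL
 * and resubmitted as a normal write; on a mismatch MISCOMPARE is
 * returned and ->caw_sem released.
 */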
static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
						 int *post_ret)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *write_sg = NULL, *sg;
	unsigned char *buf = NULL, *addr;
	struct sg_mapping_iter m;
	unsigned int offset = 0, len;
	unsigned int nlbas = cmd->t_task_nolb;
	unsigned int block_size = dev->dev_attrib.block_size;
	unsigned int compare_len = (nlbas * block_size);
	sense_reason_t ret = TCM_NO_SENSE;
	int rc, i;

	/*
	 * Handle early failure in transport_generic_request_failure(),
	 * which will not have taken ->caw_sem yet..
	 */
	if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
		return TCM_NO_SENSE;
	/*
	 * Handle special case for zero-length COMPARE_AND_WRITE
	 */
	if (!cmd->data_length)
		goto out;
	/*
	 * Immediately exit + release dev->caw_sem if command has already
	 * been failed with a non-zero SCSI status.
	 */
	if (cmd->scsi_status) {
		pr_err("compare_and_write_callback: non zero scsi_status:"
			" 0x%02x\n", cmd->scsi_status);
		goto out;
	}

	buf = kzalloc(cmd->data_length, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate compare_and_write buf\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}

	write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
			   GFP_KERNEL);
	if (!write_sg) {
		pr_err("Unable to allocate compare_and_write sg\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	sg_init_table(write_sg, cmd->t_data_nents);
	/*
	 * Setup verify and write data payloads from total NumberLBAs.
	 */
	rc = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, buf,
			       cmd->data_length);
	if (!rc) {
		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
		ret = TCM_OUT_OF_RESOURCES;
		goto out;
	}
	/*
	 * Compare the SCSI READ payload against the verify payload
	 */
	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) {
		addr = (unsigned char *)kmap_atomic(sg_page(sg));
		if (!addr) {
			ret = TCM_OUT_OF_RESOURCES;
			goto out;
		}

		len = min(sg->length, compare_len);

		if (memcmp(addr, buf + offset, len)) {
			pr_warn("Detected MISCOMPARE for addr: %p buf: %p\n",
				addr, buf + offset);
			kunmap_atomic(addr);
			goto miscompare;
		}
		kunmap_atomic(addr);

		offset += len;
		compare_len -= len;
		if (!compare_len)
			break;
	}

	i = 0;
	len = cmd->t_task_nolb * block_size;
	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
	/*
	 * Currently assumes NoLB=1 and SGLs are PAGE_SIZE..
	 */
	while (len) {
		sg_miter_next(&m);

		if (block_size < PAGE_SIZE) {
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset + block_size);
		} else {
			sg_miter_next(&m);
			sg_set_page(&write_sg[i], m.page, block_size,
				    m.piter.sg->offset);
		}
		len -= block_size;
		i++;
	}
	sg_miter_stop(&m);
	/*
	 * Save the original SGL + nents values before updating to new
	 * assignments, to be released in transport_free_pages() ->
	 * transport_reset_sgl_orig()
	 */
	cmd->t_data_sg_orig = cmd->t_data_sg;
	cmd->t_data_sg = write_sg;
	cmd->t_data_nents_orig = cmd->t_data_nents;
	cmd->t_data_nents = 1;

	cmd->sam_task_attr = TCM_HEAD_TAG;
	cmd->transport_complete_callback = compare_and_write_post;
	/*
	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
	 * for submitting the adjusted SGL to write instance user-data.
	 */
	cmd->execute_cmd = sbc_execute_rw;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	__target_execute_cmd(cmd);

	kfree(buf);
	return ret;

miscompare:
	pr_warn("Target/%s: Send MISCOMPARE check condition and sense\n",
		dev->transport->name);
	ret = TCM_MISCOMPARE_VERIFY;
out:
	/*
	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
	 * sbc_compare_and_write() before the original READ I/O submission.
	 */
	up(&dev->caw_sem);
	kfree(write_sg);
	kfree(buf);
	return ret;
}

static sense_reason_t
sbc_compare_and_write(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;
	int rc;
	/*
	 * Submit the READ first for COMPARE_AND_WRITE to perform the
	 * comparison using SGLs at cmd->t_bidi_data_sg..
	 */
	rc = down_interruptible(&dev->caw_sem);
	if (rc != 0) {
		cmd->transport_complete_callback = NULL;
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * Reset cmd->data_length to individual block_size in order to not
	 * confuse backend drivers that depend on this value matching the
	 * size of the I/O being submitted.
	 */
	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;

	ret = cmd->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
			      DMA_FROM_DEVICE);
	if (ret) {
		cmd->transport_complete_callback = NULL;
		up(&dev->caw_sem);
		return ret;
	}
	/*
	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
	 * upon MISCOMPARE, or in compare_and_write_post() upon completion
	 * of WRITE instance user-data.
	 */
	return TCM_NO_SENSE;
}
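
/*
 * Map the SBC RDPROTECT/WRPROTECT field (and session-level fabric
 * protection capabilities) onto the internal prot_op and prot_checks
 * flags consumed by the backend DIF code.
 */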
static int
sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
		       bool is_write, struct se_cmd *cmd)
{
	if (is_write) {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
			       protect ? TARGET_PROT_DOUT_PASS :
			       TARGET_PROT_DOUT_INSERT;
		switch (protect) {
		case 0x0:
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	} else {
		cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
			       protect ? TARGET_PROT_DIN_PASS :
			       TARGET_PROT_DIN_STRIP;
		switch (protect) {
		case 0x0:
		case 0x1:
		case 0x5:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x2:
			if (prot_type == TARGET_DIF_TYPE1_PROT)
				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
			break;
		case 0x3:
			cmd->prot_checks = 0;
			break;
		case 0x4:
			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
			break;
		default:
			pr_err("Unsupported protect field %d\n", protect);
			return -EINVAL;
		}
	}

	return 0;
}

static sense_reason_t
sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
	       u32 sectors, bool is_write)
{
	u8 protect = cdb[1] >> 5;
	int sp_ops = cmd->se_sess->sup_prot_ops;
	int pi_prot_type = dev->dev_attrib.pi_prot_type;
	bool fabric_prot = false;

	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
		if (unlikely(protect &&
		    !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
			pr_err("CDB contains protect bit, but device + fabric does"
			       " not advertise PROTECT=1 feature bit\n");
			return TCM_INVALID_CDB_FIELD;
		}
		if (cmd->prot_pto)
			return TCM_NO_SENSE;
	}

	switch (dev->dev_attrib.pi_prot_type) {
	case TARGET_DIF_TYPE3_PROT:
		cmd->reftag_seed = 0xffffffff;
		break;
	case TARGET_DIF_TYPE2_PROT:
		if (protect)
			return TCM_INVALID_CDB_FIELD;

		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE1_PROT:
		cmd->reftag_seed = cmd->t_task_lba;
		break;
	case TARGET_DIF_TYPE0_PROT:
		/*
		 * See if the fabric supports T10-PI, and the session has been
		 * configured to allow export PROTECT=1 feature bit with backend
		 * devices that don't support T10-PI.
		 */
		fabric_prot = is_write ?
			      !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
			      !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));

		if (fabric_prot && cmd->se_sess->sess_prot_type) {
			pi_prot_type = cmd->se_sess->sess_prot_type;
			break;
		}
		if (!protect)
			return TCM_NO_SENSE;
		/* Fallthrough */
	default:
		pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
		       "PROTECT: 0x%02x\n", cdb[0], protect);
		return TCM_INVALID_CDB_FIELD;
	}

	if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
		return TCM_INVALID_CDB_FIELD;

	cmd->prot_type = pi_prot_type;
	cmd->prot_length = dev->prot_length * sectors;

	/*
	 * In case protection information exists over the wire
	 * we modify command data length to describe pure data.
	 * The actual transfer length is data length + protection
	 * length
	 */
	if (protect)
		cmd->data_length = sectors * dev->dev_attrib.block_size;

	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
		 "prot_op=%d prot_checks=%d\n",
		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
		 cmd->prot_op, cmd->prot_checks);

	return TCM_NO_SENSE;
}
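
/*
 * Validate the DPO (CDB byte 1, bit 4) and FUA (byte 1, bit 3) hints
 * against what the device advertises; a legal FUA additionally tags the
 * command with SCF_FUA for the backend.
 */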
static int
sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
{
	if (cdb[1] & 0x10) {
		if (!dev->dev_attrib.emulate_dpo) {
			pr_err("Got CDB: 0x%02x with DPO bit set, but device"
			       " does not advertise support for DPO\n", cdb[0]);
			return -EINVAL;
		}
	}
	if (cdb[1] & 0x8) {
		if (!dev->dev_attrib.emulate_fua_write || !se_dev_check_wce(dev)) {
			pr_err("Got CDB: 0x%02x with FUA bit set, but device"
			       " does not advertise support for FUA write\n",
			       cdb[0]);
			return -EINVAL;
		}
		cmd->se_cmd_flags |= SCF_FUA;
	}
	return 0;
}

sense_reason_t
sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned int size;
	u32 sectors = 0;
	sense_reason_t ret;

	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb);
		cmd->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_10:
	case WRITE_VERIFY:
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb);
		cmd->t_task_lba = transport_lba_64(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
		if (ret)
			return ret;

		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		break;
	case XDWRITEREAD_10:
		if (cmd->data_direction != DMA_TO_DEVICE ||
		    !(cmd->se_cmd_flags & SCF_BIDI))
			return TCM_INVALID_CDB_FIELD;
		sectors = transport_get_sectors_10(cdb);

		if (sbc_check_dpofua(dev, cmd, cdb))
			return TCM_INVALID_CDB_FIELD;

		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

		/*
		 * Setup BIDI XOR callback to be run after I/O completion.
		 */
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_execute_rw;
		cmd->transport_complete_callback = &xdreadwrite_callback;
		break;
	case VARIABLE_LENGTH_CMD:
	{
		u16 service_action = get_unaligned_be16(&cdb[8]);
		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb);

			if (sbc_check_dpofua(dev, cmd, cdb))
				return TCM_INVALID_CDB_FIELD;
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

			/*
			 * Setup BIDI XOR callback to be run after I/O
			 * completion.
			 */
			cmd->execute_rw = ops->execute_rw;
			cmd->execute_cmd = sbc_execute_rw;
			cmd->transport_complete_callback = &xdreadwrite_callback;
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb);
			if (!sectors) {
				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
				       " supported\n");
				return TCM_INVALID_CDB_FIELD;
			}

			size = sbc_get_size(cmd, 1);
			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

			ret = sbc_setup_write_same(cmd, &cdb[10], ops);
			if (ret)
				return ret;
			break;
		default:
			pr_err("VARIABLE_LENGTH_CMD service action"
			       " 0x%04x not supported\n", service_action);
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		break;
	}
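	/*
	 * COMPARE AND WRITE carries a 64-bit LBA in bytes 2-9 and the
	 * NUMBER OF LOGICAL BLOCKS in byte 13; the data-out buffer holds
	 * the verify payload followed by the write payload, hence the
	 * doubled size below.
	 */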
	case COMPARE_AND_WRITE:
		sectors = cdb[13];
		/*
		 * Currently enforce COMPARE_AND_WRITE for a single sector
		 */
		if (sectors > 1) {
			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
			       " than 1\n", sectors);
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Double size because we have two buffers, note that
		 * zero is not an error..
		 */
		size = 2 * sbc_get_size(cmd, sectors);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
		cmd->t_task_nolb = sectors;
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
		cmd->execute_rw = ops->execute_rw;
		cmd->execute_cmd = sbc_compare_and_write;
		cmd->transport_complete_callback = compare_and_write_callback;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->execute_cmd = sbc_emulate_readcapacity;
		break;
	case SERVICE_ACTION_IN_16:
		switch (cmd->t_task_cdb[1] & 0x1f) {
		case SAI_READ_CAPACITY_16:
			cmd->execute_cmd = sbc_emulate_readcapacity_16;
			break;
		case SAI_REPORT_REFERRALS:
			cmd->execute_cmd = target_emulate_report_referrals;
			break;
		default:
			pr_err("Unsupported SA: 0x%02x\n",
			       cmd->t_task_cdb[1] & 0x1f);
			return TCM_INVALID_CDB_FIELD;
		}
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		break;
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb);
			cmd->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb);
			cmd->t_task_lba = transport_lba_64(cdb);
		}
		if (ops->execute_sync_cache) {
			cmd->execute_cmd = ops->execute_sync_cache;
			goto check_lba;
		}
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	case UNMAP:
		if (!ops->execute_unmap)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (!dev->dev_attrib.emulate_tpu) {
			pr_err("Got UNMAP, but backend device has"
			       " emulate_tpu disabled\n");
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		}
		size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = ops->execute_unmap;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case WRITE_SAME:
		sectors = transport_get_sectors_10(cdb);
		if (!sectors) {
			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
			return TCM_INVALID_CDB_FIELD;
		}

		size = sbc_get_size(cmd, 1);
		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

		/*
		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
		 * of byte 1 bit 3 UNMAP instead of original reserved field
		 */
		ret = sbc_setup_write_same(cmd, &cdb[1], ops);
		if (ret)
			return ret;
		break;
	case VERIFY:
		size = 0;
		sectors = transport_get_sectors_10(cdb);
		cmd->t_task_lba = transport_lba_32(cdb);
		cmd->execute_cmd = sbc_emulate_noop;
		goto check_lba;
	case REZERO_UNIT:
	case SEEK_6:
	case SEEK_10:
		/*
		 * There are still clients out there which use these old SCSI-2
		 * commands. This mainly happens when running VMs with legacy
		 * guest systems, connected via SCSI command pass-through to
		 * iSCSI targets. Make them happy and return status GOOD.
		 */
		size = 0;
		cmd->execute_cmd = sbc_emulate_noop;
		break;
	default:
		ret = spc_parse_cdb(cmd, &size);
		if (ret)
			return ret;
	}

	/* reject any command that we don't have a handler for */
	if (!cmd->execute_cmd)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
		unsigned long long end_lba;
check_lba:
		end_lba = dev->transport->get_blocks(dev) + 1;
		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
		    ((cmd->t_task_lba + sectors) > end_lba)) {
			pr_err("cmd exceeds last lba %llu "
			       "(lba %llu, sectors %u)\n",
			       end_lba, cmd->t_task_lba, sectors);
			return TCM_ADDRESS_OUT_OF_RANGE;
		}

		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
			size = sbc_get_size(cmd, sectors);
	}

	return target_cmd_size_check(cmd, size);
}
EXPORT_SYMBOL(sbc_parse_cdb);

u32 sbc_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}
EXPORT_SYMBOL(sbc_get_device_type);
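
/*
 * Walk an UNMAP parameter list: an 8-byte header carrying the UNMAP DATA
 * LENGTH and BLOCK DESCRIPTOR DATA LENGTH, followed by 16-byte block
 * descriptors that each hold an 8-byte starting LBA and a 4-byte number
 * of logical blocks (plus 4 reserved bytes).
 */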
sense_reason_t
sbc_execute_unmap(struct se_cmd *cmd,
	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
				      sector_t, sector_t),
	void *priv)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size;
	u32 range;
	sense_reason_t ret = 0;
	int dl, bd_dl;

	/* We never set ANC_SUP */
	if (cmd->t_task_cdb[1])
		return TCM_INVALID_CDB_FIELD;

	if (cmd->data_length == 0) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < 8) {
		pr_warn("UNMAP parameter list length %u too small\n",
			cmd->data_length);
		return TCM_PARAMETER_LIST_LENGTH_ERROR;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = cmd->data_length - 8;
	if (bd_dl > size)
		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
			cmd->data_length, bd_dl);
	else
		size = bd_dl;

	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
		ret = TCM_INVALID_PARAMETER_LIST;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
			 (unsigned long long)lba, range);

		if (range > dev->dev_attrib.max_unmap_lba_count) {
			ret = TCM_INVALID_PARAMETER_LIST;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			ret = TCM_ADDRESS_OUT_OF_RANGE;
			goto err;
		}

		ret = do_unmap_fn(cmd, priv, lba, range);
		if (ret)
			goto err;

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}
EXPORT_SYMBOL(sbc_execute_unmap);
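
/*
 * T10-PI DIF v1 tuples are 8 bytes per logical block: a 16-bit GUARD tag
 * (CRC16 T10 DIF over the block's data), a 16-bit APPLICATION tag, and a
 * 32-bit REFERENCE tag holding the lower 32 LBA bits for Type 1.
 */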
void
sbc_dif_generate(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = cmd->t_task_lba;
	void *daddr, *paddr;
	int i, j, offset = 0;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;
			sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
						dev->dev_attrib.block_size));
			if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
			sdt->app_tag = 0;

			pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (cmd->data_direction == DMA_TO_DEVICE) ?
				 "WRITE" : "READ", (unsigned long long)sector,
				 sdt->guard_tag, sdt->app_tag,
				 be32_to_cpu(sdt->ref_tag));

			sector++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
}

static sense_reason_t
sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt,
		  const void *p, sector_t sector, unsigned int ei_lba)
{
	struct se_device *dev = cmd->se_dev;
	int block_size = dev->dev_attrib.block_size;
	__be16 csum;

	if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
		goto check_ref;

	csum = cpu_to_be16(crc_t10dif(p, block_size));

	if (sdt->guard_tag != csum) {
		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
		       " csum 0x%04x\n", (unsigned long long)sector,
		       be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
	}

check_ref:
	if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
		return 0;

	if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
		       be32_to_cpu(sdt->ref_tag), ei_lba);
		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
	}

	return 0;
}
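
/*
 * Shuttle protection tuples between cmd->t_prot_sg and a second,
 * caller-provided scatterlist, walking both lists in lock-step; the
 * read flag selects the direction of the copy.
 */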
static void
sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
		  struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *psg;
	void *paddr, *addr;
	unsigned int i, len, left;
	unsigned int offset = sg_off;

	if (!sg)
		return;

	left = sectors * dev->prot_length;

	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
		unsigned int psg_len, copied = 0;

		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
		psg_len = min(left, psg->length);
		while (psg_len) {
			len = min(psg_len, sg->length - offset);
			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;

			if (read)
				memcpy(paddr + copied, addr, len);
			else
				memcpy(addr, paddr + copied, len);

			left -= len;
			offset += len;
			copied += len;
			psg_len -= len;

			if (offset >= sg->length) {
				sg = sg_next(sg);
				offset = 0;
			}
			kunmap_atomic(addr);
		}
		kunmap_atomic(paddr);
	}
}

sense_reason_t
sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = cmd->t_prot_sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = 0;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF WRITE sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}
	if (!sg)
		return 0;

	sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);

	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_write);

static sense_reason_t
__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		      unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	struct se_device *dev = cmd->se_dev;
	struct se_dif_v1_tuple *sdt;
	struct scatterlist *dsg, *psg = sg;
	sector_t sector = start;
	void *daddr, *paddr;
	int i, j, offset = sg_off;
	sense_reason_t rc;

	for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
		paddr = kmap_atomic(sg_page(psg)) + psg->offset;

		for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {

			if (offset >= psg->length) {
				kunmap_atomic(paddr);
				psg = sg_next(psg);
				paddr = kmap_atomic(sg_page(psg)) + psg->offset;
				offset = 0;
			}

			sdt = paddr + offset;

			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
				 " app_tag: 0x%04x ref_tag: %u\n",
				 (unsigned long long)sector, sdt->guard_tag,
				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));

			if (sdt->app_tag == cpu_to_be16(0xffff)) {
				sector++;
				offset += sizeof(struct se_dif_v1_tuple);
				continue;
			}

			rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
					       ei_lba);
			if (rc) {
				kunmap_atomic(paddr);
				kunmap_atomic(daddr);
				cmd->bad_sector = sector;
				return rc;
			}

			sector++;
			ei_lba++;
			offset += sizeof(struct se_dif_v1_tuple);
		}

		kunmap_atomic(paddr);
		kunmap_atomic(daddr);
	}

	return 0;
}

sense_reason_t
sbc_dif_read_strip(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	u32 sectors = cmd->prot_length / dev->prot_length;

	return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
				     cmd->t_prot_sg, 0);
}
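
/*
 * Read-path entry point for callers that carry their own protection
 * scatterlist: verify the DIF tuples in @sg against the data payload,
 * then let sbc_dif_copy_prot() synchronize them with cmd->t_prot_sg.
 */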
sense_reason_t
sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
		    unsigned int ei_lba, struct scatterlist *sg, int sg_off)
{
	sense_reason_t rc;

	rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
	if (rc)
		return rc;

	sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
	return 0;
}
EXPORT_SYMBOL(sbc_dif_verify_read);