/linux-4.1.27/lib/raid6/ |
recov.c
     25  static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila,
     33  p = (u8 *)ptrs[disks-2];  (raid6_2data_recov_intx1)
     34  q = (u8 *)ptrs[disks-1];  (raid6_2data_recov_intx1)
     41  ptrs[disks-2] = dp;  (raid6_2data_recov_intx1)
     44  ptrs[disks-1] = dq;  (raid6_2data_recov_intx1)
     46  raid6_call.gen_syndrome(disks, bytes, ptrs);  (raid6_2data_recov_intx1)
     51  ptrs[disks-2] = p;  (raid6_2data_recov_intx1)
     52  ptrs[disks-1] = q;  (raid6_2data_recov_intx1)
     69  static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila,
     75  p = (u8 *)ptrs[disks-2];  (raid6_datap_recov_intx1)
     76  q = (u8 *)ptrs[disks-1];  (raid6_datap_recov_intx1)
     82  ptrs[disks-1] = dq;  (raid6_datap_recov_intx1)
     84  raid6_call.gen_syndrome(disks, bytes, ptrs);  (raid6_datap_recov_intx1)
     88  ptrs[disks-1] = q;  (raid6_datap_recov_intx1)
    113  void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs)
    121  if ( failb == disks-1 ) {  (raid6_dual_recov)
    122  if ( faila == disks-2 ) {  (raid6_dual_recov)
    124  raid6_call.gen_syndrome(disks, bytes, ptrs);  (raid6_dual_recov)
    131  if ( failb == disks-2 ) {  (raid6_dual_recov)
    133  raid6_datap_recov(disks, bytes, faila, ptrs);  (raid6_dual_recov)
    136  raid6_2data_recov(disks, bytes, faila, failb, ptrs);  (raid6_dual_recov)
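All three routines above share one layout rule — P lives at ptrs[disks-2], Q at ptrs[disks-1] — and recover by temporarily rewiring the pointer table before calling gen_syndrome(). A hedged outline of that swap/restore pattern, condensed from the hit lines plus the usual lib/raid6/recov.c structure (the GF(2^8) back-substitution that consumes dp/dq afterwards is omitted):

    static void two_data_recov_outline(int disks, size_t bytes,
                                       int faila, int failb, void **ptrs)
    {
            u8 *p, *q, *dp, *dq;

            p = (u8 *)ptrs[disks-2];        /* P parity page */
            q = (u8 *)ptrs[disks-1];        /* Q syndrome page */

            /* Substitute the zero page for the dead data slots and reuse
             * the dead pages as scratch for "delta P" and "delta Q". */
            dp = (u8 *)ptrs[faila];
            ptrs[faila]   = (void *)raid6_empty_zero_page;
            ptrs[disks-2] = dp;
            dq = (u8 *)ptrs[failb];
            ptrs[failb]   = (void *)raid6_empty_zero_page;
            ptrs[disks-1] = dq;

            raid6_call.gen_syndrome(disks, bytes, ptrs);

            /* Restore the pointer table; dp/dq now hold the syndrome of
             * the surviving data and feed the GF math against p/q. */
            ptrs[faila]   = dp;
            ptrs[failb]   = dq;
            ptrs[disks-2] = p;
            ptrs[disks-1] = q;
    }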
|
algos.c
    131  raid6_choose_gen(void *(*const dptrs)[(65536/PAGE_SIZE)+2], const int disks)
    135  int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */  (raid6_choose_gen)
    152  (*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);  (raid6_choose_gen)
    175  (*algo)->xor_syndrome(disks, start, stop,  (raid6_choose_gen)
    209  const int disks = (65536/PAGE_SIZE)+2;  (raid6_select_algo)
    217  for (i = 0; i < disks-2; i++)  (raid6_select_algo)
    228  dptrs[disks-2] = syndromes;  (raid6_select_algo)
    229  dptrs[disks-1] = syndromes + PAGE_SIZE;  (raid6_select_algo)
    232  gen_best = raid6_choose_gen(&dptrs, disks);  (raid6_select_algo)
|
recov_avx2.c
     22  static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
     30  p = (u8 *)ptrs[disks-2];  (raid6_2data_recov_avx2)
     31  q = (u8 *)ptrs[disks-1];  (raid6_2data_recov_avx2)
     38  ptrs[disks-2] = dp;  (raid6_2data_recov_avx2)
     41  ptrs[disks-1] = dq;  (raid6_2data_recov_avx2)
     43  raid6_call.gen_syndrome(disks, bytes, ptrs);  (raid6_2data_recov_avx2)
     48  ptrs[disks-2] = p;  (raid6_2data_recov_avx2)
     49  ptrs[disks-1] = q;  (raid6_2data_recov_avx2)
    192  static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
    199  p = (u8 *)ptrs[disks-2];  (raid6_datap_recov_avx2)
    200  q = (u8 *)ptrs[disks-1];  (raid6_datap_recov_avx2)
    206  ptrs[disks-1] = dq;  (raid6_datap_recov_avx2)
    208  raid6_call.gen_syndrome(disks, bytes, ptrs);  (raid6_datap_recov_avx2)
    212  ptrs[disks-1] = q;  (raid6_datap_recov_avx2)
|
recov_ssse3.c
     22  static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
     32  p = (u8 *)ptrs[disks-2];  (raid6_2data_recov_ssse3)
     33  q = (u8 *)ptrs[disks-1];  (raid6_2data_recov_ssse3)
     40  ptrs[disks-2] = dp;  (raid6_2data_recov_ssse3)
     43  ptrs[disks-1] = dq;  (raid6_2data_recov_ssse3)
     45  raid6_call.gen_syndrome(disks, bytes, ptrs);  (raid6_2data_recov_ssse3)
     50  ptrs[disks-2] = p;  (raid6_2data_recov_ssse3)
     51  ptrs[disks-1] = q;  (raid6_2data_recov_ssse3)
    197  static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
    206  p = (u8 *)ptrs[disks-2];  (raid6_datap_recov_ssse3)
    207  q = (u8 *)ptrs[disks-1];  (raid6_datap_recov_ssse3)
    213  ptrs[disks-1] = dq;  (raid6_datap_recov_ssse3)
    215  raid6_call.gen_syndrome(disks, bytes, ptrs);  (raid6_datap_recov_ssse3)
    219  ptrs[disks-1] = q;  (raid6_datap_recov_ssse3)
|
mmx.c
     40  static void raid6_mmx1_gen_syndrome(int disks, size_t bytes, void **ptrs)
     46  z0 = disks - 3; /* Highest data disk */  (raid6_mmx1_gen_syndrome)
     88  static void raid6_mmx2_gen_syndrome(int disks, size_t bytes, void **ptrs)
     94  z0 = disks - 3; /* Highest data disk */  (raid6_mmx2_gen_syndrome)
|
sse2.c
     41  static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
     47  z0 = disks - 3; /* Highest data disk */  (raid6_sse21_gen_syndrome)
     92  static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
    100  p = dptr[disks-2]; /* XOR parity */  (raid6_sse21_xor_syndrome)
    101  q = dptr[disks-1]; /* RS syndrome */  (raid6_sse21_xor_syndrome)
    151  static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
    157  z0 = disks - 3; /* Highest data disk */  (raid6_sse22_gen_syndrome)
    203  static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
    211  p = dptr[disks-2]; /* XOR parity */  (raid6_sse22_xor_syndrome)
    212  q = dptr[disks-1]; /* RS syndrome */  (raid6_sse22_xor_syndrome)
    283  static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
    289  z0 = disks - 3; /* Highest data disk */  (raid6_sse24_gen_syndrome)
    369  static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
    377  p = dptr[disks-2]; /* XOR parity */  (raid6_sse24_xor_syndrome)
    378  q = dptr[disks-1]; /* RS syndrome */  (raid6_sse24_xor_syndrome)
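Every gen_syndrome() listed in this directory computes the same thing; the variants only differ in vector width and unrolling. A portable, runnable C model of the loop (illustrative, not the kernel's unrolled code; 0x1d is the RAID-6 field polynomial 0x11d with the top bit dropped):

    #include <stddef.h>
    #include <stdint.h>

    static void gen_syndrome_ref(int disks, size_t bytes, void **ptrs)
    {
            uint8_t **d = (uint8_t **)ptrs;
            uint8_t *p = d[disks-2], *q = d[disks-1];
            int z0 = disks - 3;                     /* highest data disk */

            for (size_t i = 0; i < bytes; i++) {
                    uint8_t wp = d[z0][i], wq = wp;

                    for (int z = z0 - 1; z >= 0; z--) {
                            wp ^= d[z][i];          /* P: plain XOR parity */
                            /* Q: multiply the accumulator by x in GF(2^8) */
                            wq = (uint8_t)((wq << 1) ^ ((wq & 0x80) ? 0x1d : 0));
                            wq ^= d[z][i];
                    }
                    p[i] = wp;
                    q[i] = wq;
            }
    }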
|
neon.c
     33  static void raid6_neon ## _n ## _gen_syndrome(int disks, \
     39  raid6_neon ## _n ## _gen_syndrome_real(disks, \
|
sse1.c
     45  static void raid6_sse11_gen_syndrome(int disks, size_t bytes, void **ptrs)
     51  z0 = disks - 3; /* Highest data disk */  (raid6_sse11_gen_syndrome)
    104  static void raid6_sse12_gen_syndrome(int disks, size_t bytes, void **ptrs)
    110  z0 = disks - 3; /* Highest data disk */  (raid6_sse12_gen_syndrome)
|
avx2.c
     42  static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs)
     48  z0 = disks - 3; /* Highest data disk */  (raid6_avx21_gen_syndrome)
    101  static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
    107  z0 = disks - 3; /* Highest data disk */  (raid6_avx22_gen_syndrome)
    165  static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
    171  z0 = disks - 3; /* Highest data disk */  (raid6_avx24_gen_syndrome)
|
/linux-4.1.27/crypto/async_tx/ |
async_pq.c
     38  * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
     49  do_async_gen_syndrome(struct dma_chan *chan, const unsigned char *scfs, int disks, struct dmaengine_unmap_data *unmap, enum dma_ctrl_flags dma_flags, struct async_submit_ctl *submit)
     60  int src_cnt = disks - 2;  (do_async_gen_syndrome)
     91  dma_dest[0] = unmap->addr[disks - 2];  (do_async_gen_syndrome)
     92  dma_dest[1] = unmap->addr[disks - 1];  (do_async_gen_syndrome)
    122  do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
    127  int start = -1, stop = disks - 3;  (do_sync_gen_syndrome)
    134  for (i = 0; i < disks; i++) {  (do_sync_gen_syndrome)
    136  BUG_ON(i > disks - 3); /* P or Q can't be zero */  (do_sync_gen_syndrome)
    140  if (i < disks - 2) {  (do_sync_gen_syndrome)
    150  raid6_call.xor_syndrome(disks, start, stop, len, srcs);  (do_sync_gen_syndrome)
    152  raid6_call.gen_syndrome(disks, len, srcs);  (do_sync_gen_syndrome)
    158  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
    160  * @disks: number of blocks (including missing P or Q, see below)
    167  * 'disks' note: callers can optionally omit either P or Q (but not
    168  * both) from the calculation by setting blocks[disks-2] or
    169  * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
    171  * synchronous path. 'disks' always accounts for both destination
    172  * buffers. If any source buffers (blocks[i] where i < disks - 2) are
    178  async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
    181  int src_cnt = disks - 2;  (async_gen_syndrome)
    183  &P(blocks, disks), 2,  (async_gen_syndrome)
    188  BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));  (async_gen_syndrome)
    191  unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);  (async_gen_syndrome)
    204  pr_debug("%s: (async) disks: %d len: %zu\n",  (async_gen_syndrome)
    205  __func__, disks, len);  (async_gen_syndrome)
    226  if (P(blocks, disks))  (async_gen_syndrome)
    227  unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),  (async_gen_syndrome)
    235  if (Q(blocks, disks))  (async_gen_syndrome)
    236  unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),  (async_gen_syndrome)
    251  pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);  (async_gen_syndrome)
    256  if (!P(blocks, disks)) {  (async_gen_syndrome)
    257  P(blocks, disks) = pq_scribble_page;  (async_gen_syndrome)
    260  if (!Q(blocks, disks)) {  (async_gen_syndrome)
    261  Q(blocks, disks) = pq_scribble_page;  (async_gen_syndrome)
    264  do_sync_gen_syndrome(blocks, offset, disks, len, submit);  (async_gen_syndrome)
    271  pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
    277  disks, len);  (pq_val_chan)
    282  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
    284  * @disks: number of blocks (including missing P or Q, see below)
    291  * and 'disks' parameters of this routine. The synchronous path
    296  async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
    300  struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);  (async_syndrome_val)
    303  unsigned char coefs[disks-2];  (async_syndrome_val)
    307  BUG_ON(disks < 4);  (async_syndrome_val)
    310  unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);  (async_syndrome_val)
    312  if (unmap && disks <= dma_maxpq(device, 0) &&  (async_syndrome_val)
    318  pr_debug("%s: (async) disks: %d len: %zu\n",  (async_syndrome_val)
    319  __func__, disks, len);  (async_syndrome_val)
    322  for (i = 0; i < disks-2; i++)  (async_syndrome_val)
    333  if (!P(blocks, disks)) {  (async_syndrome_val)
    337  pq[0] = dma_map_page(dev, P(blocks, disks),  (async_syndrome_val)
    343  if (!Q(blocks, disks)) {  (async_syndrome_val)
    347  pq[1] = dma_map_page(dev, Q(blocks, disks),  (async_syndrome_val)
    374  struct page *p_src = P(blocks, disks);  (async_syndrome_val)
    375  struct page *q_src = Q(blocks, disks);  (async_syndrome_val)
    382  pr_debug("%s: (sync) disks: %d len: %zu\n",  (async_syndrome_val)
    383  __func__, disks, len);  (async_syndrome_val)
    401  tx = async_xor(spare, blocks, offset, disks-2, len, submit);  (async_syndrome_val)
    409  P(blocks, disks) = NULL;  (async_syndrome_val)
    410  Q(blocks, disks) = spare;  (async_syndrome_val)
    412  tx = async_gen_syndrome(blocks, offset, disks, len, submit);  (async_syndrome_val)
    420  P(blocks, disks) = p_src;  (async_syndrome_val)
    421  Q(blocks, disks) = q_src;  (async_syndrome_val)
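A hedged caller's-eye sketch of async_gen_syndrome(), following the kernel-doc hits above. NDISKS, scribble, p_page and q_page are assumptions set up elsewhere; either P or Q (not both) may be passed as NULL to skip it:

    struct page *blocks[NDISKS];
    struct async_submit_ctl submit;
    struct dma_async_tx_descriptor *tx;

    /* sources at blocks[0..NDISKS-3], P at NDISKS-2, Q at NDISKS-1 */
    blocks[NDISKS-2] = p_page;          /* or NULL to skip P */
    blocks[NDISKS-1] = q_page;          /* or NULL to skip Q */

    init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
    tx = async_gen_syndrome(blocks, 0, NDISKS, PAGE_SIZE, &submit);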
|
raid6test.c
     47  static void makedata(int disks)
     51  for (i = 0; i < disks; i++) {  (makedata)
     57  static char disk_type(int d, int disks)
     59  if (d == disks - 2)  (disk_type)
     61  else if (d == disks - 1)  (disk_type)
     68  static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs)
     78  if (failb == disks-1) {  (raid6_dual_recov)
     79  if (faila == disks-2) {  (raid6_dual_recov)
     82  tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);  (raid6_dual_recov)
     84  struct page *blocks[disks];  (raid6_dual_recov)
     92  for (i = disks; i-- ; ) {  (raid6_dual_recov)
    103  tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);  (raid6_dual_recov)
    106  if (failb == disks-2) {  (raid6_dual_recov)
    109  tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit);  (raid6_dual_recov)
    113  tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit);  (raid6_dual_recov)
    118  tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit);  (raid6_dual_recov)
    122  pr("%s: timeout! (faila: %d failb: %d disks: %d)\n",  (raid6_dual_recov)
    123  __func__, faila, failb, disks);  (raid6_dual_recov)
    130  static int test_disks(int i, int j, int disks)
    140  raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs);  (test_disks)
    146  __func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks),  (test_disks)
    155  static int test(int disks, int *tests)
    163  recovi = data[disks];  (test)
    164  recovj = data[disks+1];  (test)
    165  spare = data[disks+2];  (test)
    167  makedata(disks);  (test)
    170  memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);  (test)
    171  memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);  (test)
    176  tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit);  (test)
    180  pr("error: initial gen_syndrome(%d) timed out\n", disks);  (test)
    184  pr("testing the %d-disk case...\n", disks);  (test)
    185  for (i = 0; i < disks-1; i++)  (test)
    186  for (j = i+1; j < disks; j++) {  (test)
    188  err += test_disks(i, j, disks);  (test)
|
async_raid6_recov.c
    160  __2data_recov_4(int disks, size_t bytes, int faila, int failb,
    172  p = blocks[disks-2];  (__2data_recov_4)
    173  q = blocks[disks-1];  (__2data_recov_4)
    199  __2data_recov_5(int disks, size_t bytes, int faila, int failb,
    214  for (i = 0; i < disks-2; i++) {  (__2data_recov_5)
    224  p = blocks[disks-2];  (__2data_recov_5)
    225  q = blocks[disks-1];  (__2data_recov_5)
    273  __2data_recov_n(int disks, size_t bytes, int faila, int failb,
    285  p = blocks[disks-2];  (__2data_recov_n)
    286  q = blocks[disks-1];  (__2data_recov_n)
    294  blocks[disks-2] = dp;  (__2data_recov_n)
    297  blocks[disks-1] = dq;  (__2data_recov_n)
    300  tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);  (__2data_recov_n)
    305  blocks[disks-2] = p;  (__2data_recov_n)
    306  blocks[disks-1] = q;  (__2data_recov_n)
    342  * @disks: number of disks in the RAID-6 array
    350  async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
    360  pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);  (async_raid6_2data_recov)
    371  for (i = 0; i < disks; i++)  (async_raid6_2data_recov)
    377  raid6_2data_recov(disks, bytes, faila, failb, ptrs);  (async_raid6_2data_recov)
    385  for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)  (async_raid6_2data_recov)
    398  * both data disks missing.  (async_raid6_2data_recov)
    400  return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);  (async_raid6_2data_recov)
    405  * array with 2 of 3 data disks missing.  (async_raid6_2data_recov)
    407  return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);  (async_raid6_2data_recov)
    409  return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);  (async_raid6_2data_recov)
    416  * @disks: number of disks in the RAID-6 array
    423  async_raid6_datap_recov(int disks, size_t bytes, int faila,
    436  pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);  (async_raid6_datap_recov)
    447  for (i = 0; i < disks; i++)  (async_raid6_datap_recov)
    453  raid6_datap_recov(disks, bytes, faila, ptrs);  (async_raid6_datap_recov)
    462  for (i = 0; i < disks-2; i++) {  (async_raid6_datap_recov)
    474  p = blocks[disks-2];  (async_raid6_datap_recov)
    475  q = blocks[disks-1];  (async_raid6_datap_recov)
    482  blocks[disks-1] = dq;  (async_raid6_datap_recov)
    500  tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);  (async_raid6_datap_recov)
    505  blocks[disks-1] = q;  (async_raid6_datap_recov)
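The comments at lines 398-407 belong to a dispatch switch keyed on how many data sources are present (NULL entries mean known-zero blocks, while the two failed slots still carry their destination pages). A hedged reconstruction of that selection logic — treat it as a sketch, not a verbatim excerpt:

    non_zero_srcs = 0;
    for (i = 0; i < disks - 2 && non_zero_srcs < 4; i++)
            if (blocks[i])
                    non_zero_srcs++;

    switch (non_zero_srcs) {
    case 0:
    case 1:
            BUG();          /* at least the two failed blocks must be present */
    case 2:
            /* 4-disk array with both data disks missing */
            return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
    case 3:
            /* 5-disk array with 2 of 3 data disks missing */
            return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
    default:
            return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
    }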
|
/linux-4.1.27/drivers/md/ |
linear.h
     13  struct dev_info disks[0];    /* member of struct linear_conf */
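disks[0] here is the 4.1-era spelling of a flexible array member: the whole linear_conf is allocated in one block sized for the member count. A minimal userspace model of the pattern (type and field names are hypothetical stand-ins):

    #include <stdlib.h>

    struct dev_info_model { unsigned long long end_sector; };

    struct linear_conf_model {
            int nr_disks;
            struct dev_info_model disks[];   /* modern spelling of disks[0] */
    };

    static struct linear_conf_model *alloc_conf(int n)
    {
            struct linear_conf_model *c =
                    calloc(1, sizeof(*c) + n * sizeof(c->disks[0]));
            if (c)
                    c->nr_disks = n;
            return c;
    }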
|
linear.c
     46  if (sector < conf->disks[mid].end_sector)  (which_dev)
     52  return conf->disks + lo;  (which_dev)
    105  struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);  (linear_congested)
    142  struct dev_info *disk = conf->disks + j;  (rdev_for_each)
    181  conf->disks[0].end_sector = conf->disks[0].rdev->sectors;
    184  conf->disks[i].end_sector =
    185  conf->disks[i-1].end_sector +
    186  conf->disks[i].rdev->sectors;
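The which_dev() hits show a binary search over members kept sorted by cumulative end_sector. A runnable model using the hypothetical structs from the sketch above (same invariant as the kernel loop: the answer stays in [lo, hi]):

    static struct dev_info_model *
    which_dev_model(struct linear_conf_model *conf, unsigned long long sector)
    {
            int lo = 0, hi = conf->nr_disks - 1;

            while (lo < hi) {
                    int mid = (lo + hi) / 2;
                    if (sector < conf->disks[mid].end_sector)
                            hi = mid;               /* mid still covers sector */
                    else
                            lo = mid + 1;           /* sector is past mid */
            }
            return conf->disks + lo;
    }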
|
raid5.c
    194  if (sh->qd_idx == sh->disks - 1)  (raid6_d0)
    205  /* When walking through the disks in a raid5, starting at raid6_d0,
    206  * We need to map each disk to a 'slot', where the data disks are slot
    536  sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;  (init_stripe)
    541  for (i = sh->disks; i--; ) {  (init_stripe)
    597  struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);  (calc_degraded)
    599  rdev = rcu_dereference(conf->disks[i].replacement);  (calc_degraded)
    623  struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);  (calc_degraded)
    625  rdev = rcu_dereference(conf->disks[i].replacement);  (calc_degraded)
    725  BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));  (is_full_stripe_write)
    726  return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);  (is_full_stripe_write)
    886  int i, disks = sh->disks;  (ops_run_io)
    891  for (i = disks; i--; ) {  (ops_run_io)
    921  rrdev = rcu_dereference(conf->disks[i].replacement);  (ops_run_io)
    923  rdev = rcu_dereference(conf->disks[i].rdev);  (ops_run_io)
   1176  for (i = sh->disks; i--; ) {  (ops_complete_biofill)
   1219  for (i = sh->disks; i--; ) {  (ops_run_biofill)
   1279  return addr + sizeof(struct page *) * (sh->disks + 2);  (to_addr_conv)
   1294  int disks = sh->disks;  (ops_run_compute5)
   1310  for (i = disks; i--; )  (ops_run_compute5)
   1327  * @srcs - (struct page *) array of size sh->disks
   1339  int disks = sh->disks;  (set_syndrome_sources)
   1340  int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);  (set_syndrome_sources)
   1345  for (i = 0; i < disks; i++)  (set_syndrome_sources)
   1361  i = raid6_next_disk(i, disks);  (set_syndrome_sources)
   1370  int disks = sh->disks;  (ops_run_compute6_1)
   1410  for (i = disks; i-- ; ) {  (ops_run_compute6_1)
   1428  int i, count, disks = sh->disks;  (ops_run_compute6_2)
   1429  int syndrome_disks = sh->ddf_layout ? disks : disks-2;  (ops_run_compute6_2)
   1450  for (i = 0; i < disks ; i++)  (ops_run_compute6_2)
   1463  i = raid6_next_disk(i, disks);  (ops_run_compute6_2)
   1475  /* Q disk is one of the missing disks */  (ops_run_compute6_2)
   1495  for (i = disks; i-- ; ) {  (ops_run_compute6_2)
   1545  int disks = sh->disks;  (ops_run_prexor5)
   1557  for (i = disks; i--; ) {  (ops_run_prexor5)
   1594  int disks = sh->disks;  (ops_run_biodrain)
   1601  for (i = disks; i--; ) {  (ops_run_biodrain)
   1657  int disks = sh->disks;  (ops_complete_reconstruct)
   1666  for (i = disks; i--; ) {  (ops_complete_reconstruct)
   1672  for (i = disks; i--; ) {  (ops_complete_reconstruct)
   1702  int disks = sh->disks;  (ops_run_reconstruct5)
   1716  for (i = 0; i < sh->disks; i++) {  (ops_run_reconstruct5)
   1722  if (i >= sh->disks) {  (ops_run_reconstruct5)
   1737  for (i = disks; i--; ) {  (ops_run_reconstruct5)
   1744  for (i = disks; i--; ) {  (ops_run_reconstruct5)
   1798  for (i = 0; i < sh->disks; i++) {  (ops_run_reconstruct6)
   1804  if (i >= sh->disks) {  (ops_run_reconstruct6)
   1858  int disks = sh->disks;  (ops_run_check_p)
   1875  for (i = disks; i--; ) {  (ops_run_check_p)
   1914  int overlap_clear = 0, i, disks = sh->disks;  (raid_run_ops)
   1973  for (i = disks; i--; ) {  (raid_run_ops)
   2050  * @num - total number of disks in the array
   2133  * 3/ reallocate conf->disks to be suitable bigger. If this fails,  (resize_stripes)
   2221  * conf->disks and the scribble region  (resize_stripes)
   2226  ndisks[i] = conf->disks[i];  (resize_stripes)
   2227  kfree(conf->disks);  (resize_stripes)
   2228  conf->disks = ndisks;  (resize_stripes)
   2290  int disks = sh->disks, i;  (raid5_end_read_request)
   2296  for (i=0 ; i<disks; i++)  (raid5_end_read_request)
   2303  if (i == disks) {  (raid5_end_read_request)
   2313  rdev = conf->disks[i].replacement;  (raid5_end_read_request)
   2315  rdev = conf->disks[i].rdev;  (raid5_end_read_request)
   2413  int disks = sh->disks, i;  (raid5_end_write_request)
   2420  for (i = 0 ; i < disks; i++) {  (raid5_end_write_request)
   2422  rdev = conf->disks[i].rdev;  (raid5_end_write_request)
   2426  rdev = conf->disks[i].replacement;  (raid5_end_write_request)
   2434  rdev = conf->disks[i].rdev;  (raid5_end_write_request)
   2441  if (i == disks) {  (raid5_end_write_request)
   2741  int raid_disks = sh->disks;  (compute_blocknr)
   2862  int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;  (schedule_reconstruction)
   2868  for (i = disks; i--; ) {  (schedule_reconstruction)
   2894  if (s->locked + conf->max_degraded == disks)  (schedule_reconstruction)
   2904  for (i = disks; i--; ) {  (schedule_reconstruction)
   3063  int disks = previous ? conf->previous_raid_disks : conf->raid_disks;  (stripe_set_idx)
   3066  stripe * (disks - conf->max_degraded)  (stripe_set_idx)
   3073  handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks, struct bio **return_bi)
   3079  for (i = disks; i--; ) {  (handle_failed_stripe)
   3086  rdev = rcu_dereference(conf->disks[i].rdev);  (handle_failed_stripe)
   3212  struct md_rdev *rdev = conf->disks[i].rdev;  (handle_failed_sync)
   3219  rdev = conf->disks[i].replacement;  (handle_failed_sync)
   3239  rdev = sh->raid_conf->disks[disk_idx].replacement;  (want_replace)
   3257  need_this_block(struct stripe_head *sh, struct stripe_head_state *s, int disk_idx, int disks)
   3347  fetch_block(struct stripe_head *sh, struct stripe_head_state *s, int disk_idx, int disks)
   3353  if (need_this_block(sh, s, disk_idx, disks)) {  (fetch_block)
   3360  if ((s->uptodate == disks - 1) &&  (fetch_block)
   3382  } else if (s->uptodate == disks-2 && s->failed >= 2) {  (fetch_block)
   3387  for (other = disks; other--; ) {  (fetch_block)
   3422  handle_stripe_fill(struct stripe_head *sh, struct stripe_head_state *s, int disks)
   3434  for (i = disks; i--; )  (handle_stripe_fill)
   3435  if (fetch_block(sh, s, i, disks))  (handle_stripe_fill)
   3447  handle_stripe_clean_event(struct r5conf *conf, struct stripe_head *sh, int disks, struct bio **return_bi)
   3456  for (i = disks; i--; )  (handle_stripe_clean_event)
   3549  handle_stripe_dirtying(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks)
   3574  } else for (i = disks; i--; ) {  (handle_stripe_dirtying)
   3584  rmw += 2*disks; /* cannot read it */  (handle_stripe_dirtying)
   3595  rcw += 2*disks;  (handle_stripe_dirtying)
   3607  for (i = disks; i--; ) {  (handle_stripe_dirtying)
   3632  for (i = disks; i--; ) {  (handle_stripe_dirtying)
   3661  if (rcw > disks && rmw > disks &&  (handle_stripe_dirtying)
   3681  handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks)
   3693  BUG_ON(s->uptodate != disks);  (handle_parity_checks5)
   3713  BUG_ON(s->uptodate != disks);  (handle_parity_checks5)
   3769  handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, struct stripe_head_state *s, int disks)
   3838  BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */  (handle_parity_checks6)
   3943  for (i = 0; i < sh->disks; i++)  (handle_stripe_expansion)
   4007  int disks = sh->disks;  (analyse_stripe)
   4021  for (i=disks; i--; ) {  (analyse_stripe)
   4065  rdev = rcu_dereference(conf->disks[i].replacement);  (analyse_stripe)
   4074  rdev = rcu_dereference(conf->disks[i].rdev);  (analyse_stripe)
   4122  conf->disks[i].rdev);  (analyse_stripe)
   4135  conf->disks[i].rdev);  (analyse_stripe)
   4144  conf->disks[i].replacement);  (analyse_stripe)
   4255  for (i = 0; i < sh->disks; i++) {  (break_stripe_batch_list)
   4272  for (i = 0; i < head_sh->disks; i++)  (break_stripe_batch_list)
   4288  int disks = sh->disks;  (handle_stripe)
   4361  handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);  (handle_stripe)
   4384  for (i = disks; i--; ) {  (handle_stripe)
   4426  handle_stripe_clean_event(conf, sh, disks, &s.return_bi);  (handle_stripe)
   4434  || (s.syncing && (s.uptodate + s.compute < disks))  (handle_stripe)
   4437  handle_stripe_fill(sh, &s, disks);  (handle_stripe)
   4446  handle_stripe_dirtying(conf, sh, &s, disks);  (handle_stripe)
   4458  handle_parity_checks6(conf, sh, &s, disks);  (handle_stripe)
   4460  handle_parity_checks5(conf, sh, &s, disks);  (handle_stripe)
   4542  sh->disks = conf->raid_disks;  (handle_stripe)
   4572  for (i = disks; i--; ) {  (handle_stripe)
   4577  rdev = conf->disks[i].rdev;  (handle_stripe)
   4584  rdev = conf->disks[i].rdev;  (handle_stripe)
   4590  rdev = conf->disks[i].replacement;  (handle_stripe)
   4593  rdev = conf->disks[i].rdev;  (handle_stripe)
   4843  rdev = rcu_dereference(conf->disks[dd_idx].replacement);  (chunk_aligned_read)
   4846  rdev = rcu_dereference(conf->disks[dd_idx].rdev);  (chunk_aligned_read)
   5478  for (j=sh->disks; j--;) {  (reshape_request)
   5642  struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);  (sync_request)
   6320  kfree(conf->disks);  (free_conf)
   6496  conf->disks = kzalloc(max_disks * sizeof(struct disk_info),  (setup_conf)
   6498  if (!conf->disks)  (setup_conf)
   6533  disk = conf->disks + raid_disk;  (rdev_for_each)
   6776  rdev = conf->disks[i].rdev;
   6777  if (!rdev && conf->disks[i].replacement) {
   6779  rdev = conf->disks[i].replacement;
   6780  conf->disks[i].replacement = NULL;
   6782  conf->disks[i].rdev = rdev;
   6786  if (conf->disks[i].replacement &&
   6940  * depending on which disks are used to calculate  (rdev_for_each)
   7000  conf->disks[i].rdev &&  (status)
   7001  test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");  (status)
   7021  tmp = conf->disks + i;  (print_raid5_conf)
   7038  tmp = conf->disks + i;  (raid5_spare_active)
   7078  struct disk_info *p = conf->disks + number;  (raid5_remove_disk)
   7159  conf->disks[rdev->saved_raid_disk].rdev == NULL)  (raid5_add_disk)
   7163  p = conf->disks + disk;  (raid5_add_disk)
   7175  p = conf->disks + disk;  (raid5_add_disk)
   7323  "before number of disks\n", mdname(mddev));
   7478  struct md_rdev *rdev = conf->disks[d].rdev;  (raid5_finish_reshape)
   7481  rdev = conf->disks[d].replacement;  (raid5_finish_reshape)
|
raid0.c
    214  printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
    567  printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",  (raid0_takeover_raid45)
    603  * - disks number must be even  (raid0_takeover_raid10)
    682  * raid4 - if all data disks are active.  (raid0_takeover)
    684  * raid10 - assuming we have all necessary active disks  (raid0_takeover)
|
md.h
    217  struct list_head disks;    /* member of struct mddev */
    318  /* resync even though the same disks are shared among md-devices */
    591  * iterates through the 'same array disks' ringlist
    594  list_for_each_entry(rdev, &((mddev)->disks), same_set)
    597  list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
    600  list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
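The three macros above are what rdev_for_each() and friends expand to; mddev->disks is just a list_head threading every member device. A hedged kernel-side usage sketch (not standalone code):

    struct md_rdev *rdev;
    char b[BDEVNAME_SIZE];

    rdev_for_each(rdev, mddev) {
            if (test_bit(Faulty, &rdev->flags))
                    continue;               /* skip failed members */
            pr_debug("md: considering %s\n", bdevname(rdev->bdev, b));
    }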
|
raid5.h
    212  int disks;    /* disks in stripe (member of struct stripe_head) */
    213  int overwrite_disks;    /* total overwrite disks in stripe,
    532  int pool_size;    /* number of disks in stripeheads in pool */
    534  struct disk_info *disks;    /* member of struct r5conf */
|
md.c
    484  if (!mddev->raid_disks && list_empty(&mddev->disks) &&  (mddev_put)
    512  INIT_LIST_HEAD(&mddev->disks);  (mddev_init)
   1138  if (sb->disks[rdev->desc_nr].state & (  (super_90_validate)
   1157  desc = sb->disks + rdev->desc_nr;  (super_90_validate)
   1193  * 1/ zero out disks  (super_90_sync)
   1195  * 3/ any empty disks < next_spare become removed  (super_90_sync)
   1197  * disks[0] gets initialised to REMOVED because  (super_90_sync)
   1257  sb->disks[0].state = (1<<MD_DISK_REMOVED);  (rdev_for_each)
   1278  d = &sb->disks[rdev2->desc_nr];  (rdev_for_each)
   1305  mdp_disk_t *d = &sb->disks[i];
   1320  sb->this_disk = sb->disks[rdev->desc_nr];
   1959  if (list_empty(&mddev->disks))  (md_integrity_register)
   1964  /* skip spares and non-functional disks */  (rdev_for_each)
   2089  list_add_rcu(&rdev->same_set, &mddev->disks);  (bind_rdev_to_array)
   2189  while (!list_empty(&mddev->disks)) {  (export_array)
   2190  rdev = list_first_entry(&mddev->disks, struct md_rdev,  (export_array)
   2391  * then added disks for geometry changes,  (add_bound_rdev)
   3794  if (list_empty(&mddev->disks) &&  (array_state_show)
   3996  if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {  (new_dev_store)
   3998  = list_entry(mddev->disks.next,  (new_dev_store)
   4139  else if (!list_empty(&mddev->disks))  (metadata_store)
   5009  if (list_empty(&mddev->disks))  (md_run)
   5246  if (list_empty(&mddev->disks))  (restart_array)
   5556  if (list_empty(&mddev->disks))  (autorun_array)
   5575  * lets try to run arrays based on all disks that have arrived
   5578  * the method: pick the first pending disk, collect all disks with
   5581  * update time (freshest comes first), kick out 'old' disks and
   5641  || !list_empty(&mddev->disks)) {  (autorun_devices)
   5835  if (!list_empty(&mddev->disks)) {  (add_new_disk)
   5837  = list_entry(mddev->disks.next,  (add_new_disk)
   6327  /* change the number of raid disks */  (update_raid_disks)
   6678  if (!list_empty(&mddev->disks)) {  (md_ioctl)
   6680  "md: array %s already has disks!\n",  (md_ioctl)
   7253  if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {  (md_seq_show)
   7284  if (!list_empty(&mddev->disks)) {
|
multipath.c
     36  int i, disks = conf->raid_disks;  (multipath_map)
     44  for (i = 0; i < disks; i++) {  (multipath_map)
|
raid1.c
    466  * disks. Setting the Returned bit ensures that this  (raid1_end_write_request)
    674  * If all disks are rotational, choose the closest disk. If any disk is  (read_balance)
    677  * mixed ratation/non-rotational disks depending on workload.  (read_balance)
   1085  int i, disks;  (make_request)
   1257  disks = conf->raid_disks * 2;  (make_request)
   1263  for (i = 0; i < disks; i++) {  (make_request)
   1367  for (i = 0; i < disks; i++) {  (make_request)
   1482  * else if it is the last working disks, ignore the error, let the  (error)
   1567  * Find all failed disks within the RAID1 configuration  (raid1_spare_active)
   2048  int disks = conf->raid_disks * 2;  (sync_request_write)
   2065  for (i = 0; i < disks ; i++) {  (sync_request_write)
|
raid10.c
   1611  int disks, ncopies;  (_enough)
   1613  disks = conf->prev.raid_disks;  (_enough)
   1616  disks = conf->geo.raid_disks;  (_enough)
   1631  this = (this+1) % disks;  (_enough)
   1635  first = (first + ncopies) % disks;  (_enough)
   1662  * else if it is the last working disks, ignore the error, let the  (error)
   1734  * Find all non-in_sync disks within the RAID10 configuration  (raid10_spare_active)
   3470  int layout, chunk, disks;  (setup_geo)
   3475  disks = mddev->raid_disks - mddev->delta_disks;  (setup_geo)
   3480  disks = mddev->raid_disks;  (setup_geo)
   3487  disks = mddev->raid_disks + mddev->delta_disks;  (setup_geo)
   3498  geo->raid_disks = disks;  (setup_geo)
   3502  geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;  (setup_geo)
   4108  printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n",
|
bitmap.c
    173  /* Iterate the disks of an mddev, using rcu to protect access to the  (next_active_rdev)
    189  rdev = list_entry(&mddev->disks, struct md_rdev, same_set);  (next_active_rdev)
    194  list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {  (next_active_rdev)
|
dm-raid.c
    281  list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);  (dev_parms)
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
lloop.c
    163  static struct gendisk **disks;    /* file-scope variable */
    531  set_capacity(disks[lo->lo_number], size);  (loop_set_fd)
    573  set_capacity(disks[lo->lo_number], 0);  (loop_clr_fd)
    674  if (disks == NULL) {  (lloop_ioctl)
    798  disks = kcalloc(max_loop, sizeof(*disks), GFP_KERNEL);  (lloop_init)
    799  if (!disks)  (lloop_init)
    803  disks[i] = alloc_disk(1);  (lloop_init)
    804  if (!disks[i])  (lloop_init)
    812  struct gendisk *disk = disks[i];  (lloop_init)
    833  add_disk(disks[i]);  (lloop_init)
    842  put_disk(disks[i]);  (lloop_init)
    843  OBD_FREE(disks, max_loop * sizeof(*disks));  (lloop_init)
    859  del_gendisk(disks[i]);  (lloop_exit)
    861  put_disk(disks[i]);  (lloop_exit)
    866  OBD_FREE(disks, max_loop * sizeof(*disks));  (lloop_exit)
|
/linux-4.1.27/block/partitions/ |
atari.h
     33  u16 checksum;    /* checksum for bootable disks */
|
sgi.c
     64  /* All SGI disk labels have 16 partitions, disks under Linux only  (sgi_partition)
|
msdos.c
     12  * and LILO, as well as loadlin and bootln. Note that disks other than
     17  * Check partition table on IDE disks for common CHS translations
    462  * Note order! (some AIX disks, e.g. unbootable kind,  (msdos_partition)
|
ldm.c
    369  break; /* FIXME ignore for now, 3rd PH can fail on odd-sized disks */  (ldm_validate_privheads)
    549  * dynamic disks).
    596  * The LDM Database contains a list of all partitions on all dynamic disks.
|
/linux-4.1.27/include/uapi/linux/raid/ |
md_u.h
     96  int active_disks;     /* 2 Number of currently active disks */
     97  int working_disks;    /* 3 Number of working disks */
     98  int failed_disks;     /* 4 Number of failed disks */
     99  int spare_disks;      /* 5 Number of spare disks */
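These counters are what userspace reads back through the md GET_ARRAY_INFO ioctl. A minimal sketch (error handling trimmed; /dev/md0 is an assumed device node, and the include set may need adjusting per libc):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/major.h>
    #include <linux/raid/md_u.h>

    int main(void)
    {
            mdu_array_info_t info;
            int fd = open("/dev/md0", O_RDONLY);

            if (fd >= 0 && ioctl(fd, GET_ARRAY_INFO, &info) == 0)
                    printf("raid%d: %d active, %d working, %d failed, %d spare\n",
                           info.level, info.active_disks, info.working_disks,
                           info.failed_disks, info.spare_disks);
            return 0;
    }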
|
md_p.h
     35  * 128 - 511  12 32-words descriptors of the disks in the raid set.
    137  __u32 nr_disks;       /* 9 total disks in the raid set */
    138  __u32 raid_disks;     /* 10 disks in a fully functional raid set */
    151  __u32 active_disks;   /* 2 Number of currently active disks */
    152  __u32 working_disks;  /* 3 Number of working disks */
    153  __u32 failed_disks;   /* 4 Number of failed disks */
    154  __u32 spare_disks;    /* 5 Number of spare disks */
    190  mdp_disk_t disks[MD_SB_DISKS];    /* member of struct mdp_superblock_s */
|
/linux-4.1.27/arch/x86/xen/ |
platform-pci-unplug.c
    172  "been compiled for this kernel: unplug emulated disks.\n"  (xen_unplug_emulated_devices)
    200  else if (!strncmp(p, "ide-disks", l))  (parse_xen_emul_unplug)
    202  else if (!strncmp(p, "aux-ide-disks", l))  (parse_xen_emul_unplug)
|
/linux-4.1.27/include/linux/raid/ |
pq.h
    143  extern void (*raid6_2data_recov)(int disks, size_t bytes, int faila, int failb,
    145  extern void (*raid6_datap_recov)(int disks, size_t bytes, int faila,
    147  void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
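raid6_2data_recov and raid6_datap_recov are function pointers so that raid6_select_algo() can patch in the fastest probed implementation; raid6_dual_recov() is the dispatcher over them. Condensed from the recov.c hits earlier in this listing (data slots are 0..disks-3, P is disks-2, Q is disks-1; faila < failb assumed):

    if (failb == disks-1) {
            if (faila == disks-2)
                    /* P+Q failed: just rebuild the syndrome */
                    raid6_call.gen_syndrome(disks, bytes, ptrs);
            /* else data+Q failed: RAID-5-style rebuild from P, then regen Q */
    } else if (failb == disks-2) {
            /* data+P failed */
            raid6_datap_recov(disks, bytes, faila, ptrs);
    } else {
            /* two data disks failed */
            raid6_2data_recov(disks, bytes, faila, failb, ptrs);
    }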
|
/linux-4.1.27/drivers/usb/storage/ |
uas-detect.h
     93  * (*) ASM1051 chips do work with UAS with some disks (with the  (uas_use_uas_driver)
     94  * US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks  (uas_use_uas_driver)
|
scsiglue.c
    174  /* Many disks only accept MODE SENSE transfer lengths of  (slave_configure)
    201  /* Some disks return the total number of blocks in response  (slave_configure)
    207  /* A few disks have two indistinguishable version, one of  (slave_configure)
    238  /* USB disks should allow restart. Some drives spin down  (slave_configure)
    244  * impact is negligible we set this flag for all USB disks */  (slave_configure)
|
isd200.c
    268  #define DIRECT_ACCESS_DEVICE 0x00    /* disks */
   1145  /* Standard IDE interface only supports disks */  (isd200_get_inquiry_data)
|
usb.c
    860  * synchronized, disks spun down, etc.  (quiesce_and_remove_host)
|
/linux-4.1.27/fs/isofs/ |
util.c
     42  * The timezone offset is unreliable on some disks,  (iso_date)
|
isofs.h
     44  unsigned int s_cruft:1;    /* Broken disks with high byte of length
|
inode.c
    481  * Multisession is legal only with XA disks.
|
/linux-4.1.27/drivers/block/ |
cciss_scsi.h
     26  #include <scsi/scsicam.h>    /* possibly irrelevant, since we don't show disks */
|
floppy.c
     86  /* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
    411  static struct gendisk *disks[N_DRIVE];    /* file-scope variable */
    431  * The LSB (bit 2) is flipped. For most disks, the first sector
    433  * disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2).
    434  * For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2).
   2564  /* 2M disks have phantom sectors on the first track */  (make_raw_rw_request)
   2803  q = disks[fdc_queue]->queue;  (set_next_request)
   3398  * We do this in order to provide a means to eject floppy disks before  (fd_locked_ioctl)
   3699  set_capacity(disks[drive], floppy_sizes[new_dev]);  (floppy_open)
   4168  return get_disk(disks[drive]);  (floppy_find)
   4190  disks[drive] = alloc_disk(1);  (do_floppy_init)
   4191  if (!disks[drive]) {  (do_floppy_init)
   4196  disks[drive]->queue = blk_init_queue(do_fd_request, &floppy_lock);  (do_floppy_init)
   4197  if (!disks[drive]->queue) {  (do_floppy_init)
   4202  blk_queue_max_hw_sectors(disks[drive]->queue, 64);  (do_floppy_init)
   4203  disks[drive]->major = FLOPPY_MAJOR;  (do_floppy_init)
   4204  disks[drive]->first_minor = TOMINOR(drive);  (do_floppy_init)
   4205  disks[drive]->fops = &floppy_fops;  (do_floppy_init)
   4206  sprintf(disks[drive]->disk_name, "fd%d", drive);  (do_floppy_init)
   4341  disks[drive]->private_data = (void *)(long)drive;  (do_floppy_init)
   4342  disks[drive]->flags |= GENHD_FL_REMOVABLE;  (do_floppy_init)
   4343  disks[drive]->driverfs_dev = &floppy_device[drive].dev;  (do_floppy_init)
   4344  add_disk(disks[drive]);  (do_floppy_init)
   4352  del_gendisk(disks[drive]);  (do_floppy_init)
   4367  if (!disks[drive])  (do_floppy_init)
   4369  if (disks[drive]->queue) {  (do_floppy_init)
   4371  blk_cleanup_queue(disks[drive]->queue);  (do_floppy_init)
   4372  disks[drive]->queue = NULL;  (do_floppy_init)
   4374  put_disk(disks[drive]);  (do_floppy_init)
   4596  del_gendisk(disks[drive]);  (floppy_module_exit)
   4599  blk_cleanup_queue(disks[drive]->queue);  (floppy_module_exit)
   4602  * These disks have not called add_disk(). Don't put down  (floppy_module_exit)
   4607  disks[drive]->queue = NULL;  (floppy_module_exit)
   4609  put_disk(disks[drive]);  (floppy_module_exit)
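The do_floppy_init() hits, read in order, give the classic 4.1-era gendisk lifecycle. Condensed sketch assembled from those lines (kernel-side, error paths trimmed):

    disks[drive] = alloc_disk(1);                       /* one minor, no partitions */
    disks[drive]->queue = blk_init_queue(do_fd_request, &floppy_lock);
    blk_queue_max_hw_sectors(disks[drive]->queue, 64);
    disks[drive]->major = FLOPPY_MAJOR;
    disks[drive]->first_minor = TOMINOR(drive);
    disks[drive]->fops = &floppy_fops;
    sprintf(disks[drive]->disk_name, "fd%d", drive);
    disks[drive]->private_data = (void *)(long)drive;
    disks[drive]->flags |= GENHD_FL_REMOVABLE;
    add_disk(disks[drive]);                             /* device goes live here */

    /* teardown mirrors it: del_gendisk(), blk_cleanup_queue(), put_disk() */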
|
swim3.c
     16  * handle GCR disks
     45  static struct gendisk *disks[MAX_FLOPPIES];    /* file-scope variable */
    327  fs->cur_req = blk_fetch_request(disks[fs->index]->queue);  (start_request)
   1218  disk = disks[index] = alloc_disk(1);  (swim3_attach)
|
ataflop.c
     18  * - Support for 5 1/4'' disks
     50  * - increase gap size at start of track for HD/ED disks
   1316  * as long as no write protected disks are used. TOS solves this
   1324  * disks. But at least this is better than working with incorrect data
|
ps3disk.c
    423  dev_err(&dev->sbd.core, "%s:%u: Too many disks\n", __func__,  (ps3disk_probe)
|
DAC960.c
     99  if (!get_capacity(p->disks[drive_nr]))  (DAC960_open)
   2532  struct gendisk *disk = Controller->disks[n];  (DAC960_RegisterBlockDevice)
   2571  del_gendisk(Controller->disks[disk]);  (DAC960_UnregisterBlockDevice)
   2591  set_capacity(Controller->disks[disk], disk_size(Controller, disk));  (DAC960_ComputeGenericDiskInfo)
   2697  for (i = 0; (i < DAC960_MaxLogicalDrives) && Controller->disks[i]; i++)  (DAC960_DetectCleanup)
   2698  put_disk(Controller->disks[i]);  (DAC960_DetectCleanup)
   2773  Controller->disks[i] = alloc_disk(1<<DAC960_MaxPartitionsBits);  (DAC960_DetectController)
   2774  if (!Controller->disks[i])  (DAC960_DetectController)
   2776  Controller->disks[i]->private_data = (void *)((long)i);  (DAC960_DetectController)
   3177  set_capacity(Controller->disks[disk], disk_size(Controller, disk));  (DAC960_Probe)
   3178  add_disk(Controller->disks[disk]);  (DAC960_Probe)
|
amiflop.c
     16  * - currently only 9 and 18 sector disks
     19  * disks the same time
     50  * - Fixed Bug accessing multiple disks
|
cpqarray.c
    329  * To write all data in the battery backed cache to disks  (cpqarray_remove_one)
    398  * Find disks and fill in structs  (cpqarray_register_ctlr)
|
cciss_scsi.c
     29  physical nor logical disks are presented through the scsi layer. */
|
xen-blkfront.c
    822  "emulated IDE disks,\n\t choose an xvd device name"  (xlvbd_alloc_gendisk)
|
DAC960.h
   2318  struct gendisk *disks[DAC960_MaxLogicalDrives];    /* member of struct DAC960_Controller */
|
/linux-4.1.27/drivers/scsi/ |
scsicam.c
    223  * will support rather large disks before the number of heads
    226  * of the disk while allowing for very large disks to be
|
sd.h
     12  * Time out in seconds for disks and Magneto-opticals (which are slower).
|
dtc.c
    299  * using hard disks on a trantor should verify that this mapping corresponds
|
t128.c
    284  * using hard disks on a trantor should verify that this mapping corresponds
|
sd.c
     13  * - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using
     24  * Support 32k/1M disks.
    545  * Inside a major, we have 16k disks, however mapped non-
    546  contiguously. The first 16 disks are for major0, the next
   2799  * @prefix: name prefix - ie. "sd" for SCSI disks
   2904  * for each scsi device (not just disks) present.
|
pas16.c
    459  * using hard disks on a trantor should verify that this mapping corresponds
|
hpsa.h
     67  * disks. We need these pointers for counting i/o's out to physical
|
g_NCR5380.c
    492  * using hard disks on a trantor should verify that this mapping
|
hpsa_cmd.h
    204  * valid for data disks only */
|
3w-9xxx.h
    246  {0x0256, "Unable to write configuration to all disks during CreateUnit"},
|
atari_NCR5380.c
    169  * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
   1499  * IO while SEL is true. But again, there are some disks out the in the  (NCR5380_select)
   2318  * Unfortunately, some disks violate the SCSI spec and  (NCR5380_information_transfer)
|
wd7000.c
    133  * Now, driver can handle hard disks with capacity >1GB.
   1606  * for disks >1GB do some guessing  (wd7000_biosparam)
|
scsi_debug.c
      4  * Simulate a host adapter with 2 disks attached. Do a lot of checking
     12  * SAS disks.
     18  * dpg: work for devfs large number of disks [20010809]
   2030  /* set DPOFUA bit for disks */  (resp_mode_sense)
|
gdth.h
    565  u32 ld_dcnt;    /* number of disks */
|
ultrastor.c
     21  * mscp.command_link_id). (Does not work with many disks,
|
NCR5380.c
    182  * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
   2261  * Unfortunately, some disks violate the SCSI spec and
|
aha152x.c
     89  * - support for extended translation for >1GB disks
   1248  /* for disks >1GB do some guessing */  (aha152x_biosparam)
|
scsi_scan.c
   1102  * For disks, this implies that there is no  (scsi_probe_and_add_lun)
|
fdomain.c
   1645  printk("scsi: <fdomain> fdomain_16x0_biosparam: too many disks");  (fdomain_16x0_biosparam)
|
ipr.c
     41  * - Supports attachment of non-RAID disks, tape, and optical devices
     47  * by adding disks
    542  "9061: One or more disks are missing from an array"},
    544  "9062: One or more disks are missing from an array"},
|
megaraid.c
   4418  * the physical drives would appear before the logical disks.  (megaraid_probe_one)
   4566  /* Flush disks cache */  (__megaraid_shutdown)
|
hpsa.c
   2845  * 2. Find a matching ioaccel2 handle from list of physical disks.
   3108  /* We might see up to the maximum number of logical and physical disks  (hpsa_update_scsi_devices)
   7077  * To write all data in the battery backed cache to disks  (hpsa_shutdown)
|
eata.c
    461  * disks/arrays. It could also be useful when switching between Adaptec boards
|
in2000.c
     97  * If your IN2000 card has more than 2 disks on its bus, you
|
gdth.c
   2347  /* you can here set all disks to removable, if you want to do  (gdth_internal_cache_cmd)
|
BusLogic.c
   3375  order for disks equal to or larger than 1 GB to be addressable by the BIOS
|
/linux-4.1.27/arch/ia64/hp/sim/ |
simscsi.c
     27  #define DEFAULT_SIMSCSI_ROOT "/var/ski-disks/sd"
     74  /* base name for default disks */
|
/linux-4.1.27/arch/x86/boot/ |
edd.c
    164  * Scan the BIOS-supported hard disks and query EDD  (query_edd)
|
/linux-4.1.27/drivers/ide/ |
qd65xx.c
     75  * bit 1 : 1 = only disks on primary port
     76  *         0 = disks & ATAPI devices on primary port
|
ali14xx.c
     29  * I've since upgraded to two disks and a CD-ROM, with no trouble, and
|
ide-disk.c
    287  * Some disks report total number of sectors instead of
    668  * Removable disks (eg. SYQUEST); ignore 'WD' drives  (ide_disk_setup)
|
cmd64x.c
    390  * then it only works with Quantum disks due to some  (cmd64x_init_one)
|
ide-gd.c
    126  firmware doesn't handle disks in standby mode properly.
|
ide-eh.c
    423  * state when the disks are reset this way. At least, the Winbond
|
ide-floppy.c
    425  /* Normal Zip/LS-120 disks */  (ide_floppy_get_capacity)
|
ide-probe.c
    619  /* Ignore disks that we will not probe for later. */  (ide_port_for_each_dev)
|
/linux-4.1.27/drivers/message/fusion/lsi/ |
mpi_log_sas.h
    257  /* Phys Disk failed, too many phys disks */
    291  /* Compatibility Error : membership count error, too many or too few disks for volume type */
|
/linux-4.1.27/drivers/block/aoe/ |
aoe.h
      6  /* set AOE_PARTITIONS to 1 to use whole-disks only
|
/linux-4.1.27/drivers/ata/ |
ahci_sunxi.c
    204  * requested, otherwise directly attached disks do not work.  (ahci_sunxi_probe)
|
pata_serverworks.c
    148  * specific rules. OSB4 requires no UDMA for disks due to a FIFO
|
pdc_adma.c
     27  * Supports ATA disks in single-packet ADMA mode.
|
sata_svw.c
    304  can also appear on an system with very fast disks, where  (k2_bmdma_start_mmio)
|
libata-pmp.c
    456  /* SRST breaks detection and disks get misclassified  (ata_for_each_link)
|
ata_piix.c
   1598  /* On Hyper-V hypervisors the disks are exposed on  (piix_ignore_devices_quirk)
|
libata-scsi.c
   2206  * This is always one physical block, but for disks with a smaller  (ata_scsiop_inq_b0)
   2380  * access devices (e.g. disks) only. There should be no block
   3239  * Assume this is invoked for direct access devices (e.g. disks) only.
|
libata-eh.c
   2998  * While disks spinup behind PMP, some controllers fail sending SRST.
|
libata-core.c
   4502  /* On some disks, this command causes spin-up, so we need longer timeout */  (ata_dev_set_xfermode)
|
/linux-4.1.27/arch/arm/mach-orion5x/ |
net2big-setup.c
    174  pr_info("net2big: power up SATA hard disks\n");  (net2big_sata_power_init)
|
/linux-4.1.27/block/ |
blk-integrity.c
    136  * blk_integrity_compare - Compare integrity profile of two disks
|
scsi_ioctl.c
    712  * not have partitions, so we get here only for disks.  (scsi_verify_blk_ioctl)
|
/linux-4.1.27/fs/jfs/ |
jfs_filsys.h
     48  /* mount time flag to enable TRIM to ssd disks */
|
/linux-4.1.27/include/xen/interface/io/ |
blkif.h
    232  /* Xen-defined major numbers for virtual disks, they look strangely
|
/linux-4.1.27/fs/btrfs/ |
reada.c
     49  * will have its on read pointer and all disks will by utilized in parallel.
     50  * Also will no two disks read both sides of a mirror simultaneously, as this
     51  * would waste seeking capacity. Instead both disks will read different parts
|
scrub.c
   1148  * disks) instead of on PAGE_SIZE. Then maybe 512 byte of one  (scrub_handle_errored_block)
   1788  * doubled the write performance on spinning disks when measured  (scrub_wr_submit)
|
volumes.c
   5029  /* For writes to RAID[56], allow a full stripeset across all disks.  (__btrfs_map_block)
|
/linux-4.1.27/drivers/scsi/libsas/ |
sas_init.c
    425  * flush out disks that did not return  (sas_resume_ha)
|
sas_ata.c
    757  * disks are in one of three states:  (sas_ata_strategy_handler)
|
sas_expander.c
   1816  * when the first level expander is self-configuring, hotplug the disks in
|
/linux-4.1.27/drivers/scsi/megaraid/ |
megaraid_mbox.c
    157  "Set to expose unconfigured disks to kernel (default=0)");
   2984  * attached to the controller. We will expose all the disks  (megaraid_mbox_product_info)
   3260  * megaraid_mbox_flush_cache - flush adapter and disks cache
   3263  * Flush adapter cache followed by disks cache.
   3285  con_log(CL_ANN, ("megaraid: flush disks cache failed\n"));  (megaraid_mbox_flush_cache)
|
/linux-4.1.27/drivers/scsi/isci/ |
host.h
     85  * Timer to control when the directed attached disks can consume power.
|
/linux-4.1.27/drivers/s390/char/ |
zcore.c
      3  * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
|
/linux-4.1.27/include/linux/ |
genhd.h
    172  * disks that can't be partitioned. */
|
device.h
    349  * at the class level, they are all simply disks. Classes allow user space
    496  * like "partitions" and "disks", "mouse" and "event".
|
ata.h
    590  * some Maxtor disks have bit 13 defined incorrectly  (ata_id_flush_ext_enabled)
|
/linux-4.1.27/drivers/firewire/ |
sbp2.c
     98  * Some disks need this to spin down or to resume properly.
    207  * a fixed length only. 16 bytes is enough for disks larger than 2 TB.
|
/linux-4.1.27/drivers/message/fusion/ |
mptscsih.c
   2187  * Check inactive list for matching phys disks  (mptscsih_is_phys_disk)
   2264  * Check inactive list for matching phys disks  (mptscsih_raid_id_to_num)
   3006  * mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
|
mptbase.h
    527  IOCPage3_t *pIocPg3;    /* table of physical disks */
|
mptbase.c
   5941  * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
|
/linux-4.1.27/drivers/staging/media/lirc/ |
lirc_serial.c
    207  * buffer. If you have a slow computer or non-busmastering IDE disks,
|
/linux-4.1.27/drivers/block/paride/ |
pd.c
     58  <slv>  IDE disks can be jumpered to master or slave.
|
/linux-4.1.27/drivers/block/zram/ |
zram_drv.c
   1196  /* zram devices sort of resembles non-rotational disks */  (create_device)
|
/linux-4.1.27/include/uapi/linux/ |
cdrom.h
     76  address of multi session disks
|
/linux-4.1.27/drivers/scsi/aacraid/ |
linit.c
    298  * disks equal to or larger than 1 GB to be addressable by the BIOS
|
/linux-4.1.27/drivers/scsi/aic7xxx/ |
aic7xxx.h
    784  #define CFWBCACHEENB 0x4000    /* Enable W-Behind Cache on disks */
|
/linux-4.1.27/fs/ext3/ |
ext3.h
    487  __le32 s_raid_stripe_width;    /* blocks on all data disks (N*stride) */
|
/linux-4.1.27/drivers/staging/unisys/visorchipset/ |
visorchipset_main.c
   1391  * and disks mounted for the partition  (chipset_ready)
|
/linux-4.1.27/drivers/block/drbd/ |
drbd_req.c
   1607  * Note that for 32bit jiffies and very stable connections/disks,  (request_timer_fn)
|
/linux-4.1.27/drivers/base/ |
core.c
    739  /* block disks show up in /sys/block */  (get_device_parent)
|
/linux-4.1.27/fs/udf/ |
super.c
   1192  * VAT file entry is in the last recorded block. Some broken disks have  (udf_find_vat_block)
|
/linux-4.1.27/drivers/scsi/ibmvscsi/ |
ibmvscsi.c
     81  * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
|
/linux-4.1.27/fs/jbd/ |
transaction.c
   1403  * sleep for the delta and commit. This greatly helps super fast disks  (journal_stop)
|
/linux-4.1.27/fs/jbd2/ |
transaction.c
   1586  * greatly helps super fast disks that would see slowdowns as  (jbd2_journal_stop)
|
/linux-4.1.27/fs/reiserfs/ |
reiserfs.h
    668  * reiserfs disks from 3.5.19 or earlier. 99% of the time, this
|
/linux-4.1.27/fs/xfs/libxfs/ |
xfs_btree.c
   3964  * though for slow disks this is unlikely to make much difference to performance
|
/linux-4.1.27/fs/ext4/ |
ext4.h
   1186  __le32 s_raid_stripe_width;    /* blocks on all data disks (N*stride) */
|
/linux-4.1.27/drivers/scsi/mpt2sas/ |
mpt2sas_scsih.c
   1822  * WARPDRIVE: If number of physical disks in a volume exceeds the max pds  (_scsih_init_warpdrive_properties)
|