Lines Matching refs: q (cross-reference hits for the local struct request_queue pointer q in the device-mapper table code)
280 struct request_queue *q; in device_area_is_invalid() local
294 q = bdev_get_queue(bdev); in device_area_is_invalid()
295 if (!q || !q->make_request_fn) { in device_area_is_invalid()
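Lines 280/294/295 are the usual defensive lookup of a member device's queue inside device_area_is_invalid(). A minimal sketch of how that guard likely reads in context; only the three excerpted lines are verbatim, while the surrounding declarations, the warning text and the return value are assumptions (assuming the standard iterate_devices callout parameters ti and dev):

	struct request_queue *q;			/* line 280: local queue pointer */
	struct block_device *bdev = dev->bdev;		/* assumed: the device being validated */
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);			/* line 294 */
	if (!q || !q->make_request_fn) {		/* line 295: no queue, or queue not initialised yet */
		/* assumed message and return value; only the check itself is excerpted above */
		DMWARN("%s: %s is not yet initialised",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 1;
	}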
436 struct request_queue *q = bdev_get_queue(bdev); in dm_set_device_limits() local
439 if (unlikely(!q)) { in dm_set_device_limits()
450 q->limits.physical_block_size, in dm_set_device_limits()
451 q->limits.logical_block_size, in dm_set_device_limits()
452 q->limits.alignment_offset, in dm_set_device_limits()
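dm_set_device_limits() repeats the same queue lookup (436/439) and then quotes the member queue's limits in a warning (450-452). A sketch of the likely shape; bdev, start and limits are assumed to be the callout's parameters, and the bdev_stack_limits() call, the message wording and the return value are assumptions rather than excerpts:

	struct request_queue *q = bdev_get_queue(bdev);	/* line 436 */
	char b[BDEVNAME_SIZE];

	if (unlikely(!q)) {				/* line 439: member device has no queue */
		DMWARN("%s: Cannot set limits for nonexistent device %s",
		       dm_device_name(ti->table->md), bdevname(bdev, b));
		return 0;				/* assumed: skip the device, do not fail the table */
	}

	/* assumed: stack the member device's limits and warn on an alignment mismatch */
	if (bdev_stack_limits(limits, bdev, start) < 0)
		DMWARN("%s: target device %s caused an alignment inconsistency: "
		       "physical_block_size=%u, logical_block_size=%u, alignment_offset=%u",
		       dm_device_name(ti->table->md), bdevname(bdev, b),
		       q->limits.physical_block_size,	/* line 450 */
		       q->limits.logical_block_size,	/* line 451 */
		       q->limits.alignment_offset);	/* line 452 */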
893 struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); in dm_table_set_type() local
895 if (!blk_queue_stackable(q)) { in dm_table_set_type()
901 if (q->mq_ops) in dm_table_set_type()
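Lines 893-901 are where dm_table_set_type() decides whether the table may be request-based: every member queue must be stackable, and q->mq_ops marks blk-mq devices. A sketch of that per-device loop; the list iteration, the error text and the use_blk_mq flag name are assumptions:

	struct dm_dev_internal *dd;
	bool use_blk_mq = false;			/* assumed flag name */

	list_for_each_entry(dd, devices, list) {	/* assumed: devices = dm_table_get_devices(t) */
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);	/* line 893 */

		if (!blk_queue_stackable(q)) {		/* line 895 */
			DMERR("table load rejected: including non-request-stackable devices");
			return -EINVAL;
		}
		if (q->mq_ops)				/* line 901: member device uses blk-mq */
			use_blk_mq = true;
	}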
1328 struct request_queue *q = bdev_get_queue(dev->bdev); in device_flush_capable() local
1330 return q && (q->flush_flags & flush); in device_flush_capable()
1380 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_nonrot() local
1382 return q && blk_queue_nonrot(q); in device_is_nonrot()
1388 struct request_queue *q = bdev_get_queue(dev->bdev); in device_is_not_random() local
1390 return q && !blk_queue_add_random(q); in device_is_not_random()
1396 struct request_queue *q = bdev_get_queue(dev->bdev); in queue_supports_sg_merge() local
1398 return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); in queue_supports_sg_merge()
1421 struct request_queue *q = bdev_get_queue(dev->bdev); in device_not_write_same_capable() local
1423 return q && !q->limits.max_write_same_sectors; in device_not_write_same_capable()
1448 struct request_queue *q = bdev_get_queue(dev->bdev); in device_discard_capable() local
1450 return q && blk_queue_discard(q); in device_discard_capable()
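The helpers excerpted at lines 1328-1450 (device_flush_capable, device_is_nonrot, device_is_not_random, queue_supports_sg_merge, device_not_write_same_capable, device_discard_capable) all follow one pattern: an iterate_devices callout that fetches the member device's queue and tests a single capability, returning false if the queue is missing. A sketch built around the two excerpted lines of device_is_nonrot, plus the kind of all-devices wrapper that line 1529 below invokes; the callout signature is the standard iterate_devices_callout_fn and the wrapper body is a reconstruction, not an excerpt:

static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
			    sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);	/* line 1380 */

	return q && blk_queue_nonrot(q);			/* line 1382 */
}

/* assumed caller shape: ask every target to run the callout over its devices */
static bool dm_table_all_devices_attribute(struct dm_table *t,
					   iterate_devices_callout_fn func)
{
	struct dm_target *ti;
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices ||
		    !ti->type->iterate_devices(ti, func, NULL))
			return false;
	}

	return true;
}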
1482 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, in dm_table_set_restrictions() argument
1490 q->limits = *limits; in dm_table_set_restrictions()
1493 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); in dm_table_set_restrictions()
1495 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); in dm_table_set_restrictions()
1502 blk_queue_flush(q, flush); in dm_table_set_restrictions()
1505 q->limits.discard_zeroes_data = 0; in dm_table_set_restrictions()
1509 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); in dm_table_set_restrictions()
1511 queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); in dm_table_set_restrictions()
1514 q->limits.max_write_same_sectors = 0; in dm_table_set_restrictions()
1517 queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); in dm_table_set_restrictions()
1519 queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); in dm_table_set_restrictions()
1529 if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) in dm_table_set_restrictions()
1530 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); in dm_table_set_restrictions()
1543 queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q); in dm_table_set_restrictions()
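dm_table_set_restrictions() copies the stacked limits into the dm queue (1490) and then turns each capability decision into a queue flag or limit. A condensed sketch of that flow; the dm_table_supports_discards()/dm_table_supports_flush()/dm_table_supports_write_same() helper names and the flush flag values are assumptions, while the flag and limit operations themselves are the excerpted lines:

	unsigned flush = 0;

	q->limits = *limits;					/* line 1490: apply the computed limits */

	if (!dm_table_supports_discards(t))			/* assumed helper, built on device_discard_capable */
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);	/* line 1493 */
	else
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);		/* line 1495 */

	if (dm_table_supports_flush(t, REQ_FLUSH)) {		/* assumed helper, built on device_flush_capable */
		flush |= REQ_FLUSH;
		if (dm_table_supports_flush(t, REQ_FUA))
			flush |= REQ_FUA;
	}
	blk_queue_flush(q, flush);				/* line 1502 */

	if (dm_table_all_devices_attribute(t, device_is_nonrot))
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);	/* line 1509 */
	else
		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);	/* line 1511 */

	if (!dm_table_supports_write_same(t))			/* assumed helper, built on device_not_write_same_capable */
		q->limits.max_write_same_sectors = 0;		/* line 1514 */

	/* only drop the entropy contribution if every member device is non-random */
	if (blk_queue_add_random(q) &&
	    dm_table_all_devices_attribute(t, device_is_not_random))
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);	/* lines 1529-1530 */

	queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);	/* line 1543: dm queues stack on top of members */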
1658 struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); in dm_table_any_congested() local
1661 if (likely(q)) in dm_table_any_congested()
1662 r |= bdi_congested(&q->backing_dev_info, bdi_bits); in dm_table_any_congested()
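dm_table_any_congested() ORs together the congestion state of every member device's backing_dev_info. A sketch of the function in the dm-table.c context; the dm_table_get_devices() accessor is assumed, and any per-target callback handling the real function may also perform is omitted:

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
	struct dm_dev_internal *dd;
	struct list_head *devices = dm_table_get_devices(t);	/* assumed accessor */
	int r = 0;

	list_for_each_entry(dd, devices, list) {
		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);	/* line 1658 */

		if (likely(q))						/* line 1661 */
			r |= bdi_congested(&q->backing_dev_info, bdi_bits);	/* line 1662 */
	}

	return r;	/* non-zero if any member device's backing device is congested */
}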