bdev               83 arch/m68k/emu/nfblock.c static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev               85 arch/m68k/emu/nfblock.c 	struct nfhd_device *dev = bdev->bd_disk->private_data;
bdev              103 arch/um/drivers/ubd_kern.c static int ubd_open(struct block_device *bdev, fmode_t mode);
bdev              105 arch/um/drivers/ubd_kern.c static int ubd_ioctl(struct block_device *bdev, fmode_t mode,
bdev              107 arch/um/drivers/ubd_kern.c static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
bdev             1213 arch/um/drivers/ubd_kern.c static int ubd_open(struct block_device *bdev, fmode_t mode)
bdev             1215 arch/um/drivers/ubd_kern.c 	struct gendisk *disk = bdev->bd_disk;
bdev             1416 arch/um/drivers/ubd_kern.c static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev             1418 arch/um/drivers/ubd_kern.c 	struct ubd *ubd_dev = bdev->bd_disk->private_data;
bdev             1426 arch/um/drivers/ubd_kern.c static int ubd_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1429 arch/um/drivers/ubd_kern.c 	struct ubd *ubd_dev = bdev->bd_disk->private_data;
bdev              125 arch/xtensa/platforms/iss/simdisk.c static int simdisk_open(struct block_device *bdev, fmode_t mode)
bdev              127 arch/xtensa/platforms/iss/simdisk.c 	struct simdisk *dev = bdev->bd_disk->private_data;
bdev              131 arch/xtensa/platforms/iss/simdisk.c 		check_disk_change(bdev);
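
The arch drivers above (nfblock, ubd, simdisk) all follow the same pattern: per-device state hangs off bdev->bd_disk->private_data, and removable-media opens call check_disk_change(). A minimal sketch of that pattern, using a hypothetical "mydev" driver; the signatures match the excerpts, the hd_geometry values are made up:

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/module.h>

/* Hypothetical per-device state; only what the sketch needs. */
struct mydev {
	sector_t nr_sects;
};

static int mydev_open(struct block_device *bdev, fmode_t mode)
{
	struct mydev *dev = bdev->bd_disk->private_data;

	/* Removable-media drivers revalidate on open, as simdisk does. */
	check_disk_change(bdev);
	return dev ? 0 : -ENXIO;
}

static int mydev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mydev *dev = bdev->bd_disk->private_data;

	/* Fake CHS values; only heads * sectors * cylinders matters. */
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = dev->nr_sects >> 6;	/* truncated, as in nfblock */
	return 0;
}

static const struct block_device_operations mydev_fops = {
	.owner	= THIS_MODULE,
	.open	= mydev_open,
	.getgeo	= mydev_getgeo,
};
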
bdev              433 block/blk-flush.c int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
bdev              440 block/blk-flush.c 	if (bdev->bd_disk == NULL)
bdev              443 block/blk-flush.c 	q = bdev_get_queue(bdev);
bdev              457 block/blk-flush.c 	bio_set_dev(bio, bdev);
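
blkdev_issue_flush() above lets filesystems and stacking drivers push a full cache flush to the underlying device. A hedged usage sketch; the trailing error-sector pointer is an assumption based on this kernel generation and is not visible in the excerpt:

#include <linux/blkdev.h>

/* Flush the volatile write cache of the device behind 'bdev'; sleeps. */
static int example_flush(struct block_device *bdev)
{
	/* NULL: we do not care which sector a flush error maps to. */
	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
}
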
bdev               25 block/blk-lib.c int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bdev               29 block/blk-lib.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev               37 block/blk-lib.c 	if (bdev_read_only(bdev))
bdev               50 block/blk-lib.c 	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
bdev               65 block/blk-lib.c 		bio_set_dev(bio, bdev);
bdev               97 block/blk-lib.c int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bdev              105 block/blk-lib.c 	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
bdev              131 block/blk-lib.c static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
bdev              135 block/blk-lib.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev              143 block/blk-lib.c 	if (bdev_read_only(bdev))
bdev              146 block/blk-lib.c 	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
bdev              150 block/blk-lib.c 	if (!bdev_write_same(bdev))
bdev              159 block/blk-lib.c 		bio_set_dev(bio, bdev);
bdev              163 block/blk-lib.c 		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
bdev              192 block/blk-lib.c int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
bdev              201 block/blk-lib.c 	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
bdev              212 block/blk-lib.c static int __blkdev_issue_write_zeroes(struct block_device *bdev,
bdev              218 block/blk-lib.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev              223 block/blk-lib.c 	if (bdev_read_only(bdev))
bdev              227 block/blk-lib.c 	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);
bdev              235 block/blk-lib.c 		bio_set_dev(bio, bdev);
bdev              268 block/blk-lib.c static int __blkdev_issue_zero_pages(struct block_device *bdev,
bdev              272 block/blk-lib.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev              280 block/blk-lib.c 	if (bdev_read_only(bdev))
bdev              287 block/blk-lib.c 		bio_set_dev(bio, bdev);
bdev              324 block/blk-lib.c int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
bdev              331 block/blk-lib.c 	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
bdev              335 block/blk-lib.c 	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
bdev              340 block/blk-lib.c 	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
bdev              358 block/blk-lib.c int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
bdev              365 block/blk-lib.c 	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);
bdev              367 block/blk-lib.c 	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
bdev              375 block/blk-lib.c 		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
bdev              378 block/blk-lib.c 		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
bdev              394 block/blk-lib.c 		if (!bdev_write_zeroes_sectors(bdev)) {
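
Taken together, blk-lib.c exposes blkdev_issue_discard(), blkdev_issue_write_same() and blkdev_issue_zeroout() as synchronous helpers that build and submit the bios themselves. A sketch of a caller that punches a hole in a byte range and falls back from discard to explicit zeroing; the helper name and the byte-to-sector conversion are illustrative, the call signatures follow the excerpts above:

#include <linux/blkdev.h>

/*
 * Discard [start, start + len) bytes of 'bdev', falling back to explicit
 * zeroing when the device cannot discard.  Both values are assumed to be
 * logical-block aligned; sleeping context.
 */
static int example_punch_range(struct block_device *bdev, loff_t start,
			       loff_t len)
{
	sector_t sector = start >> 9;
	sector_t nr_sects = len >> 9;
	int ret;

	ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL, 0);
	if (ret == -EOPNOTSUPP)
		ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
					   BLKDEV_ZERO_NOUNMAP);
	return ret;
}
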
bdev              632 block/blk-settings.c int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
bdev              635 block/blk-settings.c 	struct request_queue *bq = bdev_get_queue(bdev);
bdev              637 block/blk-settings.c 	start += get_start_sect(bdev);
bdev              653 block/blk-settings.c void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
bdev              658 block/blk-settings.c 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
bdev              662 block/blk-settings.c 		bdevname(bdev, bottom);
bdev               89 block/blk-zoned.c unsigned int blkdev_nr_zones(struct block_device *bdev)
bdev               91 block/blk-zoned.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev               96 block/blk-zoned.c 	return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
bdev              104 block/blk-zoned.c static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
bdev              106 block/blk-zoned.c 	sector_t offset = get_start_sect(bdev);
bdev              112 block/blk-zoned.c 	if (rep->start + rep->len > bdev->bd_part->nr_sects)
bdev              163 block/blk-zoned.c int blkdev_report_zones(struct block_device *bdev, sector_t sector,
bdev              166 block/blk-zoned.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev              178 block/blk-zoned.c 	if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones))
bdev              181 block/blk-zoned.c 	if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
bdev              187 block/blk-zoned.c 		  __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
bdev              188 block/blk-zoned.c 	ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector,
bdev              194 block/blk-zoned.c 		if (!blkdev_report_zone(bdev, zones))
bdev              209 block/blk-zoned.c static int __blkdev_reset_all_zones(struct block_device *bdev, gfp_t gfp_mask)
bdev              215 block/blk-zoned.c 	bio_set_dev(bio, bdev);
bdev              224 block/blk-zoned.c static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
bdev              227 block/blk-zoned.c 	if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
bdev              230 block/blk-zoned.c 	if (nr_sectors != part_nr_sects_read(bdev->bd_part))
bdev              237 block/blk-zoned.c 	return get_start_sect(bdev) == 0 &&
bdev              238 block/blk-zoned.c 	       part_nr_sects_read(bdev->bd_part) == get_capacity(bdev->bd_disk);
bdev              253 block/blk-zoned.c int blkdev_reset_zones(struct block_device *bdev,
bdev              257 block/blk-zoned.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev              267 block/blk-zoned.c 	if (bdev_read_only(bdev))
bdev              270 block/blk-zoned.c 	if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
bdev              274 block/blk-zoned.c 	if (blkdev_allow_reset_all_zones(bdev, nr_sectors))
bdev              275 block/blk-zoned.c 		return  __blkdev_reset_all_zones(bdev, gfp_mask);
bdev              283 block/blk-zoned.c 	    end_sector != bdev->bd_part->nr_sects)
bdev              291 block/blk-zoned.c 		bio_set_dev(bio, bdev);
bdev              314 block/blk-zoned.c int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
bdev              326 block/blk-zoned.c 	q = bdev_get_queue(bdev);
bdev              342 block/blk-zoned.c 	rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);
bdev              349 block/blk-zoned.c 	ret = blkdev_report_zones(bdev, rep.sector, zones, &rep.nr_zones);
bdev              374 block/blk-zoned.c int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
bdev              384 block/blk-zoned.c 	q = bdev_get_queue(bdev);
bdev              400 block/blk-zoned.c 	return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
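
The blk-zoned.c excerpts show the in-kernel zone API: blkdev_nr_zones() for the zone count, blkdev_report_zones() filling a caller-provided array (with *nr_zones acting as both input capacity and output count), and blkdev_reset_zones() issuing resets. A sketch of reporting the first few zones of a zoned bdev; the blk_zone field names come from the UAPI header rather than from the excerpt, and error handling is minimal:

#include <linux/blkdev.h>
#include <linux/slab.h>

/* Print the first (up to) 16 zones of a zoned block device. */
static int example_dump_zones(struct block_device *bdev)
{
	unsigned int i, nr_zones = min(blkdev_nr_zones(bdev), 16u);
	struct blk_zone *zones;
	int ret;

	if (!nr_zones)
		return -ENODEV;		/* not a zoned device */

	zones = kcalloc(nr_zones, sizeof(*zones), GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* On return, nr_zones holds the number of zones actually filled. */
	ret = blkdev_report_zones(bdev, 0, zones, &nr_zones);
	for (i = 0; !ret && i < nr_zones; i++)
		pr_info("zone %u: start %llu len %llu wp %llu\n", i,
			(unsigned long long)zones[i].start,
			(unsigned long long)zones[i].len,
			(unsigned long long)zones[i].wp);

	kfree(zones);
	return ret;
}
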
bdev              213 block/cmdline-parser.c 					 const char *bdev)
bdev              215 block/cmdline-parser.c 	while (parts && strncmp(bdev, parts->name, sizeof(parts->name)))
bdev               52 block/compat_ioctl.c static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
bdev               68 block/compat_ioctl.c 	geo.start = get_start_sect(bdev);
bdev               69 block/compat_ioctl.c 	ret = disk->fops->getgeo(bdev, &geo);
bdev               81 block/compat_ioctl.c static int compat_hdio_ioctl(struct block_device *bdev, fmode_t mode,
bdev               88 block/compat_ioctl.c 	error = __blkdev_driver_ioctl(bdev, mode,
bdev              118 block/compat_ioctl.c static int compat_cdrom_read_audio(struct block_device *bdev, fmode_t mode,
bdev              141 block/compat_ioctl.c 	return __blkdev_driver_ioctl(bdev, mode, cmd,
bdev              145 block/compat_ioctl.c static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
bdev              174 block/compat_ioctl.c 	return __blkdev_driver_ioctl(bdev, mode, cmd, (unsigned long)cgc);
bdev              184 block/compat_ioctl.c static int compat_blkpg_ioctl(struct block_device *bdev, fmode_t mode,
bdev              203 block/compat_ioctl.c 	return blkdev_ioctl(bdev, mode, cmd, (unsigned long)a);
bdev              210 block/compat_ioctl.c static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
bdev              225 block/compat_ioctl.c 		return compat_hdio_ioctl(bdev, mode, cmd, arg);
bdev              227 block/compat_ioctl.c 		return compat_cdrom_read_audio(bdev, mode, cmd, arg);
bdev              229 block/compat_ioctl.c 		return compat_cdrom_generic_command(bdev, mode, cmd, arg);
bdev              310 block/compat_ioctl.c 	return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
bdev              320 block/compat_ioctl.c 	struct block_device *bdev = inode->i_bdev;
bdev              321 block/compat_ioctl.c 	struct gendisk *disk = bdev->bd_disk;
bdev              337 block/compat_ioctl.c 		return compat_hdio_getgeo(disk, bdev, compat_ptr(arg));
bdev              339 block/compat_ioctl.c 		return compat_put_uint(arg, bdev_physical_block_size(bdev));
bdev              341 block/compat_ioctl.c 		return compat_put_uint(arg, bdev_io_min(bdev));
bdev              343 block/compat_ioctl.c 		return compat_put_uint(arg, bdev_io_opt(bdev));
bdev              345 block/compat_ioctl.c 		return compat_put_int(arg, bdev_alignment_offset(bdev));
bdev              362 block/compat_ioctl.c 		return blkdev_ioctl(bdev, mode, cmd,
bdev              365 block/compat_ioctl.c 		return blkdev_ioctl(bdev, mode, BLKBSZSET,
bdev              368 block/compat_ioctl.c 		return compat_blkpg_ioctl(bdev, mode, cmd, compat_ptr(arg));
bdev              374 block/compat_ioctl.c 			       (bdev->bd_bdi->ra_pages * PAGE_SIZE) / 512);
bdev              376 block/compat_ioctl.c 		return compat_put_int(arg, bdev_read_only(bdev) != 0);
bdev              378 block/compat_ioctl.c 		return compat_put_int(arg, block_size(bdev));
bdev              380 block/compat_ioctl.c 		return compat_put_int(arg, bdev_logical_block_size(bdev));
bdev              383 block/compat_ioctl.c 				    queue_max_sectors(bdev_get_queue(bdev)));
bdev              387 block/compat_ioctl.c 					 !blk_queue_nonrot(bdev_get_queue(bdev)));
bdev              392 block/compat_ioctl.c 		bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
bdev              395 block/compat_ioctl.c 		size = i_size_read(bdev->bd_inode);
bdev              401 block/compat_ioctl.c 		return compat_put_u64(arg, i_size_read(bdev->bd_inode));
bdev              407 block/compat_ioctl.c 		ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
bdev              415 block/compat_ioctl.c 		return blkdev_ioctl(bdev, mode, cmd,
bdev              419 block/compat_ioctl.c 			ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
bdev              421 block/compat_ioctl.c 			ret = compat_blkdev_driver_ioctl(bdev, mode, cmd, arg);
bdev              600 block/genhd.c  	struct block_device *bdev;
bdev              650 block/genhd.c  	bdev = bdget_disk(disk, 0);
bdev              651 block/genhd.c  	if (!bdev)
bdev              654 block/genhd.c  	bdev->bd_invalidated = 1;
bdev              655 block/genhd.c  	err = blkdev_get(bdev, FMODE_READ, NULL);
bdev              658 block/genhd.c  	blkdev_put(bdev, FMODE_READ);
bdev              929 block/genhd.c  	struct block_device *bdev = NULL;
bdev              933 block/genhd.c  		bdev = bdget(part_devt(part));
bdev              936 block/genhd.c  	return bdev;
bdev             1570 block/genhd.c  void set_device_ro(struct block_device *bdev, int flag)
bdev             1572 block/genhd.c  	bdev->bd_part->policy = flag;
bdev             1595 block/genhd.c  int bdev_read_only(struct block_device *bdev)
bdev             1597 block/genhd.c  	if (!bdev)
bdev             1599 block/genhd.c  	return bdev->bd_part->policy;
bdev             1607 block/genhd.c  	struct block_device *bdev = bdget_disk(disk, partno);
bdev             1608 block/genhd.c  	if (bdev) {
bdev             1609 block/genhd.c  		fsync_bdev(bdev);
bdev             1610 block/genhd.c  		res = __invalidate_device(bdev, true);
bdev             1611 block/genhd.c  		bdput(bdev);
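
The genhd.c fragment around bdget_disk()/blkdev_get() is the core's own recipe for forcing a partition rescan: grab the whole-disk block_device, mark it invalidated, and let the open path re-read the table. A condensed sketch of that sequence, mirroring the excerpt; 'disk' is assumed to be a live gendisk:

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/genhd.h>

/* Force a partition-table re-read on 'disk', as the core code above does. */
static int example_rescan_disk(struct gendisk *disk)
{
	struct block_device *bdev;
	int err;

	bdev = bdget_disk(disk, 0);	/* partno 0 is the whole disk */
	if (!bdev)
		return -ENOMEM;

	bdev->bd_invalidated = 1;	/* ask the open path to rescan */
	err = blkdev_get(bdev, FMODE_READ, NULL);
	if (!err)
		blkdev_put(bdev, FMODE_READ);
	return err;
}
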
bdev               14 block/ioctl.c  static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
bdev               31 block/ioctl.c  	disk = bdev->bd_disk;
bdev               32 block/ioctl.c  	if (bdev != bdev->bd_contains)
bdev               50 block/ioctl.c  			if (p.start & (bdev_logical_block_size(bdev) - 1))
bdev               53 block/ioctl.c  			mutex_lock(&bdev->bd_mutex);
bdev               62 block/ioctl.c  					mutex_unlock(&bdev->bd_mutex);
bdev               71 block/ioctl.c  			mutex_unlock(&bdev->bd_mutex);
bdev               93 block/ioctl.c  			mutex_lock_nested(&bdev->bd_mutex, 1);
bdev               95 block/ioctl.c  			mutex_unlock(&bdev->bd_mutex);
bdev              121 block/ioctl.c  			mutex_lock_nested(&bdev->bd_mutex, 1);
bdev              124 block/ioctl.c  				mutex_unlock(&bdev->bd_mutex);
bdev              139 block/ioctl.c  					mutex_unlock(&bdev->bd_mutex);
bdev              149 block/ioctl.c  			mutex_unlock(&bdev->bd_mutex);
bdev              163 block/ioctl.c  int __blkdev_reread_part(struct block_device *bdev)
bdev              165 block/ioctl.c  	struct gendisk *disk = bdev->bd_disk;
bdev              167 block/ioctl.c  	if (!disk_part_scan_enabled(disk) || bdev != bdev->bd_contains)
bdev              172 block/ioctl.c  	lockdep_assert_held(&bdev->bd_mutex);
bdev              174 block/ioctl.c  	return rescan_partitions(disk, bdev);
bdev              189 block/ioctl.c  int blkdev_reread_part(struct block_device *bdev)
bdev              193 block/ioctl.c  	mutex_lock(&bdev->bd_mutex);
bdev              194 block/ioctl.c  	res = __blkdev_reread_part(bdev);
bdev              195 block/ioctl.c  	mutex_unlock(&bdev->bd_mutex);
bdev              201 block/ioctl.c  static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
bdev              206 block/ioctl.c  	struct request_queue *q = bdev_get_queue(bdev);
bdev              207 block/ioctl.c  	struct address_space *mapping = bdev->bd_inode->i_mapping;
bdev              227 block/ioctl.c  	if (start + len > i_size_read(bdev->bd_inode))
bdev              230 block/ioctl.c  	return blkdev_issue_discard(bdev, start >> 9, len >> 9,
bdev              234 block/ioctl.c  static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
bdev              255 block/ioctl.c  	if (end >= (uint64_t)i_size_read(bdev->bd_inode))
bdev              261 block/ioctl.c  	mapping = bdev->bd_inode->i_mapping;
bdev              264 block/ioctl.c  	return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
bdev              298 block/ioctl.c  int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
bdev              301 block/ioctl.c  	struct gendisk *disk = bdev->bd_disk;
bdev              304 block/ioctl.c  		return disk->fops->ioctl(bdev, mode, cmd, arg);
bdev              315 block/ioctl.c  static int blkdev_pr_register(struct block_device *bdev,
bdev              318 block/ioctl.c  	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
bdev              330 block/ioctl.c  	return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);
bdev              333 block/ioctl.c  static int blkdev_pr_reserve(struct block_device *bdev,
bdev              336 block/ioctl.c  	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
bdev              348 block/ioctl.c  	return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);
bdev              351 block/ioctl.c  static int blkdev_pr_release(struct block_device *bdev,
bdev              354 block/ioctl.c  	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
bdev              366 block/ioctl.c  	return ops->pr_release(bdev, rsv.key, rsv.type);
bdev              369 block/ioctl.c  static int blkdev_pr_preempt(struct block_device *bdev,
bdev              372 block/ioctl.c  	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
bdev              384 block/ioctl.c  	return ops->pr_preempt(bdev, p.old_key, p.new_key, p.type, abort);
bdev              387 block/ioctl.c  static int blkdev_pr_clear(struct block_device *bdev,
bdev              390 block/ioctl.c  	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
bdev              402 block/ioctl.c  	return ops->pr_clear(bdev, c.key);
bdev              425 block/ioctl.c  static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode,
bdev              433 block/ioctl.c  	ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
bdev              437 block/ioctl.c  	fsync_bdev(bdev);
bdev              438 block/ioctl.c  	invalidate_bdev(bdev);
bdev              442 block/ioctl.c  static int blkdev_roset(struct block_device *bdev, fmode_t mode,
bdev              450 block/ioctl.c  	ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
bdev              455 block/ioctl.c  	set_device_ro(bdev, n);
bdev              459 block/ioctl.c  static int blkdev_getgeo(struct block_device *bdev,
bdev              462 block/ioctl.c  	struct gendisk *disk = bdev->bd_disk;
bdev              476 block/ioctl.c  	geo.start = get_start_sect(bdev);
bdev              477 block/ioctl.c  	ret = disk->fops->getgeo(bdev, &geo);
bdev              486 block/ioctl.c  static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
bdev              499 block/ioctl.c  		bdgrab(bdev);
bdev              500 block/ioctl.c  		if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
bdev              504 block/ioctl.c  	ret = set_blocksize(bdev, n);
bdev              506 block/ioctl.c  		blkdev_put(bdev, mode | FMODE_EXCL);
bdev              513 block/ioctl.c  int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
bdev              522 block/ioctl.c  		return blkdev_flushbuf(bdev, mode, cmd, arg);
bdev              524 block/ioctl.c  		return blkdev_roset(bdev, mode, cmd, arg);
bdev              526 block/ioctl.c  		return blk_ioctl_discard(bdev, mode, arg, 0);
bdev              528 block/ioctl.c  		return blk_ioctl_discard(bdev, mode, arg,
bdev              531 block/ioctl.c  		return blk_ioctl_zeroout(bdev, mode, arg);
bdev              533 block/ioctl.c  		return blkdev_report_zones_ioctl(bdev, mode, cmd, arg);
bdev              535 block/ioctl.c  		return blkdev_reset_zones_ioctl(bdev, mode, cmd, arg);
bdev              537 block/ioctl.c  		return put_uint(arg, bdev_zone_sectors(bdev));
bdev              539 block/ioctl.c  		return put_uint(arg, blkdev_nr_zones(bdev));
bdev              541 block/ioctl.c  		return blkdev_getgeo(bdev, argp);
bdev              546 block/ioctl.c  		return put_long(arg, (bdev->bd_bdi->ra_pages*PAGE_SIZE) / 512);
bdev              548 block/ioctl.c  		return put_int(arg, bdev_read_only(bdev) != 0);
bdev              550 block/ioctl.c  		return put_int(arg, block_size(bdev));
bdev              552 block/ioctl.c  		return put_int(arg, bdev_logical_block_size(bdev));
bdev              554 block/ioctl.c  		return put_uint(arg, bdev_physical_block_size(bdev));
bdev              556 block/ioctl.c  		return put_uint(arg, bdev_io_min(bdev));
bdev              558 block/ioctl.c  		return put_uint(arg, bdev_io_opt(bdev));
bdev              560 block/ioctl.c  		return put_int(arg, bdev_alignment_offset(bdev));
bdev              565 block/ioctl.c  				    queue_max_sectors(bdev_get_queue(bdev)));
bdev              568 block/ioctl.c  		return put_ushort(arg, !blk_queue_nonrot(bdev_get_queue(bdev)));
bdev              573 block/ioctl.c  		bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
bdev              576 block/ioctl.c  		return blkdev_bszset(bdev, mode, argp);
bdev              578 block/ioctl.c  		return blkpg_ioctl(bdev, argp);
bdev              580 block/ioctl.c  		return blkdev_reread_part(bdev);
bdev              582 block/ioctl.c  		size = i_size_read(bdev->bd_inode);
bdev              587 block/ioctl.c  		return put_u64(arg, i_size_read(bdev->bd_inode));
bdev              592 block/ioctl.c  		return blk_trace_ioctl(bdev, cmd, argp);
bdev              594 block/ioctl.c  		return blkdev_pr_register(bdev, argp);
bdev              596 block/ioctl.c  		return blkdev_pr_reserve(bdev, argp);
bdev              598 block/ioctl.c  		return blkdev_pr_release(bdev, argp);
bdev              600 block/ioctl.c  		return blkdev_pr_preempt(bdev, argp, false);
bdev              602 block/ioctl.c  		return blkdev_pr_preempt(bdev, argp, true);
bdev              604 block/ioctl.c  		return blkdev_pr_clear(bdev, argp);
bdev              606 block/ioctl.c  		return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
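
blkdev_ioctl() above is the kernel side of the familiar block ioctls (BLKGETSIZE64, BLKSSZGET, BLKDISCARD, ...). For reference, a small userspace sketch exercising a few of them; the device path is only an example:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* BLKGETSIZE64, BLKSSZGET, BLKDISCARD */

int main(void)
{
	uint64_t size, range[2];
	int lbs, fd = open("/dev/sdX", O_RDWR);	/* example device */

	if (fd < 0)
		return 1;

	ioctl(fd, BLKGETSIZE64, &size);		/* device size in bytes */
	ioctl(fd, BLKSSZGET, &lbs);		/* logical block size */
	printf("%llu bytes, %d-byte logical blocks\n",
	       (unsigned long long)size, lbs);

	/* Discard the first 1 MiB: {start, length} in bytes. */
	range[0] = 0;
	range[1] = 1024 * 1024;
	if (ioctl(fd, BLKDISCARD, range))
		perror("BLKDISCARD");

	close(fd);
	return 0;
}
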
bdev               47 block/partition-generic.c const char *bdevname(struct block_device *bdev, char *buf)
bdev               49 block/partition-generic.c 	return disk_name(bdev->bd_disk, bdev->bd_part->partno, buf);
bdev              442 block/partition-generic.c static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
bdev              448 block/partition-generic.c 	if (bdev->bd_part_count || bdev->bd_super)
bdev              463 block/partition-generic.c 			      struct block_device *bdev,
bdev              466 block/partition-generic.c 	unsigned int zone_sectors = bdev_zone_sectors(bdev);
bdev              512 block/partition-generic.c int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
bdev              523 block/partition-generic.c 	res = drop_partitions(disk, bdev);
bdev              529 block/partition-generic.c 	check_disk_size_change(disk, bdev, true);
bdev              530 block/partition-generic.c 	bdev->bd_invalidated = 0;
bdev              531 block/partition-generic.c 	if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
bdev              616 block/partition-generic.c 		if (bdev_is_zoned(bdev) &&
bdev              617 block/partition-generic.c 		    !part_zone_aligned(disk, bdev, from, size)) {
bdev              642 block/partition-generic.c int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
bdev              646 block/partition-generic.c 	if (!bdev->bd_invalidated)
bdev              649 block/partition-generic.c 	res = drop_partitions(disk, bdev);
bdev              654 block/partition-generic.c 	check_disk_size_change(disk, bdev, false);
bdev              655 block/partition-generic.c 	bdev->bd_invalidated = 0;
bdev              662 block/partition-generic.c unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
bdev              664 block/partition-generic.c 	struct address_space *mapping = bdev->bd_inode->i_mapping;
bdev              279 block/partitions/acorn.c 	nr_sects = (state->bdev->bd_inode->i_size >> 9) - start_sect;
bdev              544 block/partitions/acorn.c 		size = get_capacity(state->bdev->bd_disk);
bdev               79 block/partitions/aix.c static u64 last_lba(struct block_device *bdev)
bdev               81 block/partitions/aix.c 	if (!bdev || !bdev->bd_inode)
bdev               83 block/partitions/aix.c 	return (bdev->bd_inode->i_size >> 9) - 1ULL;
bdev              101 block/partitions/aix.c 	if (!buffer || lba + count / 512 > last_lba(state->bdev))
bdev               47 block/partitions/amiga.c 				       bdevname(state->bdev, b), blk);
bdev               69 block/partitions/amiga.c 		       bdevname(state->bdev, b), blk);
bdev               90 block/partitions/amiga.c 				       bdevname(state->bdev, b), blk);
bdev               50 block/partitions/atari.c 	if (bdev_logical_block_size(state->bdev) != 512)
bdev               58 block/partitions/atari.c 	hd_size = state->bdev->bd_inode->i_size >> 9;
bdev              143 block/partitions/check.c check_partition(struct gendisk *hd, struct block_device *bdev)
bdev              158 block/partitions/check.c 	state->bdev = bdev;
bdev               11 block/partitions/check.h 	struct block_device *bdev;
bdev               34 block/partitions/check.h 	if (n >= get_capacity(state->bdev->bd_disk)) {
bdev               38 block/partitions/check.h 	return read_dev_sector(state->bdev, n, p);
bdev              127 block/partitions/cmdline.c 	char bdev[BDEVNAME_SIZE];
bdev              144 block/partitions/cmdline.c 	bdevname(state->bdev, bdev);
bdev              145 block/partitions/cmdline.c 	parts = cmdline_parts_find(bdev_parts, bdev);
bdev              149 block/partitions/cmdline.c 	disk_size = get_capacity(state->bdev->bd_disk) << 9;
bdev              134 block/partitions/efi.c static u64 last_lba(struct block_device *bdev)
bdev              136 block/partitions/efi.c 	if (!bdev || !bdev->bd_inode)
bdev              138 block/partitions/efi.c 	return div_u64(bdev->bd_inode->i_size,
bdev              139 block/partitions/efi.c 		       bdev_logical_block_size(bdev)) - 1ULL;
bdev              241 block/partitions/efi.c 	struct block_device *bdev = state->bdev;
bdev              242 block/partitions/efi.c 	sector_t n = lba * (bdev_logical_block_size(bdev) / 512);
bdev              244 block/partitions/efi.c 	if (!buffer || lba > last_lba(bdev))
bdev              312 block/partitions/efi.c 	unsigned ssz = bdev_logical_block_size(state->bdev);
bdev              359 block/partitions/efi.c 			bdev_logical_block_size(state->bdev)) {
bdev              362 block/partitions/efi.c 			bdev_logical_block_size(state->bdev));
bdev              398 block/partitions/efi.c 	lastlba = last_lba(state->bdev);
bdev              590 block/partitions/efi.c 	sector_t total_sectors = i_size_read(state->bdev->bd_inode) >> 9;
bdev              596 block/partitions/efi.c 	lastlba = last_lba(state->bdev);
bdev              683 block/partitions/efi.c 	unsigned ssz = bdev_logical_block_size(state->bdev) / 512;
bdev              701 block/partitions/efi.c 		if (!is_pte_valid(&ptes[i], last_lba(state->bdev)))
bdev              292 block/partitions/ibm.c 	struct block_device *bdev = state->bdev;
bdev              303 block/partitions/ibm.c 	blocksize = bdev_logical_block_size(bdev);
bdev              306 block/partitions/ibm.c 	i_size = i_size_read(bdev->bd_inode);
bdev              318 block/partitions/ibm.c 	if (ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
bdev              320 block/partitions/ibm.c 	if (ioctl_by_bdev(bdev, BIODASDINFO2, (unsigned long)info) != 0) {
bdev              307 block/partitions/ldm.c 	num_sects = state->bdev->bd_inode->i_size >> 9;
bdev              136 block/partitions/mac.c 		note_bootable_part(state->bdev->bd_dev, found_root,
bdev              129 block/partitions/msdos.c 	sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
bdev              454 block/partitions/msdos.c 	sector_t sector_size = bdev_logical_block_size(state->bdev) / 512;
bdev               61 block/partitions/sgi.c 		       bdevname(state->bdev, b));
bdev               82 block/partitions/sun.c 		       bdevname(state->bdev, b));
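
The block/partitions excerpts all read from state->bdev through the read_part_sector() wrapper in check.h and register what they find. A skeletal parser in that style; "toy" is a made-up format, and put_partition()/put_dev_sector() are the standard check.h helpers, assumed here since they do not appear in the excerpt:

#include "check.h"	/* struct parsed_partitions, Sector, read_part_sector() */

/* Returns 1 if a (toy) table was found, 0 if not ours, -1 on read error. */
static int toy_partition(struct parsed_partitions *state)
{
	Sector sect;
	unsigned char *data;

	data = read_part_sector(state, 0, &sect);
	if (!data)
		return -1;

	if (data[510] != 0x55 || data[511] != 0xAA) {	/* toy magic */
		put_dev_sector(sect);
		return 0;
	}

	/* Pretend the format describes one partition after sector 0. */
	put_partition(state, 1, 1, get_capacity(state->bdev->bd_disk) - 1);

	put_dev_sector(sect);
	return 1;
}
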
bdev              491 drivers/ata/libata-scsi.c int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
bdev             1525 drivers/block/amiflop.c static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev             1527 drivers/block/amiflop.c 	int drive = MINOR(bdev->bd_dev) & 3;
bdev             1535 drivers/block/amiflop.c static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1538 drivers/block/amiflop.c 	struct amiga_floppy_struct *p = bdev->bd_disk->private_data;
bdev             1550 drivers/block/amiflop.c 		fsync_bdev(bdev);
bdev             1579 drivers/block/amiflop.c 		invalidate_bdev(bdev);
bdev             1610 drivers/block/amiflop.c static int fd_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1616 drivers/block/amiflop.c 	ret = fd_locked_ioctl(bdev, mode, cmd, param);
bdev             1657 drivers/block/amiflop.c static int floppy_open(struct block_device *bdev, fmode_t mode)
bdev             1659 drivers/block/amiflop.c 	int drive = MINOR(bdev->bd_dev) & 3;
bdev             1660 drivers/block/amiflop.c 	int system =  (MINOR(bdev->bd_dev) & 4) >> 2;
bdev             1673 drivers/block/amiflop.c 		check_disk_change(bdev);
bdev              220 drivers/block/aoe/aoeblk.c aoeblk_open(struct block_device *bdev, fmode_t mode)
bdev              222 drivers/block/aoe/aoeblk.c 	struct aoedev *d = bdev->bd_disk->private_data;
bdev              285 drivers/block/aoe/aoeblk.c aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              287 drivers/block/aoe/aoeblk.c 	struct aoedev *d = bdev->bd_disk->private_data;
bdev              301 drivers/block/aoe/aoeblk.c aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg)
bdev              308 drivers/block/aoe/aoeblk.c 	d = bdev->bd_disk->private_data;
bdev              442 drivers/block/ataflop.c static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
bdev              447 drivers/block/ataflop.c static int floppy_open(struct block_device *bdev, fmode_t mode);
bdev             1559 drivers/block/ataflop.c static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1562 drivers/block/ataflop.c 	struct gendisk *disk = bdev->bd_disk;
bdev             1735 drivers/block/ataflop.c 		check_disk_change(bdev);
bdev             1742 drivers/block/ataflop.c static int fd_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1748 drivers/block/ataflop.c 	ret = fd_locked_ioctl(bdev, mode, cmd, arg);
bdev             1889 drivers/block/ataflop.c static int floppy_open(struct block_device *bdev, fmode_t mode)
bdev             1891 drivers/block/ataflop.c 	struct atari_floppy_struct *p = bdev->bd_disk->private_data;
bdev             1892 drivers/block/ataflop.c 	int type  = MINOR(bdev->bd_dev) >> 2;
bdev             1912 drivers/block/ataflop.c 		check_disk_change(bdev);
bdev             1926 drivers/block/ataflop.c static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
bdev             1931 drivers/block/ataflop.c 	ret = floppy_open(bdev, mode);
bdev              314 drivers/block/brd.c static int brd_rw_page(struct block_device *bdev, sector_t sector,
bdev              317 drivers/block/brd.c 	struct brd_device *brd = bdev->bd_disk->private_data;
bdev              105 drivers/block/drbd/drbd_actlog.c void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev,
bdev              111 drivers/block/drbd/drbd_actlog.c 	dt = rcu_dereference(bdev->disk_conf)->disk_timeout;
bdev              126 drivers/block/drbd/drbd_actlog.c 				 struct drbd_backing_dev *bdev,
bdev              142 drivers/block/drbd/drbd_actlog.c 	bio_set_dev(bio, bdev->md_bdev);
bdev              168 drivers/block/drbd/drbd_actlog.c 	wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
bdev              177 drivers/block/drbd/drbd_actlog.c int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
bdev              183 drivers/block/drbd/drbd_actlog.c 	BUG_ON(!bdev->md_bdev);
bdev              190 drivers/block/drbd/drbd_actlog.c 	if (sector < drbd_md_first_sector(bdev) ||
bdev              191 drivers/block/drbd/drbd_actlog.c 	    sector + 7 > drbd_md_last_sector(bdev))
bdev              197 drivers/block/drbd/drbd_actlog.c 	err = _drbd_md_sync_page_io(device, bdev, sector, op);
bdev             1129 drivers/block/drbd/drbd_int.h extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
bdev             1480 drivers/block/drbd/drbd_int.h 			struct drbd_backing_dev *bdev, struct o_qlim *o);
bdev             1505 drivers/block/drbd/drbd_int.h 		struct drbd_backing_dev *bdev, sector_t sector, int op);
bdev             1508 drivers/block/drbd/drbd_int.h 		struct drbd_backing_dev *bdev, unsigned int *done);
bdev             1624 drivers/block/drbd/drbd_int.h void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
bdev             1829 drivers/block/drbd/drbd_int.h static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
bdev             1831 drivers/block/drbd/drbd_int.h 	switch (bdev->md.meta_dev_idx) {
bdev             1834 drivers/block/drbd/drbd_int.h 		return bdev->md.md_offset + bdev->md.bm_offset;
bdev             1837 drivers/block/drbd/drbd_int.h 		return bdev->md.md_offset;
bdev             1845 drivers/block/drbd/drbd_int.h static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
bdev             1847 drivers/block/drbd/drbd_int.h 	switch (bdev->md.meta_dev_idx) {
bdev             1850 drivers/block/drbd/drbd_int.h 		return bdev->md.md_offset + MD_4kB_SECT -1;
bdev             1853 drivers/block/drbd/drbd_int.h 		return bdev->md.md_offset + bdev->md.md_size_sect -1;
bdev             1858 drivers/block/drbd/drbd_int.h static inline sector_t drbd_get_capacity(struct block_device *bdev)
bdev             1861 drivers/block/drbd/drbd_int.h 	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
bdev             1872 drivers/block/drbd/drbd_int.h static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
bdev             1876 drivers/block/drbd/drbd_int.h 	switch (bdev->md.meta_dev_idx) {
bdev             1879 drivers/block/drbd/drbd_int.h 		s = drbd_get_capacity(bdev->backing_bdev)
bdev             1881 drivers/block/drbd/drbd_int.h 				drbd_md_first_sector(bdev))
bdev             1886 drivers/block/drbd/drbd_int.h 				drbd_get_capacity(bdev->backing_bdev));
bdev             1889 drivers/block/drbd/drbd_int.h 			BM_EXT_TO_SECT(bdev->md.md_size_sect
bdev             1890 drivers/block/drbd/drbd_int.h 				     - bdev->md.bm_offset));
bdev             1894 drivers/block/drbd/drbd_int.h 				drbd_get_capacity(bdev->backing_bdev));
bdev             1903 drivers/block/drbd/drbd_int.h static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
bdev             1905 drivers/block/drbd/drbd_int.h 	const int meta_dev_idx = bdev->md.meta_dev_idx;
bdev             1914 drivers/block/drbd/drbd_int.h 		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
bdev             1917 drivers/block/drbd/drbd_int.h 	return MD_128MB_SECT * bdev->md.meta_dev_idx;
bdev               53 drivers/block/drbd/drbd_main.c static int drbd_open(struct block_device *bdev, fmode_t mode);
bdev             1912 drivers/block/drbd/drbd_main.c static int drbd_open(struct block_device *bdev, fmode_t mode)
bdev             1914 drivers/block/drbd/drbd_main.c 	struct drbd_device *device = bdev->bd_disk->private_data;
bdev             3217 drivers/block/drbd/drbd_main.c static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
bdev             3219 drivers/block/drbd/drbd_main.c 	sector_t capacity = drbd_get_capacity(bdev->md_bdev);
bdev             3220 drivers/block/drbd/drbd_main.c 	struct drbd_md *in_core = &bdev->md;
bdev             3259 drivers/block/drbd/drbd_main.c 	if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
bdev             3308 drivers/block/drbd/drbd_main.c int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
bdev             3323 drivers/block/drbd/drbd_main.c 	bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
bdev             3324 drivers/block/drbd/drbd_main.c 	bdev->md.md_offset = drbd_md_ss(bdev);
bdev             3328 drivers/block/drbd/drbd_main.c 	bdev->md.md_size_sect = 8;
bdev             3330 drivers/block/drbd/drbd_main.c 	if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
bdev             3366 drivers/block/drbd/drbd_main.c 	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
bdev             3368 drivers/block/drbd/drbd_main.c 		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
bdev             3369 drivers/block/drbd/drbd_main.c 	bdev->md.flags = be32_to_cpu(buffer->flags);
bdev             3370 drivers/block/drbd/drbd_main.c 	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
bdev             3372 drivers/block/drbd/drbd_main.c 	bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
bdev             3373 drivers/block/drbd/drbd_main.c 	bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
bdev             3374 drivers/block/drbd/drbd_main.c 	bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
bdev             3376 drivers/block/drbd/drbd_main.c 	if (check_activity_log_stripe_size(device, buffer, &bdev->md))
bdev             3378 drivers/block/drbd/drbd_main.c 	if (check_offsets_and_sizes(device, bdev))
bdev             3381 drivers/block/drbd/drbd_main.c 	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
bdev             3383 drivers/block/drbd/drbd_main.c 		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
bdev             3386 drivers/block/drbd/drbd_main.c 	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
bdev             3388 drivers/block/drbd/drbd_main.c 		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
bdev             3692 drivers/block/drbd/drbd_main.c int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
bdev             3694 drivers/block/drbd/drbd_main.c 	return (bdev->md.flags & flag) != 0;
bdev              827 drivers/block/drbd/drbd_nl.c 				       struct drbd_backing_dev *bdev)
bdev              830 drivers/block/drbd/drbd_nl.c 	unsigned int al_size_sect = bdev->md.al_size_4k * 8;
bdev              832 drivers/block/drbd/drbd_nl.c 	bdev->md.md_offset = drbd_md_ss(bdev);
bdev              834 drivers/block/drbd/drbd_nl.c 	switch (bdev->md.meta_dev_idx) {
bdev              837 drivers/block/drbd/drbd_nl.c 		bdev->md.md_size_sect = MD_128MB_SECT;
bdev              838 drivers/block/drbd/drbd_nl.c 		bdev->md.al_offset = MD_4kB_SECT;
bdev              839 drivers/block/drbd/drbd_nl.c 		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
bdev              843 drivers/block/drbd/drbd_nl.c 		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
bdev              844 drivers/block/drbd/drbd_nl.c 		bdev->md.al_offset = MD_4kB_SECT;
bdev              845 drivers/block/drbd/drbd_nl.c 		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
bdev              850 drivers/block/drbd/drbd_nl.c 		bdev->md.al_offset = -al_size_sect;
bdev              852 drivers/block/drbd/drbd_nl.c 		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
bdev              861 drivers/block/drbd/drbd_nl.c 		bdev->md.md_size_sect = md_size_sect;
bdev              863 drivers/block/drbd/drbd_nl.c 		bdev->md.bm_offset   = -md_size_sect + MD_4kB_SECT;
bdev             1092 drivers/block/drbd/drbd_nl.c drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
bdev             1096 drivers/block/drbd/drbd_nl.c 	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
bdev             1100 drivers/block/drbd/drbd_nl.c 	m_size = drbd_get_max_capacity(bdev);
bdev             1332 drivers/block/drbd/drbd_nl.c static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
bdev             1343 drivers/block/drbd/drbd_nl.c 	if (bdev) {
bdev             1344 drivers/block/drbd/drbd_nl.c 		b = bdev->backing_bdev->bd_disk->queue;
bdev             1380 drivers/block/drbd/drbd_nl.c void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
bdev             1388 drivers/block/drbd/drbd_nl.c 	if (bdev) {
bdev             1389 drivers/block/drbd/drbd_nl.c 		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
bdev             1423 drivers/block/drbd/drbd_nl.c 	drbd_setup_queue_param(device, bdev, new, o);
bdev             1477 drivers/block/drbd/drbd_nl.c static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
bdev             1497 drivers/block/drbd/drbd_nl.c 	unsigned int al_size_4k = bdev->md.al_size_4k;
bdev             1711 drivers/block/drbd/drbd_nl.c 	struct block_device *bdev;
bdev             1714 drivers/block/drbd/drbd_nl.c 	bdev = blkdev_get_by_path(bdev_path,
bdev             1716 drivers/block/drbd/drbd_nl.c 	if (IS_ERR(bdev)) {
bdev             1718 drivers/block/drbd/drbd_nl.c 				bdev_path, PTR_ERR(bdev));
bdev             1719 drivers/block/drbd/drbd_nl.c 		return bdev;
bdev             1723 drivers/block/drbd/drbd_nl.c 		return bdev;
bdev             1725 drivers/block/drbd/drbd_nl.c 	err = bd_link_disk_holder(bdev, device->vdisk);
bdev             1727 drivers/block/drbd/drbd_nl.c 		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
bdev             1730 drivers/block/drbd/drbd_nl.c 		bdev = ERR_PTR(err);
bdev             1732 drivers/block/drbd/drbd_nl.c 	return bdev;
bdev             1739 drivers/block/drbd/drbd_nl.c 	struct block_device *bdev;
bdev             1741 drivers/block/drbd/drbd_nl.c 	bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
bdev             1742 drivers/block/drbd/drbd_nl.c 	if (IS_ERR(bdev))
bdev             1744 drivers/block/drbd/drbd_nl.c 	nbc->backing_bdev = bdev;
bdev             1754 drivers/block/drbd/drbd_nl.c 	bdev = open_backing_dev(device, new_disk_conf->meta_dev,
bdev             1762 drivers/block/drbd/drbd_nl.c 	if (IS_ERR(bdev))
bdev             1764 drivers/block/drbd/drbd_nl.c 	nbc->md_bdev = bdev;
bdev             1768 drivers/block/drbd/drbd_nl.c static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
bdev             1771 drivers/block/drbd/drbd_nl.c 	if (!bdev)
bdev             1774 drivers/block/drbd/drbd_nl.c 		bd_unlink_disk_holder(bdev, device->vdisk);
bdev             1775 drivers/block/drbd/drbd_nl.c 	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
bdev             1427 drivers/block/drbd/drbd_receiver.c max_allowed_wo(struct drbd_backing_dev *bdev, enum write_ordering_e wo)
bdev             1431 drivers/block/drbd/drbd_receiver.c 	dc = rcu_dereference(bdev->disk_conf);
bdev             1446 drivers/block/drbd/drbd_receiver.c void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
bdev             1465 drivers/block/drbd/drbd_receiver.c 			if (device->ldev == bdev)
bdev             1466 drivers/block/drbd/drbd_receiver.c 				bdev = NULL;
bdev             1471 drivers/block/drbd/drbd_receiver.c 	if (bdev)
bdev             1472 drivers/block/drbd/drbd_receiver.c 		wo = max_allowed_wo(bdev, wo);
bdev             1513 drivers/block/drbd/drbd_receiver.c 	struct block_device *bdev = device->ldev->backing_bdev;
bdev             1514 drivers/block/drbd/drbd_receiver.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev             1525 drivers/block/drbd/drbd_receiver.c 	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
bdev             1546 drivers/block/drbd/drbd_receiver.c 		err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0);
bdev             1551 drivers/block/drbd/drbd_receiver.c 		err |= blkdev_issue_discard(bdev, start, max_discard_sectors, GFP_NOIO, 0);
bdev             1563 drivers/block/drbd/drbd_receiver.c 			err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0);
bdev             1570 drivers/block/drbd/drbd_receiver.c 		err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO,
bdev             1610 drivers/block/drbd/drbd_receiver.c 	struct block_device *bdev = device->ldev->backing_bdev;
bdev             1613 drivers/block/drbd/drbd_receiver.c 	if (blkdev_issue_write_same(bdev, s, nr, GFP_NOIO, peer_req->pages))
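
open_backing_dev()/close_backing_dev() in the drbd excerpt show the canonical way a driver claims another block device: blkdev_get_by_path() with FMODE_EXCL and a holder pointer, paired with a blkdev_put() using the same mode. A stripped-down sketch of that pairing; 'holder' can be any kernel pointer that identifies the claimer:

#include <linux/blkdev.h>

static struct block_device *example_claim(const char *path, void *holder)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(path,
			FMODE_READ | FMODE_WRITE | FMODE_EXCL, holder);
	if (IS_ERR(bdev))
		pr_err("failed to open %s: %ld\n", path, PTR_ERR(bdev));
	return bdev;		/* ERR_PTR() on failure */
}

static void example_release(struct block_device *bdev)
{
	/* Mode must include FMODE_EXCL to drop the exclusive claim. */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
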
bdev             3224 drivers/block/floppy.c static int invalidate_drive(struct block_device *bdev)
bdev             3227 drivers/block/floppy.c 	set_bit((long)bdev->bd_disk->private_data, &fake_change);
bdev             3229 drivers/block/floppy.c 	check_disk_change(bdev);
bdev             3234 drivers/block/floppy.c 			       int drive, int type, struct block_device *bdev)
bdev             3264 drivers/block/floppy.c 			struct block_device *bdev = opened_bdev[cnt];
bdev             3265 drivers/block/floppy.c 			if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
bdev             3267 drivers/block/floppy.c 			__invalidate_device(bdev, true);
bdev             3300 drivers/block/floppy.c 			invalidate_drive(bdev);
bdev             3371 drivers/block/floppy.c static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev             3373 drivers/block/floppy.c 	int drive = (long)bdev->bd_disk->private_data;
bdev             3406 drivers/block/floppy.c static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
bdev             3409 drivers/block/floppy.c 	int drive = (long)bdev->bd_disk->private_data;
bdev             3478 drivers/block/floppy.c 		return invalidate_drive(bdev);
bdev             3481 drivers/block/floppy.c 		return set_geometry(cmd, &inparam.g, drive, type, bdev);
bdev             3517 drivers/block/floppy.c 		return invalidate_drive(bdev);
bdev             3588 drivers/block/floppy.c static int fd_ioctl(struct block_device *bdev, fmode_t mode,
bdev             3594 drivers/block/floppy.c 	ret = fd_locked_ioctl(bdev, mode, cmd, param);
bdev             3676 drivers/block/floppy.c static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned int cmd,
bdev             3694 drivers/block/floppy.c 	drive = (long)bdev->bd_disk->private_data;
bdev             3697 drivers/block/floppy.c 			&v, drive, type, bdev);
bdev             3880 drivers/block/floppy.c static int fd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
bdev             3883 drivers/block/floppy.c 	int drive = (long)bdev->bd_disk->private_data;
bdev             3895 drivers/block/floppy.c 		return fd_ioctl(bdev, mode, cmd, param);
bdev             3902 drivers/block/floppy.c 		return fd_ioctl(bdev, mode, cmd,
bdev             3906 drivers/block/floppy.c 		return compat_set_geometry(bdev, mode, cmd, compat_ptr(param));
bdev             4000 drivers/block/floppy.c static int floppy_open(struct block_device *bdev, fmode_t mode)
bdev             4002 drivers/block/floppy.c 	int drive = (long)bdev->bd_disk->private_data;
bdev             4011 drivers/block/floppy.c 	if (opened_bdev[drive] && opened_bdev[drive] != bdev)
bdev             4021 drivers/block/floppy.c 	opened_bdev[drive] = bdev;
bdev             4055 drivers/block/floppy.c 	new_dev = MINOR(bdev->bd_dev);
bdev             4070 drivers/block/floppy.c 			check_disk_change(bdev);
bdev             4146 drivers/block/floppy.c static int __floppy_read_block_0(struct block_device *bdev, int drive)
bdev             4160 drivers/block/floppy.c 	size = bdev->bd_block_size;
bdev             4167 drivers/block/floppy.c 	bio_set_dev(&bio, bdev);
bdev              234 drivers/block/loop.c 	struct block_device *bdev = lo->lo_device;
bdev              243 drivers/block/loop.c 	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
bdev              245 drivers/block/loop.c 	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
bdev              640 drivers/block/loop.c 				   struct block_device *bdev)
bdev              644 drivers/block/loop.c 	rc = blkdev_reread_part(bdev);
bdev              657 drivers/block/loop.c static int loop_validate_file(struct file *file, struct block_device *bdev)
bdev              666 drivers/block/loop.c 		if (f->f_mapping->host->i_bdev == bdev)
bdev              688 drivers/block/loop.c static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
bdev              712 drivers/block/loop.c 	error = loop_validate_file(file, bdev);
bdev              742 drivers/block/loop.c 		loop_reread_partitions(lo, bdev);
bdev              952 drivers/block/loop.c 		       struct block_device *bdev, unsigned int arg)
bdev              976 drivers/block/loop.c 		claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
bdev              991 drivers/block/loop.c 	error = loop_validate_file(file, bdev);
bdev             1012 drivers/block/loop.c 	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
bdev             1015 drivers/block/loop.c 	lo->lo_device = bdev;
bdev             1040 drivers/block/loop.c 	bd_set_size(bdev, size << 9);
bdev             1043 drivers/block/loop.c 	kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
bdev             1045 drivers/block/loop.c 	set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
bdev             1056 drivers/block/loop.c 	bdgrab(bdev);
bdev             1059 drivers/block/loop.c 		loop_reread_partitions(lo, bdev);
bdev             1061 drivers/block/loop.c 		bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
bdev             1068 drivers/block/loop.c 		bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
bdev             1118 drivers/block/loop.c 	struct block_device *bdev = lo->lo_device;
bdev             1156 drivers/block/loop.c 	if (bdev) {
bdev             1157 drivers/block/loop.c 		bdput(bdev);
bdev             1158 drivers/block/loop.c 		invalidate_bdev(bdev);
bdev             1159 drivers/block/loop.c 		bdev->bd_inode->i_mapping->wb_err = 0;
bdev             1163 drivers/block/loop.c 	if (bdev) {
bdev             1164 drivers/block/loop.c 		bd_set_size(bdev, 0);
bdev             1166 drivers/block/loop.c 		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
bdev             1173 drivers/block/loop.c 	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
bdev             1188 drivers/block/loop.c 			err = __blkdev_reread_part(bdev);
bdev             1190 drivers/block/loop.c 			err = blkdev_reread_part(bdev);
bdev             1263 drivers/block/loop.c 	struct block_device *bdev;
bdev             1367 drivers/block/loop.c 		bdev = lo->lo_device;
bdev             1373 drivers/block/loop.c 		loop_reread_partitions(lo, bdev);
bdev             1611 drivers/block/loop.c static int lo_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1614 drivers/block/loop.c 	struct loop_device *lo = bdev->bd_disk->private_data;
bdev             1619 drivers/block/loop.c 		return loop_set_fd(lo, mode, bdev, arg);
bdev             1621 drivers/block/loop.c 		return loop_change_fd(lo, bdev, arg);
bdev             1774 drivers/block/loop.c static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1777 drivers/block/loop.c 	struct loop_device *lo = bdev->bd_disk->private_data;
bdev             1799 drivers/block/loop.c 		err = lo_ioctl(bdev, mode, cmd, arg);
bdev             1809 drivers/block/loop.c static int lo_open(struct block_device *bdev, fmode_t mode)
bdev             1817 drivers/block/loop.c 	lo = bdev->bd_disk->private_data;
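
lo_ioctl() above implements LOOP_SET_FD, LOOP_CHANGE_FD and friends. From userspace the usual dance is: ask /dev/loop-control for a free minor, open that loop node, then hand it a backing file descriptor. A hedged sketch with error handling trimmed; "disk.img" is just an example backing file:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>		/* LOOP_CTL_GET_FREE, LOOP_SET_FD, LOOP_CLR_FD */

int main(void)
{
	char node[64];
	int ctl = open("/dev/loop-control", O_RDWR);
	int num = ioctl(ctl, LOOP_CTL_GET_FREE);	/* first free minor */
	int backing, loopfd;

	snprintf(node, sizeof(node), "/dev/loop%d", num);
	loopfd  = open(node, O_RDWR);
	backing = open("disk.img", O_RDWR);

	ioctl(loopfd, LOOP_SET_FD, backing);	/* reaches loop_set_fd() above */
	/* ... use /dev/loopN ... */
	ioctl(loopfd, LOOP_CLR_FD, 0);		/* detach again */

	close(backing);
	close(loopfd);
	close(ctl);
	return 0;
}
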
bdev             3689 drivers/block/mtip32xx/mtip32xx.c 	dd->bdev = bdget_disk(dd->disk, 0);
bdev             3723 drivers/block/mtip32xx/mtip32xx.c 	bdput(dd->bdev);
bdev             3724 drivers/block/mtip32xx/mtip32xx.c 	dd->bdev = NULL;
bdev             3810 drivers/block/mtip32xx/mtip32xx.c 	if (dd->bdev) {
bdev             3811 drivers/block/mtip32xx/mtip32xx.c 		bdput(dd->bdev);
bdev             3812 drivers/block/mtip32xx/mtip32xx.c 		dd->bdev = NULL;
bdev             4209 drivers/block/mtip32xx/mtip32xx.c 		fsync_bdev(dd->bdev);
bdev              466 drivers/block/mtip32xx/mtip32xx.h 	struct block_device *bdev;
bdev              302 drivers/block/nbd.c 	struct block_device *bdev = bdget_disk(nbd->disk, 0);
bdev              312 drivers/block/nbd.c 	if (bdev) {
bdev              313 drivers/block/nbd.c 		if (bdev->bd_disk) {
bdev              314 drivers/block/nbd.c 			bd_set_size(bdev, config->bytesize);
bdev              315 drivers/block/nbd.c 			set_blocksize(bdev, config->blksize);
bdev              317 drivers/block/nbd.c 			bdev->bd_invalidated = 1;
bdev              318 drivers/block/nbd.c 		bdput(bdev);
bdev             1118 drivers/block/nbd.c static void nbd_bdev_reset(struct block_device *bdev)
bdev             1120 drivers/block/nbd.c 	if (bdev->bd_openers > 1)
bdev             1122 drivers/block/nbd.c 	bd_set_size(bdev, 0);
bdev             1295 drivers/block/nbd.c static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
bdev             1305 drivers/block/nbd.c 		bdev->bd_invalidated = 1;
bdev             1314 drivers/block/nbd.c 	nbd_bdev_reset(bdev);
bdev             1324 drivers/block/nbd.c 				 struct block_device *bdev)
bdev             1327 drivers/block/nbd.c 	__invalidate_device(bdev, true);
bdev             1328 drivers/block/nbd.c 	nbd_bdev_reset(bdev);
bdev             1350 drivers/block/nbd.c static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
bdev             1359 drivers/block/nbd.c 		nbd_clear_sock_ioctl(nbd, bdev);
bdev             1386 drivers/block/nbd.c 		return nbd_start_device_ioctl(nbd, bdev);
bdev             1403 drivers/block/nbd.c static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1406 drivers/block/nbd.c 	struct nbd_device *nbd = bdev->bd_disk->private_data;
bdev             1426 drivers/block/nbd.c 		error = __nbd_ioctl(bdev, nbd, cmd, arg);
bdev             1449 drivers/block/nbd.c static int nbd_open(struct block_device *bdev, fmode_t mode)
bdev             1455 drivers/block/nbd.c 	nbd = bdev->bd_disk->private_data;
bdev             1481 drivers/block/nbd.c 		bdev->bd_invalidated = 1;
bdev             1483 drivers/block/nbd.c 		bdev->bd_invalidated = 1;
bdev             1493 drivers/block/nbd.c 	struct block_device *bdev = bdget_disk(disk, 0);
bdev             1496 drivers/block/nbd.c 			bdev->bd_openers == 0)
bdev             1428 drivers/block/null_blk_main.c static int null_open(struct block_device *bdev, fmode_t mode)
bdev              231 drivers/block/paride/pcd.c static int pcd_block_open(struct block_device *bdev, fmode_t mode)
bdev              233 drivers/block/paride/pcd.c 	struct pcd_unit *cd = bdev->bd_disk->private_data;
bdev              236 drivers/block/paride/pcd.c 	check_disk_change(bdev);
bdev              239 drivers/block/paride/pcd.c 	ret = cdrom_open(&cd->info, bdev, mode);
bdev              253 drivers/block/paride/pcd.c static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
bdev              256 drivers/block/paride/pcd.c 	struct pcd_unit *cd = bdev->bd_disk->private_data;
bdev              260 drivers/block/paride/pcd.c 	ret = cdrom_ioctl(&cd->info, bdev, mode, cmd, arg);
bdev              791 drivers/block/paride/pd.c static int pd_open(struct block_device *bdev, fmode_t mode)
bdev              793 drivers/block/paride/pd.c 	struct pd_unit *disk = bdev->bd_disk->private_data;
bdev              806 drivers/block/paride/pd.c static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              808 drivers/block/paride/pd.c 	struct pd_unit *disk = bdev->bd_disk->private_data;
bdev              823 drivers/block/paride/pd.c static int pd_ioctl(struct block_device *bdev, fmode_t mode,
bdev              826 drivers/block/paride/pd.c 	struct pd_unit *disk = bdev->bd_disk->private_data;
bdev              208 drivers/block/paride/pf.c static int pf_open(struct block_device *bdev, fmode_t mode);
bdev              211 drivers/block/paride/pf.c static int pf_ioctl(struct block_device *bdev, fmode_t mode,
bdev              213 drivers/block/paride/pf.c static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
bdev              328 drivers/block/paride/pf.c static int pf_open(struct block_device *bdev, fmode_t mode)
bdev              330 drivers/block/paride/pf.c 	struct pf_unit *pf = bdev->bd_disk->private_data;
bdev              353 drivers/block/paride/pf.c static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              355 drivers/block/paride/pf.c 	struct pf_unit *pf = bdev->bd_disk->private_data;
bdev              371 drivers/block/paride/pf.c static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
bdev              373 drivers/block/paride/pf.c 	struct pf_unit *pf = bdev->bd_disk->private_data;
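
pd_getgeo() and pf_getgeo() above show the usual ->getgeo() convention: report a synthetic heads/sectors geometry and derive the cylinder count from the disk capacity so HDIO_GETGEO keeps working for partitioning tools. A sketch under that assumption (mydrv_getgeo and the 64/32 geometry are illustrative, not taken from the paride drivers):

#include <linux/blkdev.h>
#include <linux/hdreg.h>

static int mydrv_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;

	geo->heads = 64;
	geo->sectors = 32;
	/* capacity is in 512-byte sectors; 64 * 32 = 2048 sectors per cylinder */
	geo->cylinders = get_capacity(disk) >> 11;
	return 0;
}
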
bdev              366 drivers/block/pktcdvd.c 			MAJOR(pd->bdev->bd_dev),
bdev              367 drivers/block/pktcdvd.c 			MINOR(pd->bdev->bd_dev));
bdev              702 drivers/block/pktcdvd.c 	struct request_queue *q = bdev_get_queue(pd->bdev);
bdev              725 drivers/block/pktcdvd.c 	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
bdev             1032 drivers/block/pktcdvd.c 		bio_set_dev(bio, pd->bdev);
bdev             1126 drivers/block/pktcdvd.c 	bio_set_dev(pkt->bio, pd->bdev);
bdev             1271 drivers/block/pktcdvd.c 	bio_set_dev(pkt->w_bio, pd->bdev);
bdev             2182 drivers/block/pktcdvd.c 	bdget(pd->bdev->bd_dev);
bdev             2183 drivers/block/pktcdvd.c 	ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd);
bdev             2194 drivers/block/pktcdvd.c 	set_capacity(pd->bdev->bd_disk, lba << 2);
bdev             2195 drivers/block/pktcdvd.c 	bd_set_size(pd->bdev, (loff_t)lba << 11);
bdev             2197 drivers/block/pktcdvd.c 	q = bdev_get_queue(pd->bdev);
bdev             2229 drivers/block/pktcdvd.c 	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
bdev             2246 drivers/block/pktcdvd.c 	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
bdev             2260 drivers/block/pktcdvd.c static int pkt_open(struct block_device *bdev, fmode_t mode)
bdev             2267 drivers/block/pktcdvd.c 	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
bdev             2289 drivers/block/pktcdvd.c 		set_blocksize(bdev, CD_FRAMESIZE);
bdev             2340 drivers/block/pktcdvd.c 	bio_set_dev(cloned_bio, pd->bdev);
bdev             2510 drivers/block/pktcdvd.c 		   bdevname(pd->bdev, bdev_buf));
bdev             2569 drivers/block/pktcdvd.c 	struct block_device *bdev;
bdev             2579 drivers/block/pktcdvd.c 		if (pd2->bdev->bd_dev == dev) {
bdev             2581 drivers/block/pktcdvd.c 				bdevname(pd2->bdev, b));
bdev             2590 drivers/block/pktcdvd.c 	bdev = bdget(dev);
bdev             2591 drivers/block/pktcdvd.c 	if (!bdev)
bdev             2593 drivers/block/pktcdvd.c 	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
bdev             2596 drivers/block/pktcdvd.c 	if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
bdev             2597 drivers/block/pktcdvd.c 		blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
bdev             2604 drivers/block/pktcdvd.c 	pd->bdev = bdev;
bdev             2605 drivers/block/pktcdvd.c 	set_blocksize(bdev, CD_FRAMESIZE);
bdev             2618 drivers/block/pktcdvd.c 	pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
bdev             2622 drivers/block/pktcdvd.c 	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
bdev             2628 drivers/block/pktcdvd.c static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
bdev             2630 drivers/block/pktcdvd.c 	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
bdev             2634 drivers/block/pktcdvd.c 		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
bdev             2654 drivers/block/pktcdvd.c 		ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
bdev             2674 drivers/block/pktcdvd.c 	if (!pd->bdev)
bdev             2676 drivers/block/pktcdvd.c 	attached_disk = pd->bdev->bd_disk;
bdev             2762 drivers/block/pktcdvd.c 	disk->events = pd->bdev->bd_disk->events;
bdev             2821 drivers/block/pktcdvd.c 	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
bdev             2849 drivers/block/pktcdvd.c 		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
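
The pktcdvd lines above attach to the CD writer by device number: bdget() to pin the block_device, blkdev_get() to open it, then set_blocksize(); teardown is a matching blkdev_put(). A trimmed sketch of that sequence with hypothetical my_* names and without the pktcdvd-specific queue checks:

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/fs.h>

static struct block_device *my_attach_by_devt(dev_t dev)
{
	struct block_device *bdev;
	int ret;

	bdev = bdget(dev);		/* take a reference on the bdev */
	if (!bdev)
		return ERR_PTR(-ENOMEM);

	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
	if (ret)
		return ERR_PTR(ret);	/* blkdev_get() dropped the reference */

	set_blocksize(bdev, 2048);	/* CD_FRAMESIZE-sized blocks */
	return bdev;
}

static void my_detach(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
}
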
bdev              659 drivers/block/rbd.c static int rbd_open(struct block_device *bdev, fmode_t mode)
bdev              661 drivers/block/rbd.c 	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
bdev              706 drivers/block/rbd.c static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
bdev              709 drivers/block/rbd.c 	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
bdev              724 drivers/block/rbd.c static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
bdev              727 drivers/block/rbd.c 	return rbd_ioctl(bdev, mode, cmd, arg);
bdev               54 drivers/block/rsxx/dev.c static int rsxx_blkdev_ioctl(struct block_device *bdev,
bdev               59 drivers/block/rsxx/dev.c 	struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
bdev               71 drivers/block/rsxx/dev.c static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev               73 drivers/block/rsxx/dev.c 	struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
bdev             3088 drivers/block/skd_main.c static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev             3093 drivers/block/skd_main.c 	skdev = bdev->bd_disk->private_data;
bdev             3096 drivers/block/skd_main.c 		bdev->bd_disk->disk_name, current->comm);
bdev              123 drivers/block/sunvdc.c static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              125 drivers/block/sunvdc.c 	struct gendisk *disk = bdev->bd_disk;
bdev              143 drivers/block/sunvdc.c static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
bdev              158 drivers/block/sunvdc.c 		disk = bdev->bd_disk;
bdev              160 drivers/block/sunvdc.c 		if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
bdev              609 drivers/block/swim.c static int floppy_open(struct block_device *bdev, fmode_t mode)
bdev              611 drivers/block/swim.c 	struct floppy_state *fs = bdev->bd_disk->private_data;
bdev              641 drivers/block/swim.c 		check_disk_change(bdev);
bdev              659 drivers/block/swim.c static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
bdev              664 drivers/block/swim.c 	ret = floppy_open(bdev, mode);
bdev              686 drivers/block/swim.c static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
bdev              689 drivers/block/swim.c 	struct floppy_state *fs = bdev->bd_disk->private_data;
bdev              713 drivers/block/swim.c static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              715 drivers/block/swim.c 	struct floppy_state *fs = bdev->bd_disk->private_data;
bdev              249 drivers/block/swim3.c static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
bdev              251 drivers/block/swim3.c static int floppy_open(struct block_device *bdev, fmode_t mode);
bdev              868 drivers/block/swim3.c static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode,
bdev              871 drivers/block/swim3.c 	struct floppy_state *fs = bdev->bd_disk->private_data;
bdev              896 drivers/block/swim3.c static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
bdev              902 drivers/block/swim3.c 	ret = floppy_locked_ioctl(bdev, mode, cmd, param);
bdev              908 drivers/block/swim3.c static int floppy_open(struct block_device *bdev, fmode_t mode)
bdev              910 drivers/block/swim3.c 	struct floppy_state *fs = bdev->bd_disk->private_data;
bdev              948 drivers/block/swim3.c 		check_disk_change(bdev);
bdev              977 drivers/block/swim3.c static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
bdev              982 drivers/block/swim3.c 	ret = floppy_open(bdev, mode);
bdev              404 drivers/block/sx8.c static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);
bdev              430 drivers/block/sx8.c static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              432 drivers/block/sx8.c 	struct carm_port *port = bdev->bd_disk->private_data;
bdev              763 drivers/block/umem.c static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              765 drivers/block/umem.c 	struct cardinfo *card = bdev->bd_disk->private_data;
bdev              143 drivers/block/virtio_blk.c static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
bdev              146 drivers/block/virtio_blk.c 	struct gendisk *disk = bdev->bd_disk;
bdev              155 drivers/block/virtio_blk.c 	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
bdev              524 drivers/block/xen-blkback/blkback.c 	req->bdev = vbd->bdev;
bdev             1014 drivers/block/xen-blkback/blkback.c 	struct block_device *bdev = blkif->vbd.bdev;
bdev             1036 drivers/block/xen-blkback/blkback.c 	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
bdev             1333 drivers/block/xen-blkback/blkback.c 		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
bdev             1375 drivers/block/xen-blkback/blkback.c 			bio_set_dev(bio, preq.bdev);
bdev             1394 drivers/block/xen-blkback/blkback.c 		bio_set_dev(bio, preq.bdev);
bdev              225 drivers/block/xen-blkback/common.h 	struct block_device	*bdev;
bdev              361 drivers/block/xen-blkback/common.h #define vbd_sz(_v)	((_v)->bdev->bd_part ? \
bdev              362 drivers/block/xen-blkback/common.h 			 (_v)->bdev->bd_part->nr_sects : \
bdev              363 drivers/block/xen-blkback/common.h 			  get_capacity((_v)->bdev->bd_disk))
bdev              375 drivers/block/xen-blkback/common.h 	struct block_device	*bdev;
bdev               84 drivers/block/xen-blkback/xenbus.c 	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
bdev              102 drivers/block/xen-blkback/xenbus.c 	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
bdev              107 drivers/block/xen-blkback/xenbus.c 	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
bdev              448 drivers/block/xen-blkback/xenbus.c 	if (vbd->bdev)
bdev              449 drivers/block/xen-blkback/xenbus.c 		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
bdev              450 drivers/block/xen-blkback/xenbus.c 	vbd->bdev = NULL;
bdev              458 drivers/block/xen-blkback/xenbus.c 	struct block_device *bdev;
bdev              468 drivers/block/xen-blkback/xenbus.c 	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
bdev              471 drivers/block/xen-blkback/xenbus.c 	if (IS_ERR(bdev)) {
bdev              477 drivers/block/xen-blkback/xenbus.c 	vbd->bdev = bdev;
bdev              478 drivers/block/xen-blkback/xenbus.c 	if (vbd->bdev->bd_disk == NULL) {
bdev              486 drivers/block/xen-blkback/xenbus.c 	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
bdev              488 drivers/block/xen-blkback/xenbus.c 	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
bdev              491 drivers/block/xen-blkback/xenbus.c 	q = bdev_get_queue(bdev);
bdev              549 drivers/block/xen-blkback/xenbus.c 	struct block_device *bdev = be->blkif->vbd.bdev;
bdev              550 drivers/block/xen-blkback/xenbus.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev              893 drivers/block/xen-blkback/xenbus.c 			    bdev_logical_block_size(be->blkif->vbd.bdev));
bdev              900 drivers/block/xen-blkback/xenbus.c 			    bdev_physical_block_size(be->blkif->vbd.bdev));
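
The xen-blkback xenbus lines above open the backing device with blkdev_get_by_dev() and then mine the bdev for the properties exported through xenstore (capacity, logical/physical block size, discard support). A sketch of that attach-and-probe step, with a hypothetical my_vbd container in place of the real vbd structure:

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/err.h>

struct my_vbd {
	struct block_device *bdev;
	bool readonly;
};

static int my_vbd_attach(struct my_vbd *vbd, dev_t pdevice)
{
	struct block_device *bdev;
	struct request_queue *q;

	bdev = blkdev_get_by_dev(pdevice,
				 vbd->readonly ? FMODE_READ : FMODE_WRITE,
				 NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	vbd->bdev = bdev;
	q = bdev_get_queue(bdev);

	pr_info("vbd: %llu sectors, %u/%u byte blocks, discard=%d\n",
		(unsigned long long)(bdev->bd_part ?
				     bdev->bd_part->nr_sects :
				     get_capacity(bdev->bd_disk)),
		bdev_logical_block_size(bdev),
		bdev_physical_block_size(bdev),
		blk_queue_discard(q) ? 1 : 0);
	return 0;
}
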
bdev              492 drivers/block/xen-blkfront.c static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
bdev              495 drivers/block/xen-blkfront.c 	struct blkfront_info *info = bdev->bd_disk->private_data;
bdev             2136 drivers/block/xen-blkfront.c 	struct block_device *bdev = NULL;
bdev             2146 drivers/block/xen-blkfront.c 		bdev = bdget_disk(info->gd, 0);
bdev             2150 drivers/block/xen-blkfront.c 	if (!bdev) {
bdev             2155 drivers/block/xen-blkfront.c 	mutex_lock(&bdev->bd_mutex);
bdev             2157 drivers/block/xen-blkfront.c 	if (bdev->bd_openers) {
bdev             2166 drivers/block/xen-blkfront.c 	mutex_unlock(&bdev->bd_mutex);
bdev             2167 drivers/block/xen-blkfront.c 	bdput(bdev);
bdev             2499 drivers/block/xen-blkfront.c 	struct block_device *bdev = NULL;
bdev             2513 drivers/block/xen-blkfront.c 		bdev = bdget_disk(disk, 0);
bdev             2518 drivers/block/xen-blkfront.c 	if (!bdev) {
bdev             2531 drivers/block/xen-blkfront.c 	mutex_lock(&bdev->bd_mutex);
bdev             2536 drivers/block/xen-blkfront.c 		 xbdev->nodename, bdev->bd_openers);
bdev             2538 drivers/block/xen-blkfront.c 	if (info && !bdev->bd_openers) {
bdev             2546 drivers/block/xen-blkfront.c 	mutex_unlock(&bdev->bd_mutex);
bdev             2547 drivers/block/xen-blkfront.c 	bdput(bdev);
bdev             2559 drivers/block/xen-blkfront.c static int blkif_open(struct block_device *bdev, fmode_t mode)
bdev             2561 drivers/block/xen-blkfront.c 	struct gendisk *disk = bdev->bd_disk;
bdev             2590 drivers/block/xen-blkfront.c 	struct block_device *bdev;
bdev             2595 drivers/block/xen-blkfront.c 	bdev = bdget_disk(disk, 0);
bdev             2597 drivers/block/xen-blkfront.c 	if (!bdev) {
bdev             2601 drivers/block/xen-blkfront.c 	if (bdev->bd_openers)
bdev             2614 drivers/block/xen-blkfront.c 		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
bdev             2623 drivers/block/xen-blkfront.c 		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
bdev             2630 drivers/block/xen-blkfront.c 	bdput(bdev);
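
The xen-blkfront closing/remove lines above use the same guard that zram repeats further down: pin partition 0 with bdget_disk(), test bd_openers under bd_mutex before tearing the disk down, and drop the reference with bdput(). As a stand-alone helper it looks roughly like this (my_disk_is_busy is hypothetical):

#include <linux/blkdev.h>
#include <linux/genhd.h>

static bool my_disk_is_busy(struct gendisk *disk)
{
	struct block_device *bdev;
	bool busy;

	bdev = bdget_disk(disk, 0);	/* whole-disk device, partition 0 */
	if (!bdev)
		return false;

	mutex_lock(&bdev->bd_mutex);
	busy = bdev->bd_openers > 0;
	mutex_unlock(&bdev->bd_mutex);

	bdput(bdev);
	return busy;
}
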
bdev              913 drivers/block/xsysace.c static int ace_open(struct block_device *bdev, fmode_t mode)
bdev              915 drivers/block/xsysace.c 	struct ace_device *ace = bdev->bd_disk->private_data;
bdev              925 drivers/block/xsysace.c 	check_disk_change(bdev);
bdev              950 drivers/block/xsysace.c static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              952 drivers/block/xsysace.c 	struct ace_device *ace = bdev->bd_disk->private_data;
bdev              149 drivers/block/z2ram.c static int z2_open(struct block_device *bdev, fmode_t mode)
bdev              158 drivers/block/z2ram.c     device = MINOR(bdev->bd_dev);
bdev              393 drivers/block/zram/zram_drv.c 	struct block_device *bdev;
bdev              398 drivers/block/zram/zram_drv.c 	bdev = zram->bdev;
bdev              400 drivers/block/zram/zram_drv.c 		set_blocksize(bdev, zram->old_block_size);
bdev              401 drivers/block/zram/zram_drv.c 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
bdev              406 drivers/block/zram/zram_drv.c 	zram->bdev = NULL;
bdev              453 drivers/block/zram/zram_drv.c 	struct block_device *bdev = NULL;
bdev              490 drivers/block/zram/zram_drv.c 	bdev = bdgrab(I_BDEV(inode));
bdev              491 drivers/block/zram/zram_drv.c 	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
bdev              493 drivers/block/zram/zram_drv.c 		bdev = NULL;
bdev              505 drivers/block/zram/zram_drv.c 	old_block_size = block_size(bdev);
bdev              506 drivers/block/zram/zram_drv.c 	err = set_blocksize(bdev, PAGE_SIZE);
bdev              513 drivers/block/zram/zram_drv.c 	zram->bdev = bdev;
bdev              539 drivers/block/zram/zram_drv.c 	if (bdev)
bdev              540 drivers/block/zram/zram_drv.c 		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
bdev              599 drivers/block/zram/zram_drv.c 	bio_set_dev(bio, zram->bdev);
bdev              712 drivers/block/zram/zram_drv.c 		bio_set_dev(&bio, zram->bdev);
bdev             1608 drivers/block/zram/zram_drv.c static void zram_slot_free_notify(struct block_device *bdev,
bdev             1613 drivers/block/zram/zram_drv.c 	zram = bdev->bd_disk->private_data;
bdev             1625 drivers/block/zram/zram_drv.c static int zram_rw_page(struct block_device *bdev, sector_t sector,
bdev             1635 drivers/block/zram/zram_drv.c 	zram = bdev->bd_disk->private_data;
bdev             1760 drivers/block/zram/zram_drv.c 	struct block_device *bdev;
bdev             1770 drivers/block/zram/zram_drv.c 	bdev = bdget_disk(zram->disk, 0);
bdev             1771 drivers/block/zram/zram_drv.c 	if (!bdev)
bdev             1774 drivers/block/zram/zram_drv.c 	mutex_lock(&bdev->bd_mutex);
bdev             1776 drivers/block/zram/zram_drv.c 	if (bdev->bd_openers || zram->claim) {
bdev             1777 drivers/block/zram/zram_drv.c 		mutex_unlock(&bdev->bd_mutex);
bdev             1778 drivers/block/zram/zram_drv.c 		bdput(bdev);
bdev             1784 drivers/block/zram/zram_drv.c 	mutex_unlock(&bdev->bd_mutex);
bdev             1787 drivers/block/zram/zram_drv.c 	fsync_bdev(bdev);
bdev             1790 drivers/block/zram/zram_drv.c 	bdput(bdev);
bdev             1792 drivers/block/zram/zram_drv.c 	mutex_lock(&bdev->bd_mutex);
bdev             1794 drivers/block/zram/zram_drv.c 	mutex_unlock(&bdev->bd_mutex);
bdev             1799 drivers/block/zram/zram_drv.c static int zram_open(struct block_device *bdev, fmode_t mode)
bdev             1804 drivers/block/zram/zram_drv.c 	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));
bdev             1806 drivers/block/zram/zram_drv.c 	zram = bdev->bd_disk->private_data;
bdev             1972 drivers/block/zram/zram_drv.c 	struct block_device *bdev;
bdev             1974 drivers/block/zram/zram_drv.c 	bdev = bdget_disk(zram->disk, 0);
bdev             1975 drivers/block/zram/zram_drv.c 	if (!bdev)
bdev             1978 drivers/block/zram/zram_drv.c 	mutex_lock(&bdev->bd_mutex);
bdev             1979 drivers/block/zram/zram_drv.c 	if (bdev->bd_openers || zram->claim) {
bdev             1980 drivers/block/zram/zram_drv.c 		mutex_unlock(&bdev->bd_mutex);
bdev             1981 drivers/block/zram/zram_drv.c 		bdput(bdev);
bdev             1986 drivers/block/zram/zram_drv.c 	mutex_unlock(&bdev->bd_mutex);
bdev             1991 drivers/block/zram/zram_drv.c 	fsync_bdev(bdev);
bdev             1993 drivers/block/zram/zram_drv.c 	bdput(bdev);
bdev              120 drivers/block/zram/zram_drv.h 	struct block_device *bdev;
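
zram's backing-device lines above grab the block device behind an already-opened file, claim it exclusively, and switch it to PAGE_SIZE blocks, restoring the previous block size when the writeback device is released. A trimmed sketch of that pair, with a hypothetical my_backing container and without zram's validation of the backing file:

#include <linux/blkdev.h>
#include <linux/fs.h>

struct my_backing {
	struct block_device *bdev;
	unsigned int old_block_size;
};

static int my_backing_attach(struct my_backing *b, struct file *file,
			     void *holder)
{
	struct inode *inode = file->f_mapping->host;
	struct block_device *bdev;
	int err;

	bdev = bdgrab(I_BDEV(inode));	/* assumes S_ISBLK(inode->i_mode) */
	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, holder);
	if (err < 0)
		return err;

	b->old_block_size = block_size(bdev);
	err = set_blocksize(bdev, PAGE_SIZE);
	if (err) {
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		return err;
	}

	b->bdev = bdev;
	return 0;
}

static void my_backing_detach(struct my_backing *b)
{
	set_blocksize(b->bdev, b->old_block_size);
	blkdev_put(b->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	b->bdev = NULL;
}
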
bdev              172 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
bdev              191 drivers/bluetooth/btmtksdio.c 	set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
bdev              195 drivers/bluetooth/btmtksdio.c 		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
bdev              208 drivers/bluetooth/btmtksdio.c 	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT,
bdev              212 drivers/bluetooth/btmtksdio.c 		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
bdev              218 drivers/bluetooth/btmtksdio.c 		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
bdev              223 drivers/bluetooth/btmtksdio.c 	wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
bdev              253 drivers/bluetooth/btmtksdio.c 	kfree_skb(bdev->evt_skb);
bdev              254 drivers/bluetooth/btmtksdio.c 	bdev->evt_skb = NULL;
bdev              259 drivers/bluetooth/btmtksdio.c static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
bdev              281 drivers/bluetooth/btmtksdio.c 	err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
bdev              286 drivers/bluetooth/btmtksdio.c 	bdev->hdev->stat.byte_tx += skb->len;
bdev              298 drivers/bluetooth/btmtksdio.c static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
bdev              300 drivers/bluetooth/btmtksdio.c 	return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
bdev              305 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
bdev              310 drivers/bluetooth/btmtksdio.c 	pm_runtime_get_sync(bdev->dev);
bdev              312 drivers/bluetooth/btmtksdio.c 	sdio_claim_host(bdev->func);
bdev              314 drivers/bluetooth/btmtksdio.c 	while ((skb = skb_dequeue(&bdev->txq))) {
bdev              315 drivers/bluetooth/btmtksdio.c 		err = btmtksdio_tx_packet(bdev, skb);
bdev              317 drivers/bluetooth/btmtksdio.c 			bdev->hdev->stat.err_tx++;
bdev              318 drivers/bluetooth/btmtksdio.c 			skb_queue_head(&bdev->txq, skb);
bdev              323 drivers/bluetooth/btmtksdio.c 	sdio_release_host(bdev->func);
bdev              325 drivers/bluetooth/btmtksdio.c 	pm_runtime_mark_last_busy(bdev->dev);
bdev              326 drivers/bluetooth/btmtksdio.c 	pm_runtime_put_autosuspend(bdev->dev);
bdev              331 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
bdev              345 drivers/bluetooth/btmtksdio.c 	if (test_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) {
bdev              346 drivers/bluetooth/btmtksdio.c 		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
bdev              347 drivers/bluetooth/btmtksdio.c 		if (!bdev->evt_skb) {
bdev              359 drivers/bluetooth/btmtksdio.c 				       &bdev->tx_state)) {
bdev              362 drivers/bluetooth/btmtksdio.c 			wake_up_bit(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT);
bdev              369 drivers/bluetooth/btmtksdio.c 	kfree_skb(bdev->evt_skb);
bdev              370 drivers/bluetooth/btmtksdio.c 	bdev->evt_skb = NULL;
bdev              382 drivers/bluetooth/btmtksdio.c static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size)
bdev              401 drivers/bluetooth/btmtksdio.c 	err = sdio_readsb(bdev->func, skb->data, MTK_REG_CRDR, rx_size);
bdev              413 drivers/bluetooth/btmtksdio.c 		bt_dev_err(bdev->hdev, "Rx size in sdio header is mismatched ");
bdev              432 drivers/bluetooth/btmtksdio.c 		bt_dev_err(bdev->hdev, "Invalid bt type 0x%02x",
bdev              439 drivers/bluetooth/btmtksdio.c 		bt_dev_err(bdev->hdev, "The size of bt header is mismatched");
bdev              459 drivers/bluetooth/btmtksdio.c 		bt_dev_err(bdev->hdev, "The size of bt payload is mismatched");
bdev              467 drivers/bluetooth/btmtksdio.c 	(&pkts[i])->recv(bdev->hdev, skb);
bdev              469 drivers/bluetooth/btmtksdio.c 	bdev->hdev->stat.byte_rx += rx_size;
bdev              481 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
bdev              491 drivers/bluetooth/btmtksdio.c 	sdio_release_host(bdev->func);
bdev              493 drivers/bluetooth/btmtksdio.c 	pm_runtime_get_sync(bdev->dev);
bdev              495 drivers/bluetooth/btmtksdio.c 	sdio_claim_host(bdev->func);
bdev              514 drivers/bluetooth/btmtksdio.c 		bt_dev_err(bdev->hdev, "CHISR is 0");
bdev              517 drivers/bluetooth/btmtksdio.c 		bt_dev_dbg(bdev->hdev, "Get fw own back");
bdev              520 drivers/bluetooth/btmtksdio.c 		schedule_work(&bdev->tx_work);
bdev              522 drivers/bluetooth/btmtksdio.c 		bt_dev_warn(bdev->hdev, "Tx fifo overflow");
bdev              527 drivers/bluetooth/btmtksdio.c 		if (btmtksdio_rx_packet(bdev, rx_size) < 0)
bdev              528 drivers/bluetooth/btmtksdio.c 			bdev->hdev->stat.err_rx++;
bdev              534 drivers/bluetooth/btmtksdio.c 	pm_runtime_mark_last_busy(bdev->dev);
bdev              535 drivers/bluetooth/btmtksdio.c 	pm_runtime_put_autosuspend(bdev->dev);
bdev              540 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
bdev              544 drivers/bluetooth/btmtksdio.c 	sdio_claim_host(bdev->func);
bdev              546 drivers/bluetooth/btmtksdio.c 	err = sdio_enable_func(bdev->func);
bdev              551 drivers/bluetooth/btmtksdio.c 	sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
bdev              555 drivers/bluetooth/btmtksdio.c 	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
bdev              558 drivers/bluetooth/btmtksdio.c 		bt_dev_err(bdev->hdev, "Cannot get ownership from device");
bdev              563 drivers/bluetooth/btmtksdio.c 	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err);
bdev              567 drivers/bluetooth/btmtksdio.c 	sdio_writel(bdev->func, 0, MTK_REG_CHIER, &err);
bdev              571 drivers/bluetooth/btmtksdio.c 	err = sdio_claim_irq(bdev->func, btmtksdio_interrupt);
bdev              575 drivers/bluetooth/btmtksdio.c 	err = sdio_set_block_size(bdev->func, MTK_SDIO_BLOCK_SIZE);
bdev              582 drivers/bluetooth/btmtksdio.c 	sdio_writel(bdev->func, SDIO_INT_CTL | SDIO_RE_INIT_EN,
bdev              588 drivers/bluetooth/btmtksdio.c 	sdio_writel(bdev->func, C_INT_CLR_CTRL, MTK_REG_CHCR, &err);
bdev              593 drivers/bluetooth/btmtksdio.c 	sdio_writel(bdev->func, RX_DONE_INT | TX_EMPTY | TX_FIFO_OVERFLOW,
bdev              599 drivers/bluetooth/btmtksdio.c 	sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, &err);
bdev              603 drivers/bluetooth/btmtksdio.c 	sdio_release_host(bdev->func);
bdev              608 drivers/bluetooth/btmtksdio.c 	sdio_release_irq(bdev->func);
bdev              611 drivers/bluetooth/btmtksdio.c 	sdio_disable_func(bdev->func);
bdev              614 drivers/bluetooth/btmtksdio.c 	sdio_release_host(bdev->func);
bdev              621 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
bdev              625 drivers/bluetooth/btmtksdio.c 	sdio_claim_host(bdev->func);
bdev              628 drivers/bluetooth/btmtksdio.c 	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
bdev              630 drivers/bluetooth/btmtksdio.c 	sdio_release_irq(bdev->func);
bdev              633 drivers/bluetooth/btmtksdio.c 	sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);
bdev              635 drivers/bluetooth/btmtksdio.c 	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
bdev              638 drivers/bluetooth/btmtksdio.c 		bt_dev_err(bdev->hdev, "Cannot return ownership to device");
bdev              640 drivers/bluetooth/btmtksdio.c 	sdio_disable_func(bdev->func);
bdev              642 drivers/bluetooth/btmtksdio.c 	sdio_release_host(bdev->func);
bdev              649 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
bdev              651 drivers/bluetooth/btmtksdio.c 	skb_queue_purge(&bdev->txq);
bdev              653 drivers/bluetooth/btmtksdio.c 	cancel_work_sync(&bdev->tx_work);
bdev              758 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
bdev              788 drivers/bluetooth/btmtksdio.c 	err = mtk_setup_firmware(hdev, bdev->data->fwname);
bdev              844 drivers/bluetooth/btmtksdio.c 	pm_runtime_set_autosuspend_delay(bdev->dev,
bdev              846 drivers/bluetooth/btmtksdio.c 	pm_runtime_use_autosuspend(bdev->dev);
bdev              848 drivers/bluetooth/btmtksdio.c 	err = pm_runtime_set_active(bdev->dev);
bdev              855 drivers/bluetooth/btmtksdio.c 	pm_runtime_forbid(bdev->dev);
bdev              856 drivers/bluetooth/btmtksdio.c 	pm_runtime_enable(bdev->dev);
bdev              859 drivers/bluetooth/btmtksdio.c 		pm_runtime_allow(bdev->dev);
bdev              868 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
bdev              876 drivers/bluetooth/btmtksdio.c 	pm_runtime_get_sync(bdev->dev);
bdev              891 drivers/bluetooth/btmtksdio.c 	pm_runtime_put_noidle(bdev->dev);
bdev              892 drivers/bluetooth/btmtksdio.c 	pm_runtime_disable(bdev->dev);
bdev              899 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
bdev              918 drivers/bluetooth/btmtksdio.c 	skb_queue_tail(&bdev->txq, skb);
bdev              920 drivers/bluetooth/btmtksdio.c 	schedule_work(&bdev->tx_work);
bdev              928 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev;
bdev              932 drivers/bluetooth/btmtksdio.c 	bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
bdev              933 drivers/bluetooth/btmtksdio.c 	if (!bdev)
bdev              936 drivers/bluetooth/btmtksdio.c 	bdev->data = (void *)id->driver_data;
bdev              937 drivers/bluetooth/btmtksdio.c 	if (!bdev->data)
bdev              940 drivers/bluetooth/btmtksdio.c 	bdev->dev = &func->dev;
bdev              941 drivers/bluetooth/btmtksdio.c 	bdev->func = func;
bdev              943 drivers/bluetooth/btmtksdio.c 	INIT_WORK(&bdev->tx_work, btmtksdio_tx_work);
bdev              944 drivers/bluetooth/btmtksdio.c 	skb_queue_head_init(&bdev->txq);
bdev              953 drivers/bluetooth/btmtksdio.c 	bdev->hdev = hdev;
bdev              956 drivers/bluetooth/btmtksdio.c 	hci_set_drvdata(hdev, bdev);
bdev              976 drivers/bluetooth/btmtksdio.c 	sdio_set_drvdata(func, bdev);
bdev              983 drivers/bluetooth/btmtksdio.c 	if (pm_runtime_enabled(bdev->dev))
bdev              984 drivers/bluetooth/btmtksdio.c 		pm_runtime_disable(bdev->dev);
bdev              995 drivers/bluetooth/btmtksdio.c 	pm_runtime_put_noidle(bdev->dev);
bdev             1002 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
bdev             1005 drivers/bluetooth/btmtksdio.c 	if (!bdev)
bdev             1009 drivers/bluetooth/btmtksdio.c 	pm_runtime_get_noresume(bdev->dev);
bdev             1011 drivers/bluetooth/btmtksdio.c 	hdev = bdev->hdev;
bdev             1022 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev;
bdev             1026 drivers/bluetooth/btmtksdio.c 	bdev = sdio_get_drvdata(func);
bdev             1027 drivers/bluetooth/btmtksdio.c 	if (!bdev)
bdev             1030 drivers/bluetooth/btmtksdio.c 	sdio_claim_host(bdev->func);
bdev             1032 drivers/bluetooth/btmtksdio.c 	sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
bdev             1036 drivers/bluetooth/btmtksdio.c 	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
bdev             1039 drivers/bluetooth/btmtksdio.c 	bt_dev_info(bdev->hdev, "status (%d) return ownership to device", err);
bdev             1041 drivers/bluetooth/btmtksdio.c 	sdio_release_host(bdev->func);
bdev             1049 drivers/bluetooth/btmtksdio.c 	struct btmtksdio_dev *bdev;
bdev             1053 drivers/bluetooth/btmtksdio.c 	bdev = sdio_get_drvdata(func);
bdev             1054 drivers/bluetooth/btmtksdio.c 	if (!bdev)
bdev             1057 drivers/bluetooth/btmtksdio.c 	sdio_claim_host(bdev->func);
bdev             1059 drivers/bluetooth/btmtksdio.c 	sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
bdev             1063 drivers/bluetooth/btmtksdio.c 	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
bdev             1066 drivers/bluetooth/btmtksdio.c 	bt_dev_info(bdev->hdev, "status (%d) get ownership from device", err);
bdev             1068 drivers/bluetooth/btmtksdio.c 	sdio_release_host(bdev->func);
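
From here on "bdev" is not a struct block_device: in the Bluetooth drivers it is the driver's private context. The btmtksdio probe lines above wire that context in both directions, hci_set_drvdata() so the hdev callbacks can find it and sdio_set_drvdata() so the SDIO interrupt and remove paths can. A minimal sketch of that wiring with hypothetical my_* names (registration, setup callbacks and error unwinding omitted):

#include <linux/slab.h>
#include <linux/mmc/sdio_func.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

struct my_bt_dev {
	struct hci_dev *hdev;
	struct sdio_func *func;
};

static int my_bt_probe(struct sdio_func *func)
{
	struct my_bt_dev *bdev;
	struct hci_dev *hdev;

	bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;
	bdev->func = func;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;
	bdev->hdev = hdev;

	hci_set_drvdata(hdev, bdev);	/* hdev ops: bdev = hci_get_drvdata(hdev) */
	sdio_set_drvdata(func, bdev);	/* irq/remove: bdev = sdio_get_drvdata(func) */

	/* hci_register_dev(), hdev->open/close/send hooks, etc. go here */
	return 0;
}
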
bdev              144 drivers/bluetooth/btmtkuart.c #define btmtkuart_is_standalone(bdev)	\
bdev              145 drivers/bluetooth/btmtkuart.c 	((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
bdev              146 drivers/bluetooth/btmtkuart.c #define btmtkuart_is_builtin_soc(bdev)	\
bdev              147 drivers/bluetooth/btmtkuart.c 	!((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
bdev              152 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
bdev              171 drivers/bluetooth/btmtkuart.c 	set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
bdev              175 drivers/bluetooth/btmtkuart.c 		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
bdev              188 drivers/bluetooth/btmtkuart.c 	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
bdev              192 drivers/bluetooth/btmtkuart.c 		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
bdev              198 drivers/bluetooth/btmtkuart.c 		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
bdev              203 drivers/bluetooth/btmtkuart.c 	wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
bdev              233 drivers/bluetooth/btmtkuart.c 	kfree_skb(bdev->evt_skb);
bdev              234 drivers/bluetooth/btmtkuart.c 	bdev->evt_skb = NULL;
bdev              317 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
bdev              331 drivers/bluetooth/btmtkuart.c 	if (test_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) {
bdev              332 drivers/bluetooth/btmtkuart.c 		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
bdev              333 drivers/bluetooth/btmtkuart.c 		if (!bdev->evt_skb) {
bdev              345 drivers/bluetooth/btmtkuart.c 				       &bdev->tx_state)) {
bdev              348 drivers/bluetooth/btmtkuart.c 			wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
bdev              355 drivers/bluetooth/btmtkuart.c 	kfree_skb(bdev->evt_skb);
bdev              356 drivers/bluetooth/btmtkuart.c 	bdev->evt_skb = NULL;
bdev              370 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
bdev              372 drivers/bluetooth/btmtkuart.c 	struct serdev_device *serdev = bdev->serdev;
bdev              373 drivers/bluetooth/btmtkuart.c 	struct hci_dev *hdev = bdev->hdev;
bdev              376 drivers/bluetooth/btmtkuart.c 		clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
bdev              379 drivers/bluetooth/btmtkuart.c 			struct sk_buff *skb = skb_dequeue(&bdev->txq);
bdev              391 drivers/bluetooth/btmtkuart.c 				skb_queue_head(&bdev->txq, skb);
bdev              410 drivers/bluetooth/btmtkuart.c 		if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
bdev              414 drivers/bluetooth/btmtkuart.c 	clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
bdev              417 drivers/bluetooth/btmtkuart.c static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
bdev              419 drivers/bluetooth/btmtkuart.c 	if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
bdev              420 drivers/bluetooth/btmtkuart.c 		set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);
bdev              422 drivers/bluetooth/btmtkuart.c 	schedule_work(&bdev->tx_work);
bdev              426 drivers/bluetooth/btmtkuart.c mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
bdev              432 drivers/bluetooth/btmtkuart.c 	if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
bdev              433 drivers/bluetooth/btmtkuart.c 		bdev->stp_cursor = 0;
bdev              436 drivers/bluetooth/btmtkuart.c 	while (bdev->stp_cursor < 6 && count > 0) {
bdev              437 drivers/bluetooth/btmtkuart.c 		bdev->stp_pad[bdev->stp_cursor] = *data;
bdev              438 drivers/bluetooth/btmtkuart.c 		bdev->stp_cursor++;
bdev              444 drivers/bluetooth/btmtkuart.c 	if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
bdev              445 drivers/bluetooth/btmtkuart.c 		shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
bdev              446 drivers/bluetooth/btmtkuart.c 		bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;
bdev              449 drivers/bluetooth/btmtkuart.c 		if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
bdev              450 drivers/bluetooth/btmtkuart.c 			bt_dev_err(bdev->hdev, "stp format unexpect (%d, %d)",
bdev              451 drivers/bluetooth/btmtkuart.c 				   shdr->prefix, bdev->stp_dlen);
bdev              452 drivers/bluetooth/btmtkuart.c 			bdev->stp_cursor = 2;
bdev              453 drivers/bluetooth/btmtkuart.c 			bdev->stp_dlen = 0;
bdev              462 drivers/bluetooth/btmtkuart.c 	*sz_h4 = min_t(int, count, bdev->stp_dlen);
bdev              465 drivers/bluetooth/btmtkuart.c 	bdev->stp_dlen -= *sz_h4;
bdev              473 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
bdev              494 drivers/bluetooth/btmtkuart.c 		p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
bdev              502 drivers/bluetooth/btmtkuart.c 		bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
bdev              505 drivers/bluetooth/btmtkuart.c 		if (IS_ERR(bdev->rx_skb)) {
bdev              506 drivers/bluetooth/btmtkuart.c 			err = PTR_ERR(bdev->rx_skb);
bdev              507 drivers/bluetooth/btmtkuart.c 			bt_dev_err(bdev->hdev,
bdev              509 drivers/bluetooth/btmtkuart.c 			bdev->rx_skb = NULL;
bdev              523 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
bdev              526 drivers/bluetooth/btmtkuart.c 	err = btmtkuart_recv(bdev->hdev, data, count);
bdev              530 drivers/bluetooth/btmtkuart.c 	bdev->hdev->stat.byte_rx += count;
bdev              537 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
bdev              539 drivers/bluetooth/btmtkuart.c 	btmtkuart_tx_wakeup(bdev);
bdev              549 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
bdev              553 drivers/bluetooth/btmtkuart.c 	err = serdev_device_open(bdev->serdev);
bdev              556 drivers/bluetooth/btmtkuart.c 			   dev_name(&bdev->serdev->dev));
bdev              560 drivers/bluetooth/btmtkuart.c 	if (btmtkuart_is_standalone(bdev)) {
bdev              561 drivers/bluetooth/btmtkuart.c 		if (bdev->curr_speed != bdev->desired_speed)
bdev              562 drivers/bluetooth/btmtkuart.c 			err = serdev_device_set_baudrate(bdev->serdev,
bdev              565 drivers/bluetooth/btmtkuart.c 			err = serdev_device_set_baudrate(bdev->serdev,
bdev              566 drivers/bluetooth/btmtkuart.c 							 bdev->desired_speed);
bdev              570 drivers/bluetooth/btmtkuart.c 				   dev_name(&bdev->serdev->dev));
bdev              574 drivers/bluetooth/btmtkuart.c 		serdev_device_set_flow_control(bdev->serdev, false);
bdev              577 drivers/bluetooth/btmtkuart.c 	bdev->stp_cursor = 2;
bdev              578 drivers/bluetooth/btmtkuart.c 	bdev->stp_dlen = 0;
bdev              580 drivers/bluetooth/btmtkuart.c 	dev = &bdev->serdev->dev;
bdev              590 drivers/bluetooth/btmtkuart.c 	err = clk_prepare_enable(bdev->clk);
bdev              601 drivers/bluetooth/btmtkuart.c 	serdev_device_close(bdev->serdev);
bdev              608 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
bdev              609 drivers/bluetooth/btmtkuart.c 	struct device *dev = &bdev->serdev->dev;
bdev              612 drivers/bluetooth/btmtkuart.c 	clk_disable_unprepare(bdev->clk);
bdev              616 drivers/bluetooth/btmtkuart.c 	serdev_device_close(bdev->serdev);
bdev              623 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
bdev              626 drivers/bluetooth/btmtkuart.c 	serdev_device_write_flush(bdev->serdev);
bdev              627 drivers/bluetooth/btmtkuart.c 	skb_queue_purge(&bdev->txq);
bdev              629 drivers/bluetooth/btmtkuart.c 	cancel_work_sync(&bdev->tx_work);
bdev              631 drivers/bluetooth/btmtkuart.c 	kfree_skb(bdev->rx_skb);
bdev              632 drivers/bluetooth/btmtkuart.c 	bdev->rx_skb = NULL;
bdev              634 drivers/bluetooth/btmtkuart.c 	bdev->stp_cursor = 2;
bdev              635 drivers/bluetooth/btmtkuart.c 	bdev->stp_dlen = 0;
bdev              664 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
bdev              673 drivers/bluetooth/btmtkuart.c 	baudrate = cpu_to_le32(bdev->desired_speed);
bdev              686 drivers/bluetooth/btmtkuart.c 	err = serdev_device_set_baudrate(bdev->serdev,
bdev              687 drivers/bluetooth/btmtkuart.c 					 bdev->desired_speed);
bdev              694 drivers/bluetooth/btmtkuart.c 	serdev_device_set_flow_control(bdev->serdev, false);
bdev              698 drivers/bluetooth/btmtkuart.c 	err = serdev_device_write(bdev->serdev, &param, sizeof(param),
bdev              703 drivers/bluetooth/btmtkuart.c 	serdev_device_wait_until_sent(bdev->serdev, 0);
bdev              722 drivers/bluetooth/btmtkuart.c 	bdev->curr_speed = bdev->desired_speed;
bdev              729 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
bdev              743 drivers/bluetooth/btmtkuart.c 	if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) {
bdev              756 drivers/bluetooth/btmtkuart.c 		clear_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
bdev              759 drivers/bluetooth/btmtkuart.c 	if (btmtkuart_is_standalone(bdev))
bdev              781 drivers/bluetooth/btmtkuart.c 	err = mtk_setup_firmware(hdev, bdev->data->fwname);
bdev              866 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
bdev              892 drivers/bluetooth/btmtkuart.c 	skb_queue_tail(&bdev->txq, skb);
bdev              894 drivers/bluetooth/btmtkuart.c 	btmtkuart_tx_wakeup(bdev);
bdev              900 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
bdev              905 drivers/bluetooth/btmtkuart.c 	if (btmtkuart_is_standalone(bdev)) {
bdev              908 drivers/bluetooth/btmtkuart.c 		bdev->desired_speed = speed;
bdev              910 drivers/bluetooth/btmtkuart.c 		bdev->vcc = devm_regulator_get(&serdev->dev, "vcc");
bdev              911 drivers/bluetooth/btmtkuart.c 		if (IS_ERR(bdev->vcc)) {
bdev              912 drivers/bluetooth/btmtkuart.c 			err = PTR_ERR(bdev->vcc);
bdev              916 drivers/bluetooth/btmtkuart.c 		bdev->osc = devm_clk_get_optional(&serdev->dev, "osc");
bdev              917 drivers/bluetooth/btmtkuart.c 		if (IS_ERR(bdev->osc)) {
bdev              918 drivers/bluetooth/btmtkuart.c 			err = PTR_ERR(bdev->osc);
bdev              922 drivers/bluetooth/btmtkuart.c 		bdev->boot = devm_gpiod_get_optional(&serdev->dev, "boot",
bdev              924 drivers/bluetooth/btmtkuart.c 		if (IS_ERR(bdev->boot)) {
bdev              925 drivers/bluetooth/btmtkuart.c 			err = PTR_ERR(bdev->boot);
bdev              929 drivers/bluetooth/btmtkuart.c 		bdev->pinctrl = devm_pinctrl_get(&serdev->dev);
bdev              930 drivers/bluetooth/btmtkuart.c 		if (IS_ERR(bdev->pinctrl)) {
bdev              931 drivers/bluetooth/btmtkuart.c 			err = PTR_ERR(bdev->pinctrl);
bdev              935 drivers/bluetooth/btmtkuart.c 		bdev->pins_boot = pinctrl_lookup_state(bdev->pinctrl,
bdev              937 drivers/bluetooth/btmtkuart.c 		if (IS_ERR(bdev->pins_boot) && !bdev->boot) {
bdev              938 drivers/bluetooth/btmtkuart.c 			err = PTR_ERR(bdev->pins_boot);
bdev              944 drivers/bluetooth/btmtkuart.c 		bdev->pins_runtime = pinctrl_lookup_state(bdev->pinctrl,
bdev              946 drivers/bluetooth/btmtkuart.c 		if (IS_ERR(bdev->pins_runtime)) {
bdev              947 drivers/bluetooth/btmtkuart.c 			err = PTR_ERR(bdev->pins_runtime);
bdev              951 drivers/bluetooth/btmtkuart.c 		bdev->reset = devm_gpiod_get_optional(&serdev->dev, "reset",
bdev              953 drivers/bluetooth/btmtkuart.c 		if (IS_ERR(bdev->reset)) {
bdev              954 drivers/bluetooth/btmtkuart.c 			err = PTR_ERR(bdev->reset);
bdev              957 drivers/bluetooth/btmtkuart.c 	} else if (btmtkuart_is_builtin_soc(bdev)) {
bdev              958 drivers/bluetooth/btmtkuart.c 		bdev->clk = devm_clk_get(&serdev->dev, "ref");
bdev              959 drivers/bluetooth/btmtkuart.c 		if (IS_ERR(bdev->clk))
bdev              960 drivers/bluetooth/btmtkuart.c 			return PTR_ERR(bdev->clk);
bdev              968 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev;
bdev              972 drivers/bluetooth/btmtkuart.c 	bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
bdev              973 drivers/bluetooth/btmtkuart.c 	if (!bdev)
bdev              976 drivers/bluetooth/btmtkuart.c 	bdev->data = of_device_get_match_data(&serdev->dev);
bdev              977 drivers/bluetooth/btmtkuart.c 	if (!bdev->data)
bdev              980 drivers/bluetooth/btmtkuart.c 	bdev->serdev = serdev;
bdev              981 drivers/bluetooth/btmtkuart.c 	serdev_device_set_drvdata(serdev, bdev);
bdev              989 drivers/bluetooth/btmtkuart.c 	INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
bdev              990 drivers/bluetooth/btmtkuart.c 	skb_queue_head_init(&bdev->txq);
bdev              999 drivers/bluetooth/btmtkuart.c 	bdev->hdev = hdev;
bdev             1002 drivers/bluetooth/btmtkuart.c 	hci_set_drvdata(hdev, bdev);
bdev             1015 drivers/bluetooth/btmtkuart.c 	if (btmtkuart_is_standalone(bdev)) {
bdev             1016 drivers/bluetooth/btmtkuart.c 		err = clk_prepare_enable(bdev->osc);
bdev             1020 drivers/bluetooth/btmtkuart.c 		if (bdev->boot) {
bdev             1021 drivers/bluetooth/btmtkuart.c 			gpiod_set_value_cansleep(bdev->boot, 1);
bdev             1026 drivers/bluetooth/btmtkuart.c 			pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
bdev             1030 drivers/bluetooth/btmtkuart.c 		err = regulator_enable(bdev->vcc);
bdev             1032 drivers/bluetooth/btmtkuart.c 			clk_disable_unprepare(bdev->osc);
bdev             1039 drivers/bluetooth/btmtkuart.c 		if (bdev->reset) {
bdev             1040 drivers/bluetooth/btmtkuart.c 			gpiod_set_value_cansleep(bdev->reset, 1);
bdev             1042 drivers/bluetooth/btmtkuart.c 			gpiod_set_value_cansleep(bdev->reset, 0);
bdev             1050 drivers/bluetooth/btmtkuart.c 		if (bdev->boot)
bdev             1051 drivers/bluetooth/btmtkuart.c 			devm_gpiod_put(&serdev->dev, bdev->boot);
bdev             1053 drivers/bluetooth/btmtkuart.c 		pinctrl_select_state(bdev->pinctrl, bdev->pins_runtime);
bdev             1060 drivers/bluetooth/btmtkuart.c 		set_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
bdev             1073 drivers/bluetooth/btmtkuart.c 	if (btmtkuart_is_standalone(bdev))
bdev             1074 drivers/bluetooth/btmtkuart.c 		regulator_disable(bdev->vcc);
bdev             1081 drivers/bluetooth/btmtkuart.c 	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
bdev             1082 drivers/bluetooth/btmtkuart.c 	struct hci_dev *hdev = bdev->hdev;
bdev             1084 drivers/bluetooth/btmtkuart.c 	if (btmtkuart_is_standalone(bdev)) {
bdev             1085 drivers/bluetooth/btmtkuart.c 		regulator_disable(bdev->vcc);
bdev             1086 drivers/bluetooth/btmtkuart.c 		clk_disable_unprepare(bdev->osc);
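
Both MediaTek drivers above synchronize vendor (WMT) commands with a bit-wait handshake on tx_state: the sender sets the *_TX_WAIT_VND_EVT bit and sleeps in wait_on_bit_timeout(); the receive path clones the event into evt_skb, clears the bit with test_and_clear_bit() and calls wake_up_bit(). A sketch of that handshake under hypothetical names (MY_TX_WAIT_VND_EVT, struct my_mtk_dev); transmission of the command itself is elided:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/wait_bit.h>

#define MY_TX_WAIT_VND_EVT	1

struct my_mtk_dev {
	unsigned long tx_state;
	struct sk_buff *evt_skb;
};

static int my_send_wmt_and_wait(struct my_mtk_dev *bdev)
{
	int err;

	set_bit(MY_TX_WAIT_VND_EVT, &bdev->tx_state);

	/* ... queue the WMT command and kick the TX path here ... */

	err = wait_on_bit_timeout(&bdev->tx_state, MY_TX_WAIT_VND_EVT,
				  TASK_INTERRUPTIBLE, HZ);
	if (err == -EINTR) {
		clear_bit(MY_TX_WAIT_VND_EVT, &bdev->tx_state);
		return err;			/* interrupted by a signal */
	}
	if (err) {
		clear_bit(MY_TX_WAIT_VND_EVT, &bdev->tx_state);
		return -ETIMEDOUT;		/* no event within the timeout */
	}

	/* bdev->evt_skb now holds the reply; the caller parses and frees it */
	return 0;
}

/* Called from the RX path when a vendor event frame arrives. */
static void my_got_vnd_evt(struct my_mtk_dev *bdev, struct sk_buff *skb)
{
	if (test_and_clear_bit(MY_TX_WAIT_VND_EVT, &bdev->tx_state)) {
		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
		wake_up_bit(&bdev->tx_state, MY_TX_WAIT_VND_EVT);
	}
}
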
bdev              286 drivers/bluetooth/hci_bcm.c 	struct bcm_device *bdev = data;
bdev              288 drivers/bluetooth/hci_bcm.c 	bt_dev_dbg(bdev, "Host wake IRQ");
bdev              290 drivers/bluetooth/hci_bcm.c 	pm_runtime_get(bdev->dev);
bdev              291 drivers/bluetooth/hci_bcm.c 	pm_runtime_mark_last_busy(bdev->dev);
bdev              292 drivers/bluetooth/hci_bcm.c 	pm_runtime_put_autosuspend(bdev->dev);
bdev              299 drivers/bluetooth/hci_bcm.c 	struct bcm_device *bdev = bcm->dev;
bdev              303 drivers/bluetooth/hci_bcm.c 	if (!bcm_device_exists(bdev)) {
bdev              308 drivers/bluetooth/hci_bcm.c 	if (bdev->irq <= 0) {
bdev              313 drivers/bluetooth/hci_bcm.c 	err = devm_request_irq(bdev->dev, bdev->irq, bcm_host_wake,
bdev              314 drivers/bluetooth/hci_bcm.c 			       bdev->irq_active_low ? IRQF_TRIGGER_FALLING :
bdev              316 drivers/bluetooth/hci_bcm.c 			       "host_wake", bdev);
bdev              318 drivers/bluetooth/hci_bcm.c 		bdev->irq = err;
bdev              322 drivers/bluetooth/hci_bcm.c 	device_init_wakeup(bdev->dev, true);
bdev              324 drivers/bluetooth/hci_bcm.c 	pm_runtime_set_autosuspend_delay(bdev->dev,
bdev              326 drivers/bluetooth/hci_bcm.c 	pm_runtime_use_autosuspend(bdev->dev);
bdev              327 drivers/bluetooth/hci_bcm.c 	pm_runtime_set_active(bdev->dev);
bdev              328 drivers/bluetooth/hci_bcm.c 	pm_runtime_enable(bdev->dev);
bdev              472 drivers/bluetooth/hci_bcm.c 	struct bcm_device *bdev = NULL;
bdev              481 drivers/bluetooth/hci_bcm.c 		bdev = serdev_device_get_drvdata(hu->serdev);
bdev              483 drivers/bluetooth/hci_bcm.c 		bdev = bcm->dev;
bdev              485 drivers/bluetooth/hci_bcm.c 		bdev->hu = NULL;
bdev              489 drivers/bluetooth/hci_bcm.c 	if (bdev) {
bdev              490 drivers/bluetooth/hci_bcm.c 		if (IS_ENABLED(CONFIG_PM) && bdev->irq > 0) {
bdev              491 drivers/bluetooth/hci_bcm.c 			devm_free_irq(bdev->dev, bdev->irq, bdev);
bdev              492 drivers/bluetooth/hci_bcm.c 			device_init_wakeup(bdev->dev, false);
bdev              493 drivers/bluetooth/hci_bcm.c 			pm_runtime_disable(bdev->dev);
bdev              496 drivers/bluetooth/hci_bcm.c 		err = bcm_gpio_set_power(bdev, false);
bdev              500 drivers/bluetooth/hci_bcm.c 			pm_runtime_set_suspended(bdev->dev);
bdev              673 drivers/bluetooth/hci_bcm.c 	struct bcm_device *bdev = NULL;
bdev              678 drivers/bluetooth/hci_bcm.c 		bdev = bcm->dev;
bdev              679 drivers/bluetooth/hci_bcm.c 		pm_runtime_get_sync(bdev->dev);
bdev              685 drivers/bluetooth/hci_bcm.c 	if (bdev) {
bdev              686 drivers/bluetooth/hci_bcm.c 		pm_runtime_mark_last_busy(bdev->dev);
bdev              687 drivers/bluetooth/hci_bcm.c 		pm_runtime_put_autosuspend(bdev->dev);
bdev              698 drivers/bluetooth/hci_bcm.c 	struct bcm_device *bdev = dev_get_drvdata(dev);
bdev              701 drivers/bluetooth/hci_bcm.c 	bt_dev_dbg(bdev, "");
bdev              703 drivers/bluetooth/hci_bcm.c 	if (!bdev->is_suspended && bdev->hu) {
bdev              704 drivers/bluetooth/hci_bcm.c 		hci_uart_set_flow_control(bdev->hu, true);
bdev              707 drivers/bluetooth/hci_bcm.c 		bdev->is_suspended = true;
bdev              711 drivers/bluetooth/hci_bcm.c 	err = bdev->set_device_wakeup(bdev, false);
bdev              713 drivers/bluetooth/hci_bcm.c 		if (bdev->is_suspended && bdev->hu) {
bdev              714 drivers/bluetooth/hci_bcm.c 			bdev->is_suspended = false;
bdev              715 drivers/bluetooth/hci_bcm.c 			hci_uart_set_flow_control(bdev->hu, false);
bdev              720 drivers/bluetooth/hci_bcm.c 	bt_dev_dbg(bdev, "suspend, delaying 15 ms");
bdev              728 drivers/bluetooth/hci_bcm.c 	struct bcm_device *bdev = dev_get_drvdata(dev);
bdev              731 drivers/bluetooth/hci_bcm.c 	bt_dev_dbg(bdev, "");
bdev              733 drivers/bluetooth/hci_bcm.c 	err = bdev->set_device_wakeup(bdev, true);
bdev              739 drivers/bluetooth/hci_bcm.c 	bt_dev_dbg(bdev, "resume, delaying 15 ms");
bdev              743 drivers/bluetooth/hci_bcm.c 	if (bdev->is_suspended && bdev->hu) {
bdev              744 drivers/bluetooth/hci_bcm.c 		bdev->is_suspended = false;
bdev              746 drivers/bluetooth/hci_bcm.c 		hci_uart_set_flow_control(bdev->hu, false);
bdev              757 drivers/bluetooth/hci_bcm.c 	struct bcm_device *bdev = dev_get_drvdata(dev);
bdev              760 drivers/bluetooth/hci_bcm.c 	bt_dev_dbg(bdev, "suspend: is_suspended %d", bdev->is_suspended);
bdev              770 drivers/bluetooth/hci_bcm.c 	if (!bdev->hu)
bdev              776 drivers/bluetooth/hci_bcm.c 	if (device_may_wakeup(dev) && bdev->irq > 0) {
bdev              777 drivers/bluetooth/hci_bcm.c 		error = enable_irq_wake(bdev->irq);
bdev              779 drivers/bluetooth/hci_bcm.c 			bt_dev_dbg(bdev, "BCM irq: enabled");
bdev              791 drivers/bluetooth/hci_bcm.c 	struct bcm_device *bdev = dev_get_drvdata(dev);
bdev              794 drivers/bluetooth/hci_bcm.c 	bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended);
bdev              804 drivers/bluetooth/hci_bcm.c 	if (!bdev->hu)
bdev              807 drivers/bluetooth/hci_bcm.c 	if (device_may_wakeup(dev) && bdev->irq > 0) {
bdev              808 drivers/bluetooth/hci_bcm.c 		disable_irq_wake(bdev->irq);
bdev              809 drivers/bluetooth/hci_bcm.c 		bt_dev_dbg(bdev, "BCM irq: disabled");
bdev             1111 drivers/bluetooth/hci_bcm.c static int bcm_of_probe(struct bcm_device *bdev)
bdev             1113 drivers/bluetooth/hci_bcm.c 	device_property_read_u32(bdev->dev, "max-speed", &bdev->oper_speed);
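
The hci_bcm host-wake handler above shows the common runtime-PM kick from a wake interrupt: an asynchronous pm_runtime_get() to start resuming the device, then mark-last-busy plus an autosuspend put so the device stays up only for the autosuspend delay. The same three calls as a stand-alone handler (my_wake_irq and the device pointer passed as dev_id are hypothetical):

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static irqreturn_t my_wake_irq(int irq, void *data)
{
	struct device *dev = data;	/* passed as dev_id at request_irq time */

	pm_runtime_get(dev);		/* async resume request, usable from IRQ context */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return IRQ_HANDLED;
}
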
bdev             1155 drivers/cdrom/cdrom.c int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
bdev             2472 drivers/cdrom/cdrom.c 		struct block_device *bdev)
bdev             2480 drivers/cdrom/cdrom.c 	invalidate_bdev(bdev);
bdev             3329 drivers/cdrom/cdrom.c int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
bdev             3338 drivers/cdrom/cdrom.c 	ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
bdev             3362 drivers/cdrom/cdrom.c 		return cdrom_ioctl_reset(cdi, bdev);
bdev              478 drivers/cdrom/gdrom.c static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
bdev              482 drivers/cdrom/gdrom.c 	check_disk_change(bdev);
bdev              485 drivers/cdrom/gdrom.c 	ret = cdrom_open(gd.cd_info, bdev, mode);
bdev              503 drivers/cdrom/gdrom.c static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode,
bdev              509 drivers/cdrom/gdrom.c 	ret = cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
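
pcd (earlier) and gdrom (above) both keep their block_device_operations thin: open revalidates the media with check_disk_change() and defers to cdrom_open(), and ioctl defers to cdrom_ioctl(), so the shared cdrom layer does the real work. A sketch with a hypothetical my_cd_info (the locking the real drivers take around these calls is omitted):

#include <linux/blkdev.h>
#include <linux/cdrom.h>

static struct cdrom_device_info my_cd_info;	/* filled in at probe time */

static int my_cd_open(struct block_device *bdev, fmode_t mode)
{
	check_disk_change(bdev);
	return cdrom_open(&my_cd_info, bdev, mode);
}

static int my_cd_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned int cmd, unsigned long arg)
{
	return cdrom_ioctl(&my_cd_info, bdev, mode, cmd, arg);
}
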
bdev               58 drivers/char/raw.c 	struct block_device *bdev;
bdev               71 drivers/char/raw.c 	bdev = raw_devices[minor].binding;
bdev               73 drivers/char/raw.c 	if (!bdev)
bdev               75 drivers/char/raw.c 	bdgrab(bdev);
bdev               76 drivers/char/raw.c 	err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
bdev               79 drivers/char/raw.c 	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
bdev               83 drivers/char/raw.c 	filp->f_mapping = bdev->bd_inode->i_mapping;
bdev               86 drivers/char/raw.c 			bdev->bd_inode->i_mapping;
bdev               87 drivers/char/raw.c 	filp->private_data = bdev;
bdev               92 drivers/char/raw.c 	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
bdev              105 drivers/char/raw.c 	struct block_device *bdev;
bdev              108 drivers/char/raw.c 	bdev = raw_devices[minor].binding;
bdev              114 drivers/char/raw.c 	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
bdev              124 drivers/char/raw.c 	struct block_device *bdev = filp->private_data;
bdev              125 drivers/char/raw.c 	return blkdev_ioctl(bdev, 0, command, arg);
bdev              191 drivers/char/raw.c 	struct block_device *bdev;
bdev              199 drivers/char/raw.c 	bdev = rawdev->binding;
bdev              200 drivers/char/raw.c 	*dev = bdev ? bdev->bd_dev : 0;
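
The raw.c open path above binds the raw character device to its backing block device: take an extra reference with bdgrab(), open it with blkdev_get() holding FMODE_EXCL, reset the block size to the logical block size, and repoint the file's mapping at the bdev's page cache; release undoes it with blkdev_put(). A trimmed sketch of the bind step (the mutex and the unbound-minor check are left out, and my_raw_bind stands in for the real holder token):

#include <linux/blkdev.h>
#include <linux/fs.h>

static int my_raw_bind(struct file *filp, struct block_device *bdev)
{
	int err;

	bdgrab(bdev);			/* reference consumed by blkdev_get() on error */
	err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, my_raw_bind);
	if (err)
		return err;

	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
	if (err) {
		blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
		return err;
	}

	filp->f_mapping = bdev->bd_inode->i_mapping;
	filp->private_data = bdev;	/* released via blkdev_put() on close */
	return 0;
}
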
bdev               46 drivers/dax/super.c int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
bdev               49 drivers/dax/super.c 	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;
bdev               60 drivers/dax/super.c struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
bdev               62 drivers/dax/super.c 	if (!blk_queue_dax(bdev->bd_queue))
bdev               64 drivers/dax/super.c 	return fs_dax_get_by_host(bdev->bd_disk->disk_name);
bdev               70 drivers/dax/super.c 		struct block_device *bdev, int blocksize, sector_t start,
bdev               84 drivers/dax/super.c 				bdevname(bdev, buf));
bdev               88 drivers/dax/super.c 	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
bdev               91 drivers/dax/super.c 				bdevname(bdev, buf));
bdev               96 drivers/dax/super.c 	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
bdev               99 drivers/dax/super.c 				bdevname(bdev, buf));
bdev              110 drivers/dax/super.c 				bdevname(bdev, buf), len < 1 ? len : len2);
bdev              143 drivers/dax/super.c 				bdevname(bdev, buf));
bdev              160 drivers/dax/super.c bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
bdev              168 drivers/dax/super.c 	q = bdev_get_queue(bdev);
bdev              171 drivers/dax/super.c 				bdevname(bdev, buf));
bdev              175 drivers/dax/super.c 	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
bdev              178 drivers/dax/super.c 				bdevname(bdev, buf));
bdev              183 drivers/dax/super.c 	ret = dax_supported(dax_dev, bdev, blocksize, 0,
bdev              184 drivers/dax/super.c 			i_size_read(bdev->bd_inode) / 512);
bdev              318 drivers/dax/super.c bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
bdev              324 drivers/dax/super.c 	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
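
bdev_dax_pgoff() above turns a partition-relative sector into a page offset within the whole-disk dax device: add the partition start from get_start_sect(), scale to bytes, and convert to pages. Only the first line of the arithmetic is visible in the listing; the page-alignment check below is an assumption about what the rest of the helper enforces (my_dax_pgoff is a hypothetical stand-in):

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/pfn.h>

static int my_dax_pgoff(struct block_device *bdev, sector_t sector,
			size_t size, pgoff_t *pgoff)
{
	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
		return -EINVAL;		/* dax only deals in whole pages */

	*pgoff = PHYS_PFN(phys_off);
	return 0;
}
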
bdev              350 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev;
bdev              408 drivers/dma/qcom/bam_dma.c static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
bdev              411 drivers/dma/qcom/bam_dma.c 	const struct reg_offset_data r = bdev->layout[reg];
bdev              413 drivers/dma/qcom/bam_dma.c 	return bdev->regs + r.base_offset +
bdev              416 drivers/dma/qcom/bam_dma.c 		r.ee_mult * bdev->ee;
bdev              427 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = bchan->bdev;
bdev              432 drivers/dma/qcom/bam_dma.c 	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
bdev              433 drivers/dma/qcom/bam_dma.c 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));
bdev              452 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = bchan->bdev;
bdev              463 drivers/dma/qcom/bam_dma.c 			bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
bdev              465 drivers/dma/qcom/bam_dma.c 			bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
bdev              469 drivers/dma/qcom/bam_dma.c 			bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
bdev              472 drivers/dma/qcom/bam_dma.c 	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
bdev              474 drivers/dma/qcom/bam_dma.c 	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
bdev              484 drivers/dma/qcom/bam_dma.c 	writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));
bdev              502 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = bchan->bdev;
bdev              508 drivers/dma/qcom/bam_dma.c 	bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
bdev              512 drivers/dma/qcom/bam_dma.c 		dev_err(bdev->dev, "Failed to allocate desc fifo\n");
bdev              537 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = bchan->bdev;
bdev              542 drivers/dma/qcom/bam_dma.c 	ret = bam_pm_runtime_get_sync(bdev->dev);
bdev              549 drivers/dma/qcom/bam_dma.c 		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
bdev              557 drivers/dma/qcom/bam_dma.c 	dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
bdev              562 drivers/dma/qcom/bam_dma.c 	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
bdev              564 drivers/dma/qcom/bam_dma.c 	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
bdev              567 drivers/dma/qcom/bam_dma.c 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
bdev              570 drivers/dma/qcom/bam_dma.c 	pm_runtime_mark_last_busy(bdev->dev);
bdev              571 drivers/dma/qcom/bam_dma.c 	pm_runtime_put_autosuspend(bdev->dev);
bdev              612 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = bchan->bdev;
bdev              621 drivers/dma/qcom/bam_dma.c 		dev_err(bdev->dev, "invalid dma direction\n");
bdev              738 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = bchan->bdev;
bdev              742 drivers/dma/qcom/bam_dma.c 	ret = bam_pm_runtime_get_sync(bdev->dev);
bdev              747 drivers/dma/qcom/bam_dma.c 	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
bdev              750 drivers/dma/qcom/bam_dma.c 	pm_runtime_mark_last_busy(bdev->dev);
bdev              751 drivers/dma/qcom/bam_dma.c 	pm_runtime_put_autosuspend(bdev->dev);
bdev              764 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = bchan->bdev;
bdev              768 drivers/dma/qcom/bam_dma.c 	ret = bam_pm_runtime_get_sync(bdev->dev);
bdev              773 drivers/dma/qcom/bam_dma.c 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
bdev              776 drivers/dma/qcom/bam_dma.c 	pm_runtime_mark_last_busy(bdev->dev);
bdev              777 drivers/dma/qcom/bam_dma.c 	pm_runtime_put_autosuspend(bdev->dev);
bdev              789 drivers/dma/qcom/bam_dma.c static u32 process_channel_irqs(struct bam_device *bdev)
bdev              795 drivers/dma/qcom/bam_dma.c 	srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
bdev              801 drivers/dma/qcom/bam_dma.c 	for (i = 0; i < bdev->num_channels; i++) {
bdev              802 drivers/dma/qcom/bam_dma.c 		struct bam_chan *bchan = &bdev->channels[i];
bdev              808 drivers/dma/qcom/bam_dma.c 		pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));
bdev              810 drivers/dma/qcom/bam_dma.c 		writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
bdev              814 drivers/dma/qcom/bam_dma.c 		offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
bdev              867 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = data;
bdev              871 drivers/dma/qcom/bam_dma.c 	srcs |= process_channel_irqs(bdev);
bdev              875 drivers/dma/qcom/bam_dma.c 		tasklet_schedule(&bdev->task);
bdev              877 drivers/dma/qcom/bam_dma.c 	ret = bam_pm_runtime_get_sync(bdev->dev);
bdev              882 drivers/dma/qcom/bam_dma.c 		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
bdev              890 drivers/dma/qcom/bam_dma.c 		writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
bdev              893 drivers/dma/qcom/bam_dma.c 	pm_runtime_mark_last_busy(bdev->dev);
bdev              894 drivers/dma/qcom/bam_dma.c 	pm_runtime_put_autosuspend(bdev->dev);
bdev              958 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = bchan->bdev;
bdev              961 drivers/dma/qcom/bam_dma.c 	if (!bdev->controlled_remotely) {
bdev              968 drivers/dma/qcom/bam_dma.c 			       bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
bdev              981 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = bchan->bdev;
bdev              995 drivers/dma/qcom/bam_dma.c 	ret = bam_pm_runtime_get_sync(bdev->dev);
bdev             1066 drivers/dma/qcom/bam_dma.c 			bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
bdev             1068 drivers/dma/qcom/bam_dma.c 	pm_runtime_mark_last_busy(bdev->dev);
bdev             1069 drivers/dma/qcom/bam_dma.c 	pm_runtime_put_autosuspend(bdev->dev);
bdev             1080 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = (struct bam_device *)data;
bdev             1086 drivers/dma/qcom/bam_dma.c 	for (i = 0; i < bdev->num_channels; i++) {
bdev             1087 drivers/dma/qcom/bam_dma.c 		bchan = &bdev->channels[i];
bdev             1133 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = container_of(of->of_dma_data,
bdev             1141 drivers/dma/qcom/bam_dma.c 	if (request >= bdev->num_channels)
bdev             1144 drivers/dma/qcom/bam_dma.c 	return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
bdev             1153 drivers/dma/qcom/bam_dma.c static int bam_init(struct bam_device *bdev)
bdev             1158 drivers/dma/qcom/bam_dma.c 	if (!bdev->num_ees) {
bdev             1159 drivers/dma/qcom/bam_dma.c 		val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
bdev             1160 drivers/dma/qcom/bam_dma.c 		bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
bdev             1164 drivers/dma/qcom/bam_dma.c 	if (bdev->ee >= bdev->num_ees)
bdev             1167 drivers/dma/qcom/bam_dma.c 	if (!bdev->num_channels) {
bdev             1168 drivers/dma/qcom/bam_dma.c 		val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
bdev             1169 drivers/dma/qcom/bam_dma.c 		bdev->num_channels = val & BAM_NUM_PIPES_MASK;
bdev             1172 drivers/dma/qcom/bam_dma.c 	if (bdev->controlled_remotely)
bdev             1177 drivers/dma/qcom/bam_dma.c 	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
bdev             1179 drivers/dma/qcom/bam_dma.c 	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
bdev             1181 drivers/dma/qcom/bam_dma.c 	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
bdev             1188 drivers/dma/qcom/bam_dma.c 	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
bdev             1192 drivers/dma/qcom/bam_dma.c 			bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
bdev             1195 drivers/dma/qcom/bam_dma.c 	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
bdev             1199 drivers/dma/qcom/bam_dma.c 			bam_addr(bdev, 0, BAM_IRQ_EN));
bdev             1202 drivers/dma/qcom/bam_dma.c 	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
bdev             1207 drivers/dma/qcom/bam_dma.c static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
bdev             1211 drivers/dma/qcom/bam_dma.c 	bchan->bdev = bdev;
bdev             1213 drivers/dma/qcom/bam_dma.c 	vchan_init(&bchan->vc, &bdev->common);
bdev             1229 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev;
bdev             1234 drivers/dma/qcom/bam_dma.c 	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
bdev             1235 drivers/dma/qcom/bam_dma.c 	if (!bdev)
bdev             1238 drivers/dma/qcom/bam_dma.c 	bdev->dev = &pdev->dev;
bdev             1246 drivers/dma/qcom/bam_dma.c 	bdev->layout = match->data;
bdev             1249 drivers/dma/qcom/bam_dma.c 	bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
bdev             1250 drivers/dma/qcom/bam_dma.c 	if (IS_ERR(bdev->regs))
bdev             1251 drivers/dma/qcom/bam_dma.c 		return PTR_ERR(bdev->regs);
bdev             1253 drivers/dma/qcom/bam_dma.c 	bdev->irq = platform_get_irq(pdev, 0);
bdev             1254 drivers/dma/qcom/bam_dma.c 	if (bdev->irq < 0)
bdev             1255 drivers/dma/qcom/bam_dma.c 		return bdev->irq;
bdev             1257 drivers/dma/qcom/bam_dma.c 	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
bdev             1259 drivers/dma/qcom/bam_dma.c 		dev_err(bdev->dev, "Execution environment unspecified\n");
bdev             1263 drivers/dma/qcom/bam_dma.c 	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
bdev             1266 drivers/dma/qcom/bam_dma.c 	if (bdev->controlled_remotely) {
bdev             1268 drivers/dma/qcom/bam_dma.c 					   &bdev->num_channels);
bdev             1270 drivers/dma/qcom/bam_dma.c 			dev_err(bdev->dev, "num-channels unspecified in dt\n");
bdev             1273 drivers/dma/qcom/bam_dma.c 					   &bdev->num_ees);
bdev             1275 drivers/dma/qcom/bam_dma.c 			dev_err(bdev->dev, "num-ees unspecified in dt\n");
bdev             1278 drivers/dma/qcom/bam_dma.c 	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
bdev             1279 drivers/dma/qcom/bam_dma.c 	if (IS_ERR(bdev->bamclk)) {
bdev             1280 drivers/dma/qcom/bam_dma.c 		if (!bdev->controlled_remotely)
bdev             1281 drivers/dma/qcom/bam_dma.c 			return PTR_ERR(bdev->bamclk);
bdev             1283 drivers/dma/qcom/bam_dma.c 		bdev->bamclk = NULL;
bdev             1286 drivers/dma/qcom/bam_dma.c 	ret = clk_prepare_enable(bdev->bamclk);
bdev             1288 drivers/dma/qcom/bam_dma.c 		dev_err(bdev->dev, "failed to prepare/enable clock\n");
bdev             1292 drivers/dma/qcom/bam_dma.c 	ret = bam_init(bdev);
bdev             1296 drivers/dma/qcom/bam_dma.c 	tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);
bdev             1298 drivers/dma/qcom/bam_dma.c 	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
bdev             1299 drivers/dma/qcom/bam_dma.c 				sizeof(*bdev->channels), GFP_KERNEL);
bdev             1301 drivers/dma/qcom/bam_dma.c 	if (!bdev->channels) {
bdev             1307 drivers/dma/qcom/bam_dma.c 	INIT_LIST_HEAD(&bdev->common.channels);
bdev             1309 drivers/dma/qcom/bam_dma.c 	for (i = 0; i < bdev->num_channels; i++)
bdev             1310 drivers/dma/qcom/bam_dma.c 		bam_channel_init(bdev, &bdev->channels[i], i);
bdev             1312 drivers/dma/qcom/bam_dma.c 	ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
bdev             1313 drivers/dma/qcom/bam_dma.c 			IRQF_TRIGGER_HIGH, "bam_dma", bdev);
bdev             1318 drivers/dma/qcom/bam_dma.c 	bdev->common.dev = bdev->dev;
bdev             1319 drivers/dma/qcom/bam_dma.c 	bdev->common.dev->dma_parms = &bdev->dma_parms;
bdev             1320 drivers/dma/qcom/bam_dma.c 	ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
bdev             1322 drivers/dma/qcom/bam_dma.c 		dev_err(bdev->dev, "cannot set maximum segment size\n");
bdev             1326 drivers/dma/qcom/bam_dma.c 	platform_set_drvdata(pdev, bdev);
bdev             1329 drivers/dma/qcom/bam_dma.c 	dma_cap_zero(bdev->common.cap_mask);
bdev             1330 drivers/dma/qcom/bam_dma.c 	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
bdev             1333 drivers/dma/qcom/bam_dma.c 	bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
bdev             1334 drivers/dma/qcom/bam_dma.c 	bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
bdev             1335 drivers/dma/qcom/bam_dma.c 	bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
bdev             1336 drivers/dma/qcom/bam_dma.c 	bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
bdev             1337 drivers/dma/qcom/bam_dma.c 	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
bdev             1338 drivers/dma/qcom/bam_dma.c 	bdev->common.device_free_chan_resources = bam_free_chan;
bdev             1339 drivers/dma/qcom/bam_dma.c 	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
bdev             1340 drivers/dma/qcom/bam_dma.c 	bdev->common.device_config = bam_slave_config;
bdev             1341 drivers/dma/qcom/bam_dma.c 	bdev->common.device_pause = bam_pause;
bdev             1342 drivers/dma/qcom/bam_dma.c 	bdev->common.device_resume = bam_resume;
bdev             1343 drivers/dma/qcom/bam_dma.c 	bdev->common.device_terminate_all = bam_dma_terminate_all;
bdev             1344 drivers/dma/qcom/bam_dma.c 	bdev->common.device_issue_pending = bam_issue_pending;
bdev             1345 drivers/dma/qcom/bam_dma.c 	bdev->common.device_tx_status = bam_tx_status;
bdev             1346 drivers/dma/qcom/bam_dma.c 	bdev->common.dev = bdev->dev;
bdev             1348 drivers/dma/qcom/bam_dma.c 	ret = dma_async_device_register(&bdev->common);
bdev             1350 drivers/dma/qcom/bam_dma.c 		dev_err(bdev->dev, "failed to register dma async device\n");
bdev             1355 drivers/dma/qcom/bam_dma.c 					&bdev->common);
bdev             1359 drivers/dma/qcom/bam_dma.c 	if (bdev->controlled_remotely) {
bdev             1374 drivers/dma/qcom/bam_dma.c 	dma_async_device_unregister(&bdev->common);
bdev             1376 drivers/dma/qcom/bam_dma.c 	for (i = 0; i < bdev->num_channels; i++)
bdev             1377 drivers/dma/qcom/bam_dma.c 		tasklet_kill(&bdev->channels[i].vc.task);
bdev             1379 drivers/dma/qcom/bam_dma.c 	tasklet_kill(&bdev->task);
bdev             1381 drivers/dma/qcom/bam_dma.c 	clk_disable_unprepare(bdev->bamclk);
bdev             1388 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = platform_get_drvdata(pdev);
bdev             1394 drivers/dma/qcom/bam_dma.c 	dma_async_device_unregister(&bdev->common);
bdev             1397 drivers/dma/qcom/bam_dma.c 	writel_relaxed(0, bam_addr(bdev, 0,  BAM_IRQ_SRCS_MSK_EE));
bdev             1399 drivers/dma/qcom/bam_dma.c 	devm_free_irq(bdev->dev, bdev->irq, bdev);
bdev             1401 drivers/dma/qcom/bam_dma.c 	for (i = 0; i < bdev->num_channels; i++) {
bdev             1402 drivers/dma/qcom/bam_dma.c 		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
bdev             1403 drivers/dma/qcom/bam_dma.c 		tasklet_kill(&bdev->channels[i].vc.task);
bdev             1405 drivers/dma/qcom/bam_dma.c 		if (!bdev->channels[i].fifo_virt)
bdev             1408 drivers/dma/qcom/bam_dma.c 		dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
bdev             1409 drivers/dma/qcom/bam_dma.c 			    bdev->channels[i].fifo_virt,
bdev             1410 drivers/dma/qcom/bam_dma.c 			    bdev->channels[i].fifo_phys);
bdev             1413 drivers/dma/qcom/bam_dma.c 	tasklet_kill(&bdev->task);
bdev             1415 drivers/dma/qcom/bam_dma.c 	clk_disable_unprepare(bdev->bamclk);
bdev             1422 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = dev_get_drvdata(dev);
bdev             1424 drivers/dma/qcom/bam_dma.c 	clk_disable(bdev->bamclk);
bdev             1431 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = dev_get_drvdata(dev);
bdev             1434 drivers/dma/qcom/bam_dma.c 	ret = clk_enable(bdev->bamclk);
bdev             1445 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = dev_get_drvdata(dev);
bdev             1447 drivers/dma/qcom/bam_dma.c 	if (!bdev->controlled_remotely)
bdev             1450 drivers/dma/qcom/bam_dma.c 	clk_unprepare(bdev->bamclk);
bdev             1457 drivers/dma/qcom/bam_dma.c 	struct bam_device *bdev = dev_get_drvdata(dev);
bdev             1460 drivers/dma/qcom/bam_dma.c 	ret = clk_prepare(bdev->bamclk);
bdev             1464 drivers/dma/qcom/bam_dma.c 	if (!bdev->controlled_remotely)
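
Most of the drivers/dma/qcom/bam_dma.c entries above funnel through bam_addr(), which turns (register id, pipe, execution environment) into an MMIO address using a per-register table of a base offset plus three multipliers, and through the readl_relaxed()/writel_relaxed() accesses built on top of it. A sketch of that addressing scheme with hypothetical type and register names; the layout fields mirror what the bam_addr() lines above imply, but the structure here is an assumption, not the driver's exact definition.

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical, cut-down mirror of the register layout that bam_addr()
 * indexes above: every register is a base offset plus multipliers scaled
 * by the pipe number and by the owning execution environment (EE). */
struct reg_layout {
	u32 base_offset;
	unsigned int pipe_mult;
	unsigned int evnt_mult;
	unsigned int ee_mult;
};

struct bam_sketch {
	void __iomem *regs;		/* ioremapped controller registers */
	u32 ee;				/* EE this driver instance owns    */
	const struct reg_layout *layout;/* indexed by register id          */
};

static inline void __iomem *bam_reg_addr(struct bam_sketch *b, u32 pipe,
					 unsigned int reg)
{
	const struct reg_layout r = b->layout[reg];

	return b->regs + r.base_offset +
		r.pipe_mult * pipe +
		r.evnt_mult * pipe +
		r.ee_mult * b->ee;
}

/* Usage in the style of the pipe reset above: pulse the pipe's reset
 * register high then low (the register id value is illustrative). */
static void bam_pipe_reset_sketch(struct bam_sketch *b, u32 pipe,
				  unsigned int p_rst_reg)
{
	writel_relaxed(1, bam_reg_addr(b, pipe, p_rst_reg));
	writel_relaxed(0, bam_reg_addr(b, pipe, p_rst_reg));
}
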
bdev             1024 drivers/gpu/drm/amd/amdgpu/amdgpu.h static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
bdev             1026 drivers/gpu/drm/amd/amdgpu/amdgpu.h 	return container_of(bdev, struct amdgpu_device, mman.bdev);
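
The amdgpu.h entry above recovers the driver device from a ttm_bo_device pointer with container_of(), because the TTM device is embedded inside the driver's memory-manager struct; the same idiom appears below for nouveau (nouveau_bdev), qxl (qxl_get_qdev) and radeon (radeon_get_rdev). A generic sketch of the embedding, with all my_* names hypothetical:

#include <linux/kernel.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical driver that embeds the TTM device the way amdgpu does:
 * the ttm_bo_device lives inside a memory-manager struct inside the
 * driver device, so container_of() walks back out in one step. */
struct my_mman {
	struct ttm_bo_device bdev;
	/* ... pools, debugfs entries, ... */
};

struct my_device {
	struct my_mman mman;
	/* ... the rest of the driver state ... */
};

static inline struct my_device *my_ttm_dev(struct ttm_bo_device *bdev)
{
	return container_of(bdev, struct my_device, mman.bdev);
}
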
bdev              562 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c 	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
bdev              123 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
bdev              167 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
bdev              196 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              313 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
bdev              349 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
bdev             1063 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
bdev              317 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
bdev              364 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
bdev              401 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              468 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              921 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
bdev             1050 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
bdev             1088 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
bdev             3475 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
bdev             3664 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 					&tmp_adev->mman.bdev.man[TTM_PL_TT]);
bdev              108 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              133 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
bdev              198 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              254 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              286 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev               74 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c 	struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
bdev              125 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
bdev              158 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              715 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
bdev              716 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c 				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
bdev               44 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev               69 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              121 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
bdev               53 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 			(adev->mman.bdev.man[TTM_PL_TT].size) * PAGE_SIZE);
bdev               71 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]));
bdev               90 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
bdev              130 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
bdev              173 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
bdev              563 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
bdev              566 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
bdev              569 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
bdev              589 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
bdev              603 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
bdev              611 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
bdev              615 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
bdev              620 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
bdev              378 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              436 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev               64 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev               77 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
bdev              127 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
bdev              448 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 		man = &adev->mman.bdev.man[TTM_PL_TT];
bdev              457 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 		man = &adev->mman.bdev.man[TTM_PL_VRAM];
bdev              545 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
bdev              566 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	bo->tbo.bdev = &adev->mman.bdev;
bdev              575 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
bdev              743 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
bdev              885 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              917 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
bdev              994 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev             1038 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
bdev             1139 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev             1259 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
bdev             1332 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
bdev             1411 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              154 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              202 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev               69 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
bdev               85 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
bdev               90 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	adev = amdgpu_ttm_adev(bdev);
bdev              144 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
bdev              269 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		addr += bo->bdev->man[mem->mem_type].gpu_offset;
bdev              430 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
bdev              495 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	adev = amdgpu_ttm_adev(bo->bdev);
bdev              554 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	adev = amdgpu_ttm_adev(bo->bdev);
bdev              631 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	adev = amdgpu_ttm_adev(bo->bdev);
bdev              705 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
bdev              707 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
bdev              708 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
bdev              747 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
bdev              940 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
bdev              978 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
bdev             1050 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
bdev             1096 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
bdev             1145 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		bo->bdev->man[bo->mem.mem_type].gpu_offset;
bdev             1158 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
bdev             1179 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
bdev             1228 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	adev = amdgpu_ttm_adev(bo->bdev);
bdev             1253 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
bdev             1309 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	adev = amdgpu_ttm_adev(ttm->bdev);
bdev             1544 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
bdev             1680 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	r = ttm_bo_device_init(&adev->mman.bdev,
bdev             1691 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	adev->mman.bdev.no_retry = true;
bdev             1694 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
bdev             1764 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
bdev             1773 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
bdev             1780 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
bdev             1787 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
bdev             1830 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
bdev             1831 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
bdev             1832 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
bdev             1833 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
bdev             1834 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
bdev             1835 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	ttm_bo_device_release(&adev->mman.bdev);
bdev             1851 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[TTM_PL_VRAM];
bdev             1894 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
bdev             2042 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev             2137 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	struct ttm_mem_type_manager *man = &adev->mman.bdev.man[ttm_pl];
bdev             2342 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		if (p->mapping != adev->mman.bdev.dev_mapping)
bdev             2393 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		if (p->mapping != adev->mman.bdev.dev_mapping)
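
The amdgpu_ttm.c entries above follow the standard TTM bring-up order: ttm_bo_device_init() on the embedded bdev, then one ttm_bo_init_mm() call per memory domain, with ttm_bo_clean_mm() and ttm_bo_device_release() undoing it on teardown. A condensed sketch of that sequence for a driver with just VRAM and GTT, assuming this kernel's four-argument ttm_bo_device_init(); the bo driver, mapping and domain sizes are supplied by the caller.

#include <linux/fs.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Bring up the embedded TTM device and its VRAM and GTT domains, in the
 * order the amdgpu_ttm_init()/amdgpu_ttm_fini() entries above use. */
static int my_ttm_init(struct ttm_bo_device *bdev, struct ttm_bo_driver *driver,
		       struct address_space *mapping,
		       u64 vram_size, u64 gtt_size)
{
	int r;

	r = ttm_bo_device_init(bdev, driver, mapping, false /* need_dma32 */);
	if (r)
		return r;

	r = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
	if (r)
		goto err_release;

	r = ttm_bo_init_mm(bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
	if (r)
		goto err_clean_vram;

	return 0;

err_clean_vram:
	ttm_bo_clean_mm(bdev, TTM_PL_VRAM);
err_release:
	ttm_bo_device_release(bdev);
	return r;
}
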
bdev               44 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h 	struct ttm_bo_device		bdev;
bdev              603 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
bdev             1714 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev             2054 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 	if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
bdev               83 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 		amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
bdev              101 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 		amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
bdev              124 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
bdev              171 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
bdev              217 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
bdev              272 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
bdev              387 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
bdev             4513 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
bdev              553 drivers/gpu/drm/ast/ast_main.c 	gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
bdev              135 drivers/gpu/drm/cirrus/cirrus_drv.h 		struct ttm_bo_device bdev;
bdev               77 drivers/gpu/drm/drm_gem_vram_helper.c 			     struct ttm_bo_device *bdev,
bdev               92 drivers/gpu/drm/drm_gem_vram_helper.c 	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
bdev               94 drivers/gpu/drm/drm_gem_vram_helper.c 	gbo->bo.bdev = bdev;
bdev               97 drivers/gpu/drm/drm_gem_vram_helper.c 	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
bdev              123 drivers/gpu/drm/drm_gem_vram_helper.c 						struct ttm_bo_device *bdev,
bdev              135 drivers/gpu/drm/drm_gem_vram_helper.c 	ret = drm_gem_vram_init(dev, bdev, gbo, size, pg_align, interruptible);
bdev              365 drivers/gpu/drm/drm_gem_vram_helper.c 				  struct ttm_bo_device *bdev,
bdev              382 drivers/gpu/drm/drm_gem_vram_helper.c 	gbo = drm_gem_vram_create(dev, bdev, size, pg_align, interruptible);
bdev              509 drivers/gpu/drm/drm_gem_vram_helper.c 	return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev, 0,
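
The drm_gem_vram_helper.c entries above, together with the ast, hibmc and mgag200 callers around them, show that the helper takes the ttm_bo_device explicitly: a driver hands in &dev->vram_mm->bdev and gets back a VRAM-backed GEM object. A trivial caller sketch; the size is whatever the driver needs, and the trailing 0/false arguments match the callers listed here.

#include <drm/drm_device.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_vram_mm_helper.h>

/* Allocate a VRAM-backed GEM object through the helper; pg_align 0 and
 * interruptible == false match the ast/hibmc/mgag200 callers above. */
static struct drm_gem_vram_object *alloc_vram_bo(struct drm_device *dev,
						 size_t size)
{
	return drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
}

On failure the helper returns an ERR_PTR value, so callers test the result with IS_ERR() before using it.
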
bdev               59 drivers/gpu/drm/drm_vram_mm_helper.c static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
bdev               85 drivers/gpu/drm/drm_vram_mm_helper.c 	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);
bdev               94 drivers/gpu/drm/drm_vram_mm_helper.c 	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev);
bdev              101 drivers/gpu/drm/drm_vram_mm_helper.c static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
bdev              104 drivers/gpu/drm/drm_vram_mm_helper.c 	struct ttm_mem_type_manager *man = bdev->man + mem->mem_type;
bdev              105 drivers/gpu/drm/drm_vram_mm_helper.c 	struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
bdev              131 drivers/gpu/drm/drm_vram_mm_helper.c static void bo_driver_io_mem_free(struct ttm_bo_device *bdev,
bdev              173 drivers/gpu/drm/drm_vram_mm_helper.c 	ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
bdev              179 drivers/gpu/drm/drm_vram_mm_helper.c 	ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
bdev              193 drivers/gpu/drm/drm_vram_mm_helper.c 	ttm_bo_device_release(&vmm->bdev);
bdev              210 drivers/gpu/drm/drm_vram_mm_helper.c 	return ttm_bo_mmap(filp, vma, &vmm->bdev);
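
The drm_vram_mm_helper.c entries end with the mmap path, which simply forwards to ttm_bo_mmap() on the embedded bdev; the amdgpu, nouveau, qxl and radeon entries elsewhere in this listing do the same with their own embedded devices. A sketch of a driver .mmap hook in that style; the route from the file to the drm_device via drm_file is the usual DRM pattern and is an assumption here rather than something quoted above.

#include <linux/fs.h>
#include <linux/mm.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_vram_mm_helper.h>
#include <drm/ttm/ttm_bo_api.h>

/* Forward a DRM file mmap to TTM: the fake offset in vma->vm_pgoff is
 * resolved against the embedded bdev's vma_manager by ttm_bo_mmap(). */
static int my_drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;

	return ttm_bo_mmap(filp, vma, &dev->vram_mm->bdev);
}
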
bdev               62 drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c 	gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
bdev               92 drivers/gpu/drm/mgag200/mgag200_drv.c 	return drm_gem_vram_fill_create_dumb(file, dev, &dev->vram_mm->bdev,
bdev              176 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->cursor.pixels_1 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
bdev              179 drivers/gpu/drm/mgag200/mgag200_main.c 	mdev->cursor.pixels_2 = drm_gem_vram_create(dev, &dev->vram_mm->bdev,
bdev              135 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
bdev              164 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
bdev              215 drivers/gpu/drm/nouveau/nouveau_bo.c 	nvbo->bo.bdev = &drm->ttm.bdev;
bdev              303 drivers/gpu/drm/nouveau/nouveau_bo.c 	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
bdev              308 drivers/gpu/drm/nouveau/nouveau_bo.c 	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
bdev              357 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
bdev              410 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
bdev              480 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
bdev              541 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
bdev              561 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
bdev              639 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
bdev              650 drivers/gpu/drm/nouveau/nouveau_bo.c nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
bdev              657 drivers/gpu/drm/nouveau/nouveau_bo.c nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
bdev              660 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(bdev);
bdev             1127 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
bdev             1332 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
bdev             1354 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
bdev             1367 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
bdev             1438 drivers/gpu/drm/nouveau/nouveau_bo.c nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
bdev             1440 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
bdev             1441 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(bdev);
bdev             1517 drivers/gpu/drm/nouveau/nouveau_bo.c nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
bdev             1519 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(bdev);
bdev             1540 drivers/gpu/drm/nouveau/nouveau_bo.c 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
bdev             1604 drivers/gpu/drm/nouveau/nouveau_bo.c 	drm = nouveau_bdev(ttm->bdev);
bdev             1657 drivers/gpu/drm/nouveau/nouveau_bo.c 	drm = nouveau_bdev(ttm->bdev);
bdev              826 drivers/gpu/drm/nouveau/nouveau_drm.c 	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
bdev              152 drivers/gpu/drm/nouveau/nouveau_drv.h 		struct ttm_bo_device bdev;
bdev               43 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
bdev               65 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
bdev              140 drivers/gpu/drm/nouveau/nouveau_gem.c 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
bdev               87 drivers/gpu/drm/nouveau/nouveau_sgdma.c 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
bdev               65 drivers/gpu/drm/nouveau/nouveau_ttm.c 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
bdev              103 drivers/gpu/drm/nouveau/nouveau_ttm.c 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
bdev              129 drivers/gpu/drm/nouveau/nouveau_ttm.c 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
bdev              167 drivers/gpu/drm/nouveau/nouveau_ttm.c 	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
bdev              232 drivers/gpu/drm/nouveau/nouveau_ttm.c 	ret = ttm_bo_device_init(&drm->ttm.bdev,
bdev              247 drivers/gpu/drm/nouveau/nouveau_ttm.c 	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
bdev              264 drivers/gpu/drm/nouveau/nouveau_ttm.c 	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
bdev              281 drivers/gpu/drm/nouveau/nouveau_ttm.c 	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
bdev              282 drivers/gpu/drm/nouveau/nouveau_ttm.c 	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
bdev              284 drivers/gpu/drm/nouveau/nouveau_ttm.c 	ttm_bo_device_release(&drm->ttm.bdev);
bdev                8 drivers/gpu/drm/nouveau/nouveau_ttm.h 	return container_of(bd, struct nouveau_drm, ttm.bdev);
bdev              126 drivers/gpu/drm/qxl/qxl_drv.h 	struct ttm_bo_device		bdev;
bdev              113 drivers/gpu/drm/qxl/qxl_object.c 	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
bdev              151 drivers/gpu/drm/qxl/qxl_object.c 	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
bdev              164 drivers/gpu/drm/qxl/qxl_object.c 	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
bdev              196 drivers/gpu/drm/qxl/qxl_object.c 	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
bdev              205 drivers/gpu/drm/qxl/qxl_object.c 	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
bdev              352 drivers/gpu/drm/qxl/qxl_object.c 	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
bdev              357 drivers/gpu/drm/qxl/qxl_object.c 	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
bdev              433 drivers/gpu/drm/qxl/qxl_release.c 	struct ttm_bo_device *bdev;
bdev              443 drivers/gpu/drm/qxl/qxl_release.c 	bdev = bo->bdev;
bdev              444 drivers/gpu/drm/qxl/qxl_release.c 	qdev = container_of(bdev, struct qxl_device, mman.bdev);
bdev              454 drivers/gpu/drm/qxl/qxl_release.c 	glob = bdev->glob;
bdev               41 drivers/gpu/drm/qxl/qxl_ttm.c static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
bdev               46 drivers/gpu/drm/qxl/qxl_ttm.c 	mman = container_of(bdev, struct qxl_mman, bdev);
bdev               80 drivers/gpu/drm/qxl/qxl_ttm.c 	r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
bdev               92 drivers/gpu/drm/qxl/qxl_ttm.c static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
bdev               97 drivers/gpu/drm/qxl/qxl_ttm.c static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
bdev              100 drivers/gpu/drm/qxl/qxl_ttm.c 	struct qxl_device *qdev = qxl_get_qdev(bdev);
bdev              162 drivers/gpu/drm/qxl/qxl_ttm.c static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
bdev              165 drivers/gpu/drm/qxl/qxl_ttm.c 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
bdev              166 drivers/gpu/drm/qxl/qxl_ttm.c 	struct qxl_device *qdev = qxl_get_qdev(bdev);
bdev              195 drivers/gpu/drm/qxl/qxl_ttm.c static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
bdev              249 drivers/gpu/drm/qxl/qxl_ttm.c 	qdev = qxl_get_qdev(bo->bdev);
bdev              325 drivers/gpu/drm/qxl/qxl_ttm.c 	r = ttm_bo_device_init(&qdev->mman.bdev,
bdev              335 drivers/gpu/drm/qxl/qxl_ttm.c 	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
bdev              341 drivers/gpu/drm/qxl/qxl_ttm.c 	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV,
bdev              358 drivers/gpu/drm/qxl/qxl_ttm.c 	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
bdev              359 drivers/gpu/drm/qxl/qxl_ttm.c 	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
bdev              360 drivers/gpu/drm/qxl/qxl_ttm.c 	ttm_bo_device_release(&qdev->mman.bdev);
bdev              373 drivers/gpu/drm/qxl/qxl_ttm.c 	struct ttm_bo_global *glob = rdev->mman.bdev.glob;
bdev              399 drivers/gpu/drm/qxl/qxl_ttm.c 			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
bdev              401 drivers/gpu/drm/qxl/qxl_ttm.c 			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
bdev              451 drivers/gpu/drm/radeon/radeon.h 	struct ttm_bo_device		bdev;
bdev             1801 drivers/gpu/drm/radeon/radeon_device.c 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
bdev             1860 drivers/gpu/drm/radeon/radeon_device.c 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
bdev              228 drivers/gpu/drm/radeon/radeon_gem.c 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
bdev              206 drivers/gpu/drm/radeon/radeon_object.c 	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
bdev              262 drivers/gpu/drm/radeon/radeon_object.c 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
bdev              432 drivers/gpu/drm/radeon/radeon_object.c 	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
bdev             1805 drivers/gpu/drm/radeon/radeon_pm.c 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
bdev             1856 drivers/gpu/drm/radeon/radeon_pm.c 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
bdev               59 drivers/gpu/drm/radeon/radeon_ttm.c static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
bdev               64 drivers/gpu/drm/radeon/radeon_ttm.c 	mman = container_of(bdev, struct radeon_mman, bdev);
bdev               69 drivers/gpu/drm/radeon/radeon_ttm.c static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
bdev               74 drivers/gpu/drm/radeon/radeon_ttm.c static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
bdev               79 drivers/gpu/drm/radeon/radeon_ttm.c 	rdev = radeon_get_rdev(bdev);
bdev              212 drivers/gpu/drm/radeon/radeon_ttm.c 	rdev = radeon_get_rdev(bo->bdev);
bdev              357 drivers/gpu/drm/radeon/radeon_ttm.c 	rdev = radeon_get_rdev(bo->bdev);
bdev              402 drivers/gpu/drm/radeon/radeon_ttm.c static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
bdev              404 drivers/gpu/drm/radeon/radeon_ttm.c 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
bdev              405 drivers/gpu/drm/radeon/radeon_ttm.c 	struct radeon_device *rdev = radeon_get_rdev(bdev);
bdev              467 drivers/gpu/drm/radeon/radeon_ttm.c static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
bdev              487 drivers/gpu/drm/radeon/radeon_ttm.c 	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
bdev              549 drivers/gpu/drm/radeon/radeon_ttm.c 	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
bdev              638 drivers/gpu/drm/radeon/radeon_ttm.c 	rdev = radeon_get_rdev(bo->bdev);
bdev              690 drivers/gpu/drm/radeon/radeon_ttm.c 	rdev = radeon_get_rdev(ttm->bdev);
bdev              721 drivers/gpu/drm/radeon/radeon_ttm.c 	rdev = radeon_get_rdev(ttm->bdev);
bdev              794 drivers/gpu/drm/radeon/radeon_ttm.c 	r = ttm_bo_device_init(&rdev->mman.bdev,
bdev              803 drivers/gpu/drm/radeon/radeon_ttm.c 	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
bdev              829 drivers/gpu/drm/radeon/radeon_ttm.c 	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
bdev              861 drivers/gpu/drm/radeon/radeon_ttm.c 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
bdev              862 drivers/gpu/drm/radeon/radeon_ttm.c 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
bdev              863 drivers/gpu/drm/radeon/radeon_ttm.c 	ttm_bo_device_release(&rdev->mman.bdev);
bdev              878 drivers/gpu/drm/radeon/radeon_ttm.c 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
bdev              896 drivers/gpu/drm/radeon/radeon_ttm.c 	rdev = radeon_get_rdev(bo->bdev);
bdev              912 drivers/gpu/drm/radeon/radeon_ttm.c 	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
bdev              933 drivers/gpu/drm/radeon/radeon_ttm.c 	struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[ttm_pl];
bdev               16 drivers/gpu/drm/shmobile/shmob_drm_backlight.c static int shmob_drm_backlight_update(struct backlight_device *bdev)
bdev               18 drivers/gpu/drm/shmobile/shmob_drm_backlight.c 	struct shmob_drm_connector *scon = bl_get_data(bdev);
bdev               21 drivers/gpu/drm/shmobile/shmob_drm_backlight.c 	int brightness = bdev->props.brightness;
bdev               23 drivers/gpu/drm/shmobile/shmob_drm_backlight.c 	if (bdev->props.power != FB_BLANK_UNBLANK ||
bdev               24 drivers/gpu/drm/shmobile/shmob_drm_backlight.c 	    bdev->props.state & BL_CORE_SUSPENDED)
bdev               30 drivers/gpu/drm/shmobile/shmob_drm_backlight.c static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev)
bdev               32 drivers/gpu/drm/shmobile/shmob_drm_backlight.c 	struct shmob_drm_connector *scon = bl_get_data(bdev);
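
Note that in the drivers/gpu/drm/shmobile/shmob_drm_backlight.c entries the name bdev is a struct backlight_device, not a block device: the update_status and get_brightness callbacks read bdev->props and forward the result to the panel. A sketch of such a callback pair for a hypothetical panel; panel_set_brightness() stands in for whatever hook the real connector provides.

#include <linux/backlight.h>
#include <linux/fb.h>

/* Hypothetical panel hook; stands in for the connector-specific setter
 * that the shmobile driver reaches through its platform data. */
static int panel_set_brightness(struct backlight_device *bdev, int brightness);

static int my_bl_update_status(struct backlight_device *bdev)
{
	int brightness = bdev->props.brightness;

	/* A blanked or suspended backlight is driven as brightness 0. */
	if (bdev->props.power != FB_BLANK_UNBLANK ||
	    bdev->props.state & BL_CORE_SUSPENDED)
		brightness = 0;

	return panel_set_brightness(bdev, brightness);
}

static int my_bl_get_brightness(struct backlight_device *bdev)
{
	return bdev->props.brightness;
}

static const struct backlight_ops my_bl_ops = {
	.options	= BL_CORE_SUSPENDRESUME,
	.update_status	= my_bl_update_status,
	.get_brightness	= my_bl_get_brightness,
};
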
bdev               54 drivers/gpu/drm/ttm/ttm_agp_backend.c 	struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
bdev               79 drivers/gpu/drm/ttm/ttm_bo.c static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p,
bdev               82 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
bdev              111 drivers/gpu/drm/ttm/ttm_bo.c 		ttm_mem_type_debug(bo->bdev, &p, mem_type);
bdev              151 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              161 drivers/gpu/drm/ttm/ttm_bo.c 	atomic_dec(&bo->bdev->glob->bo_count);
bdev              167 drivers/gpu/drm/ttm/ttm_bo.c 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
bdev              173 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              184 drivers/gpu/drm/ttm/ttm_bo.c 	man = &bdev->man[mem->mem_type];
bdev              191 drivers/gpu/drm/ttm/ttm_bo.c 		list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
bdev              209 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              223 drivers/gpu/drm/ttm/ttm_bo.c 	if (notify && bdev->driver->del_from_lru_notify)
bdev              224 drivers/gpu/drm/ttm/ttm_bo.c 		bdev->driver->del_from_lru_notify(bo);
bdev              229 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_global *glob = bo->bdev->glob;
bdev              284 drivers/gpu/drm/ttm/ttm_bo.c 		man = &pos->first->bdev->man[TTM_PL_TT];
bdev              299 drivers/gpu/drm/ttm/ttm_bo.c 		man = &pos->first->bdev->man[TTM_PL_VRAM];
bdev              314 drivers/gpu/drm/ttm/ttm_bo.c 		lru = &pos->first->bdev->glob->swap_lru[i];
bdev              324 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              325 drivers/gpu/drm/ttm/ttm_bo.c 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
bdev              326 drivers/gpu/drm/ttm/ttm_bo.c 	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
bdev              327 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
bdev              328 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
bdev              363 drivers/gpu/drm/ttm/ttm_bo.c 			if (bdev->driver->move_notify)
bdev              364 drivers/gpu/drm/ttm/ttm_bo.c 				bdev->driver->move_notify(bo, evict, mem);
bdev              371 drivers/gpu/drm/ttm/ttm_bo.c 	if (bdev->driver->move_notify)
bdev              372 drivers/gpu/drm/ttm/ttm_bo.c 		bdev->driver->move_notify(bo, evict, mem);
bdev              377 drivers/gpu/drm/ttm/ttm_bo.c 	else if (bdev->driver->move)
bdev              378 drivers/gpu/drm/ttm/ttm_bo.c 		ret = bdev->driver->move(bo, evict, ctx, mem);
bdev              383 drivers/gpu/drm/ttm/ttm_bo.c 		if (bdev->driver->move_notify) {
bdev              385 drivers/gpu/drm/ttm/ttm_bo.c 			bdev->driver->move_notify(bo, false, mem);
bdev              394 drivers/gpu/drm/ttm/ttm_bo.c 		if (bdev->driver->invalidate_caches) {
bdev              395 drivers/gpu/drm/ttm/ttm_bo.c 			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
bdev              404 drivers/gpu/drm/ttm/ttm_bo.c 		    bdev->man[bo->mem.mem_type].gpu_offset;
bdev              412 drivers/gpu/drm/ttm/ttm_bo.c 	new_man = &bdev->man[bo->mem.mem_type];
bdev              431 drivers/gpu/drm/ttm/ttm_bo.c 	if (bo->bdev->driver->move_notify)
bdev              432 drivers/gpu/drm/ttm/ttm_bo.c 		bo->bdev->driver->move_notify(bo, false, NULL);
bdev              477 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              478 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_global *glob = bdev->glob;
bdev              527 drivers/gpu/drm/ttm/ttm_bo.c 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
bdev              530 drivers/gpu/drm/ttm/ttm_bo.c 	schedule_delayed_work(&bdev->wq,
bdev              551 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_global *glob = bo->bdev->glob;
bdev              621 drivers/gpu/drm/ttm/ttm_bo.c static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
bdev              623 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_global *glob = bdev->glob;
bdev              630 drivers/gpu/drm/ttm/ttm_bo.c 	while (!list_empty(&bdev->ddestroy)) {
bdev              633 drivers/gpu/drm/ttm/ttm_bo.c 		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
bdev              654 drivers/gpu/drm/ttm/ttm_bo.c 	list_splice_tail(&removed, &bdev->ddestroy);
bdev              655 drivers/gpu/drm/ttm/ttm_bo.c 	empty = list_empty(&bdev->ddestroy);
bdev              663 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev =
bdev              666 drivers/gpu/drm/ttm/ttm_bo.c 	if (!ttm_bo_delayed_delete(bdev, false))
bdev              667 drivers/gpu/drm/ttm/ttm_bo.c 		schedule_delayed_work(&bdev->wq,
bdev              675 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              676 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
bdev              678 drivers/gpu/drm/ttm/ttm_bo.c 	if (bo->bdev->driver->release_notify)
bdev              679 drivers/gpu/drm/ttm/ttm_bo.c 		bo->bdev->driver->release_notify(bo);
bdev              681 drivers/gpu/drm/ttm/ttm_bo.c 	drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
bdev              695 drivers/gpu/drm/ttm/ttm_bo.c int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
bdev              697 drivers/gpu/drm/ttm/ttm_bo.c 	return cancel_delayed_work_sync(&bdev->wq);
bdev              701 drivers/gpu/drm/ttm/ttm_bo.c void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
bdev              704 drivers/gpu/drm/ttm/ttm_bo.c 		schedule_delayed_work(&bdev->wq,
bdev              712 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              721 drivers/gpu/drm/ttm/ttm_bo.c 	bdev->driver->evict_flags(bo, &placement);
bdev              840 drivers/gpu/drm/ttm/ttm_bo.c static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
bdev              847 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_global *glob = bdev->glob;
bdev              848 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
bdev              866 drivers/gpu/drm/ttm/ttm_bo.c 			if (place && !bdev->driver->eviction_valuable(bo,
bdev              919 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
bdev              969 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              970 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
bdev              981 drivers/gpu/drm/ttm/ttm_bo.c 		ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
bdev             1050 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev             1060 drivers/gpu/drm/ttm/ttm_bo.c 	man = &bdev->man[mem_type];
bdev             1078 drivers/gpu/drm/ttm/ttm_bo.c 		spin_lock(&bo->bdev->glob->lru_lock);
bdev             1081 drivers/gpu/drm/ttm/ttm_bo.c 		spin_unlock(&bo->bdev->glob->lru_lock);
bdev             1100 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev             1124 drivers/gpu/drm/ttm/ttm_bo.c 		man = &bdev->man[mem->mem_type];
bdev             1173 drivers/gpu/drm/ttm/ttm_bo.c 		spin_lock(&bo->bdev->glob->lru_lock);
bdev             1175 drivers/gpu/drm/ttm/ttm_bo.c 		spin_unlock(&bo->bdev->glob->lru_lock);
bdev             1287 drivers/gpu/drm/ttm/ttm_bo.c int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bdev             1301 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bdev             1334 drivers/gpu/drm/ttm/ttm_bo.c 	bo->bdev = bdev;
bdev             1362 drivers/gpu/drm/ttm/ttm_bo.c 	atomic_inc(&bo->bdev->glob->bo_count);
bdev             1370 drivers/gpu/drm/ttm/ttm_bo.c 		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node,
bdev             1393 drivers/gpu/drm/ttm/ttm_bo.c 		spin_lock(&bdev->glob->lru_lock);
bdev             1395 drivers/gpu/drm/ttm/ttm_bo.c 		spin_unlock(&bdev->glob->lru_lock);
bdev             1402 drivers/gpu/drm/ttm/ttm_bo.c int ttm_bo_init(struct ttm_bo_device *bdev,
bdev             1417 drivers/gpu/drm/ttm/ttm_bo.c 	ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
bdev             1430 drivers/gpu/drm/ttm/ttm_bo.c size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
bdev             1444 drivers/gpu/drm/ttm/ttm_bo.c size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
bdev             1458 drivers/gpu/drm/ttm/ttm_bo.c int ttm_bo_create(struct ttm_bo_device *bdev,
bdev             1474 drivers/gpu/drm/ttm/ttm_bo.c 	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
bdev             1475 drivers/gpu/drm/ttm/ttm_bo.c 	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
bdev             1485 drivers/gpu/drm/ttm/ttm_bo.c static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
bdev             1493 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
bdev             1494 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_global *glob = bdev->glob;
bdev             1507 drivers/gpu/drm/ttm/ttm_bo.c 			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx,
bdev             1530 drivers/gpu/drm/ttm/ttm_bo.c int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
bdev             1539 drivers/gpu/drm/ttm/ttm_bo.c 	man = &bdev->man[mem_type];
bdev             1552 drivers/gpu/drm/ttm/ttm_bo.c 		ret = ttm_bo_force_list_clean(bdev, mem_type);
bdev             1568 drivers/gpu/drm/ttm/ttm_bo.c int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
bdev             1570 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
bdev             1582 drivers/gpu/drm/ttm/ttm_bo.c 	return ttm_bo_force_list_clean(bdev, mem_type);
bdev             1586 drivers/gpu/drm/ttm/ttm_bo.c int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
bdev             1594 drivers/gpu/drm/ttm/ttm_bo.c 	man = &bdev->man[type];
bdev             1602 drivers/gpu/drm/ttm/ttm_bo.c 	ret = bdev->driver->init_mem_type(bdev, type, man);
bdev             1605 drivers/gpu/drm/ttm/ttm_bo.c 	man->bdev = bdev;
bdev             1686 drivers/gpu/drm/ttm/ttm_bo.c int ttm_bo_device_release(struct ttm_bo_device *bdev)
bdev             1691 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_global *glob = bdev->glob;
bdev             1694 drivers/gpu/drm/ttm/ttm_bo.c 		man = &bdev->man[i];
bdev             1697 drivers/gpu/drm/ttm/ttm_bo.c 			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
bdev             1707 drivers/gpu/drm/ttm/ttm_bo.c 	list_del(&bdev->device_list);
bdev             1710 drivers/gpu/drm/ttm/ttm_bo.c 	cancel_delayed_work_sync(&bdev->wq);
bdev             1712 drivers/gpu/drm/ttm/ttm_bo.c 	if (ttm_bo_delayed_delete(bdev, true))
bdev             1717 drivers/gpu/drm/ttm/ttm_bo.c 		if (list_empty(&bdev->man[0].lru[0]))
bdev             1721 drivers/gpu/drm/ttm/ttm_bo.c 	drm_vma_offset_manager_destroy(&bdev->vma_manager);
bdev             1730 drivers/gpu/drm/ttm/ttm_bo.c int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev             1742 drivers/gpu/drm/ttm/ttm_bo.c 	bdev->driver = driver;
bdev             1744 drivers/gpu/drm/ttm/ttm_bo.c 	memset(bdev->man, 0, sizeof(bdev->man));
bdev             1750 drivers/gpu/drm/ttm/ttm_bo.c 	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
bdev             1754 drivers/gpu/drm/ttm/ttm_bo.c 	drm_vma_offset_manager_init(&bdev->vma_manager,
bdev             1757 drivers/gpu/drm/ttm/ttm_bo.c 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
bdev             1758 drivers/gpu/drm/ttm/ttm_bo.c 	INIT_LIST_HEAD(&bdev->ddestroy);
bdev             1759 drivers/gpu/drm/ttm/ttm_bo.c 	bdev->dev_mapping = mapping;
bdev             1760 drivers/gpu/drm/ttm/ttm_bo.c 	bdev->glob = glob;
bdev             1761 drivers/gpu/drm/ttm/ttm_bo.c 	bdev->need_dma32 = need_dma32;
bdev             1763 drivers/gpu/drm/ttm/ttm_bo.c 	list_add_tail(&bdev->device_list, &glob->device_list);
bdev             1777 drivers/gpu/drm/ttm/ttm_bo.c bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
bdev             1779 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
bdev             1796 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev             1798 drivers/gpu/drm/ttm/ttm_bo.c 	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
bdev             1804 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev             1805 drivers/gpu/drm/ttm/ttm_bo.c 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
bdev             1939 drivers/gpu/drm/ttm/ttm_bo.c 	if (bo->bdev->driver->swap_notify)
bdev             1940 drivers/gpu/drm/ttm/ttm_bo.c 		bo->bdev->driver->swap_notify(bo);
bdev             1957 drivers/gpu/drm/ttm/ttm_bo.c void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
bdev             1964 drivers/gpu/drm/ttm/ttm_bo.c 	while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
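
The exported helpers indexed just above (ttm_bo_clean_mm(), ttm_bo_evict_mm(), ttm_bo_swapout_all()) all take the ttm_bo_device plus a memory-type index. Below is a minimal sketch of how a driver might drain VRAM at suspend time using only those calls; the helper name and the eviction policy are illustrative assumptions, not kernel API.

    #include <drm/ttm/ttm_bo_api.h>
    #include <drm/ttm/ttm_bo_driver.h>
    #include <drm/ttm/ttm_placement.h>

    /* Hypothetical helper: empty VRAM, then swap out idle buffer objects. */
    static int example_drain_vram(struct ttm_bo_device *bdev)
    {
            int ret;

            /* Evict every buffer object currently placed in VRAM. */
            ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
            if (ret)
                    return ret;

            /* Push idle, unpinned buffer objects out to shmem as well. */
            ttm_bo_swapout_all(bdev);
            return 0;
    }
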
bdev              133 drivers/gpu/drm/ttm/ttm_bo_util.c int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
bdev              136 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
bdev              139 drivers/gpu/drm/ttm/ttm_bo_util.c 	if (!bdev->driver->io_mem_reserve)
bdev              142 drivers/gpu/drm/ttm/ttm_bo_util.c 		return bdev->driver->io_mem_reserve(bdev, mem);
bdev              144 drivers/gpu/drm/ttm/ttm_bo_util.c 	if (bdev->driver->io_mem_reserve &&
bdev              147 drivers/gpu/drm/ttm/ttm_bo_util.c 		ret = bdev->driver->io_mem_reserve(bdev, mem);
bdev              158 drivers/gpu/drm/ttm/ttm_bo_util.c void ttm_mem_io_free(struct ttm_bo_device *bdev,
bdev              161 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
bdev              166 drivers/gpu/drm/ttm/ttm_bo_util.c 	if (bdev->driver->io_mem_reserve &&
bdev              168 drivers/gpu/drm/ttm/ttm_bo_util.c 	    bdev->driver->io_mem_free)
bdev              169 drivers/gpu/drm/ttm/ttm_bo_util.c 		bdev->driver->io_mem_free(bdev, mem);
bdev              181 drivers/gpu/drm/ttm/ttm_bo_util.c 			&bo->bdev->man[mem->mem_type];
bdev              183 drivers/gpu/drm/ttm/ttm_bo_util.c 		ret = ttm_mem_io_reserve(bo->bdev, mem);
bdev              201 drivers/gpu/drm/ttm/ttm_bo_util.c 		ttm_mem_io_free(bo->bdev, mem);
bdev              205 drivers/gpu/drm/ttm/ttm_bo_util.c static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
bdev              208 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
bdev              214 drivers/gpu/drm/ttm/ttm_bo_util.c 	ret = ttm_mem_io_reserve(bdev, mem);
bdev              228 drivers/gpu/drm/ttm/ttm_bo_util.c 			ttm_mem_io_free(bdev, mem);
bdev              237 drivers/gpu/drm/ttm/ttm_bo_util.c static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
bdev              242 drivers/gpu/drm/ttm/ttm_bo_util.c 	man = &bdev->man[mem->mem_type];
bdev              247 drivers/gpu/drm/ttm/ttm_bo_util.c 	ttm_mem_io_free(bdev, mem);
bdev              360 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              361 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
bdev              377 drivers/gpu/drm/ttm/ttm_bo_util.c 	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
bdev              380 drivers/gpu/drm/ttm/ttm_bo_util.c 	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
bdev              448 drivers/gpu/drm/ttm/ttm_bo_util.c 	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
bdev              450 drivers/gpu/drm/ttm/ttm_bo_util.c 	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
bdev              506 drivers/gpu/drm/ttm/ttm_bo_util.c 	atomic_inc(&bo->bdev->glob->bo_count);
bdev              624 drivers/gpu/drm/ttm/ttm_bo_util.c 		&bo->bdev->man[bo->mem.mem_type];
bdev              636 drivers/gpu/drm/ttm/ttm_bo_util.c 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
bdev              654 drivers/gpu/drm/ttm/ttm_bo_util.c 		&bo->bdev->man[bo->mem.mem_type];
bdev              674 drivers/gpu/drm/ttm/ttm_bo_util.c 	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
bdev              686 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              687 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
bdev              747 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              750 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
bdev              751 drivers/gpu/drm/ttm/ttm_bo_util.c 	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
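
ttm_bo_util.c above dispatches to the driver's io_mem_reserve()/io_mem_free() hooks as bdev->driver->io_mem_reserve(bdev, mem). A hedged sketch of such a pair follows; the mem->bus field layout matches the TTM of this era but should be treated as an assumption, and the VRAM aperture base is deliberately left as a placeholder.

    #include <drm/ttm/ttm_bo_driver.h>
    #include <drm/ttm/ttm_placement.h>

    static int example_io_mem_reserve(struct ttm_bo_device *bdev,
                                      struct ttm_mem_reg *mem)
    {
            /* Default: no aperture mapping needed. */
            mem->bus.addr = NULL;
            mem->bus.offset = 0;
            mem->bus.size = mem->num_pages << PAGE_SHIFT;
            mem->bus.base = 0;
            mem->bus.is_iomem = false;

            switch (mem->mem_type) {
            case TTM_PL_SYSTEM:
            case TTM_PL_TT:
                    /* System pages and GTT are CPU-mapped, nothing to reserve. */
                    return 0;
            case TTM_PL_VRAM:
                    /* A real driver would put its PCI aperture base here. */
                    mem->bus.offset = mem->start << PAGE_SHIFT;
                    mem->bus.base = 0;
                    mem->bus.is_iomem = true;
                    return 0;
            default:
                    return -EINVAL;
            }
    }

    static void example_io_mem_free(struct ttm_bo_device *bdev,
                                    struct ttm_mem_reg *mem)
    {
            /* Nothing was mapped above, so nothing to release. */
    }
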
bdev              100 drivers/gpu/drm/ttm/ttm_bo_vm.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              102 drivers/gpu/drm/ttm/ttm_bo_vm.c 	if (bdev->driver->io_mem_pfn)
bdev              103 drivers/gpu/drm/ttm/ttm_bo_vm.c 		return bdev->driver->io_mem_pfn(bo, page_offset);
bdev              114 drivers/gpu/drm/ttm/ttm_bo_vm.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              125 drivers/gpu/drm/ttm/ttm_bo_vm.c 		&bdev->man[bo->mem.mem_type];
bdev              163 drivers/gpu/drm/ttm/ttm_bo_vm.c 	if (bdev->driver->fault_reserve_notify) {
bdev              166 drivers/gpu/drm/ttm/ttm_bo_vm.c 		err = bdev->driver->fault_reserve_notify(bo);
bdev              180 drivers/gpu/drm/ttm/ttm_bo_vm.c 			spin_lock(&bdev->glob->lru_lock);
bdev              182 drivers/gpu/drm/ttm/ttm_bo_vm.c 			spin_unlock(&bdev->glob->lru_lock);
bdev              306 drivers/gpu/drm/ttm/ttm_bo_vm.c 	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
bdev              384 drivers/gpu/drm/ttm/ttm_bo_vm.c 		if (bo->bdev->driver->access_memory)
bdev              385 drivers/gpu/drm/ttm/ttm_bo_vm.c 			ret = bo->bdev->driver->access_memory(
bdev              403 drivers/gpu/drm/ttm/ttm_bo_vm.c static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
bdev              410 drivers/gpu/drm/ttm/ttm_bo_vm.c 	drm_vma_offset_lock_lookup(&bdev->vma_manager);
bdev              412 drivers/gpu/drm/ttm/ttm_bo_vm.c 	node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
bdev              419 drivers/gpu/drm/ttm/ttm_bo_vm.c 	drm_vma_offset_unlock_lookup(&bdev->vma_manager);
bdev              428 drivers/gpu/drm/ttm/ttm_bo_vm.c 		struct ttm_bo_device *bdev)
bdev              437 drivers/gpu/drm/ttm/ttm_bo_vm.c 	bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
bdev              441 drivers/gpu/drm/ttm/ttm_bo_vm.c 	driver = bo->bdev->driver;
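
ttm_bo_vm.c provides ttm_bo_mmap(), and the virtio-gpu and vmwgfx entries later in this index forward their file_operations mmap straight to it. A minimal sketch of that forwarding; struct example_private and its embedded bdev are hypothetical, while the drm_file walk and the ttm_bo_mmap() call are the pattern those drivers use.

    #include <drm/drm_device.h>
    #include <drm/drm_file.h>
    #include <drm/ttm/ttm_bo_api.h>
    #include <drm/ttm/ttm_bo_driver.h>
    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Hypothetical driver private data with the TTM device embedded. */
    struct example_private {
            struct ttm_bo_device bdev;
    };

    static int example_mmap(struct file *filp, struct vm_area_struct *vma)
    {
            struct drm_file *file_priv = filp->private_data;
            struct example_private *priv = file_priv->minor->dev->dev_private;

            return ttm_bo_mmap(filp, vma, &priv->bdev);
    }
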
bdev               66 drivers/gpu/drm/ttm/ttm_execbuf_util.c 	glob = entry->bo->bdev->glob;
bdev              107 drivers/gpu/drm/ttm/ttm_execbuf_util.c 	glob = entry->bo->bdev->glob;
bdev              197 drivers/gpu/drm/ttm/ttm_execbuf_util.c 	glob = bo->bdev->glob;
bdev             1031 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
bdev             1052 drivers/gpu/drm/ttm/ttm_page_alloc.c 	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
bdev              890 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
bdev              995 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
bdev               48 drivers/gpu/drm/ttm/ttm_tt.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev               53 drivers/gpu/drm/ttm/ttm_tt.c 	if (bdev->need_dma32)
bdev               56 drivers/gpu/drm/ttm/ttm_tt.c 	if (bdev->no_retry)
bdev               75 drivers/gpu/drm/ttm/ttm_tt.c 	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
bdev              229 drivers/gpu/drm/ttm/ttm_tt.c 	ttm->bdev = bo->bdev;
bdev              457 drivers/gpu/drm/ttm/ttm_tt.c 		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
bdev              467 drivers/gpu/drm/ttm/ttm_tt.c 	if (ttm->bdev->driver->ttm_tt_populate)
bdev              468 drivers/gpu/drm/ttm/ttm_tt.c 		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
bdev              496 drivers/gpu/drm/ttm/ttm_tt.c 	if (ttm->bdev->driver->ttm_tt_unpopulate)
bdev              497 drivers/gpu/drm/ttm/ttm_tt.c 		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
bdev              286 drivers/gpu/drm/vboxvideo/vbox_main.c 	gbo = drm_gem_vram_create(&vbox->ddev, &vbox->ddev.vram_mm->bdev,
bdev              151 drivers/gpu/drm/virtio/virtgpu_drv.h 	struct ttm_bo_device		bdev;
bdev              112 drivers/gpu/drm/virtio/virtgpu_object.c 	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
bdev              139 drivers/gpu/drm/virtio/virtgpu_object.c 	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
bdev              220 drivers/gpu/drm/virtio/virtgpu_object.c 		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
bdev               42 drivers/gpu/drm/virtio/virtgpu_ttm.c virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
bdev               47 drivers/gpu/drm/virtio/virtgpu_ttm.c 	mman = container_of(bdev, struct virtio_gpu_mman, bdev);
bdev               65 drivers/gpu/drm/virtio/virtgpu_ttm.c 	r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
bdev               70 drivers/gpu/drm/virtio/virtgpu_ttm.c static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
bdev              115 drivers/gpu/drm/virtio/virtgpu_ttm.c static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
bdev              159 drivers/gpu/drm/virtio/virtgpu_ttm.c static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
bdev              162 drivers/gpu/drm/virtio/virtgpu_ttm.c 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
bdev              182 drivers/gpu/drm/virtio/virtgpu_ttm.c static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
bdev              201 drivers/gpu/drm/virtio/virtgpu_ttm.c 		virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
bdev              212 drivers/gpu/drm/virtio/virtgpu_ttm.c 		virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
bdev              239 drivers/gpu/drm/virtio/virtgpu_ttm.c 	vgdev = virtio_gpu_get_vgdev(bo->bdev);
bdev              279 drivers/gpu/drm/virtio/virtgpu_ttm.c 	r = ttm_bo_device_init(&vgdev->mman.bdev,
bdev              288 drivers/gpu/drm/virtio/virtgpu_ttm.c 	r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0);
bdev              296 drivers/gpu/drm/virtio/virtgpu_ttm.c 	ttm_bo_device_release(&vgdev->mman.bdev);
bdev              303 drivers/gpu/drm/virtio/virtgpu_ttm.c 	ttm_bo_device_release(&vgdev->mman.bdev);
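
virtgpu_ttm.c (line 47 above) recovers its driver state from the bare ttm_bo_device pointer with container_of(), because the TTM device is embedded in a larger driver structure. The same pattern with hypothetical names:

    #include <linux/kernel.h>
    #include <drm/ttm/ttm_bo_driver.h>

    /* Hypothetical wrapper: the TTM device is embedded in driver state. */
    struct example_mman {
            struct ttm_bo_device bdev;
            /* driver-private bookkeeping would follow */
    };

    static struct example_mman *example_mman_from_bdev(struct ttm_bo_device *bdev)
    {
            /* TTM callbacks receive the embedded member; recover the wrapper. */
            return container_of(bdev, struct example_mman, bdev);
    }
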
bdev              467 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c 		ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
bdev              473 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c 		ret = src->ttm->bdev->driver->ttm_tt_populate(src->ttm, &ctx);
bdev              503 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 	struct ttm_bo_device *bdev = &dev_priv->bdev;
bdev              517 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
bdev             1004 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev             1007 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 		container_of(bdev, struct vmw_private, bdev);
bdev             1247 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c 		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
bdev              462 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
bdev              495 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
bdev              830 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 	ret = ttm_bo_device_init(&dev_priv->bdev,
bdev              843 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
bdev              849 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
bdev              853 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
bdev              862 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
bdev              931 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
bdev              933 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
bdev              934 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
bdev              936 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 	(void)ttm_bo_device_release(&dev_priv->bdev);
bdev              982 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
bdev              983 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
bdev              987 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
bdev              988 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 	(void) ttm_bo_device_release(&dev_priv->bdev);
bdev             1131 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
bdev             1133 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
bdev             1160 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
bdev             1161 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
bdev             1193 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
bdev             1194 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
bdev             1196 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
bdev             1309 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 	ttm_bo_swapout_all(&dev_priv->bdev);
bdev              436 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 	struct ttm_bo_device bdev;
bdev              103 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c 		container_of(man->bdev, struct vmw_private, bdev);
bdev              258 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
bdev              443 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
bdev              793 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c 	struct ttm_bo_device *bdev = bo->bdev;
bdev              797 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c 	dev_priv = container_of(bdev, struct vmw_private, bdev);
bdev              723 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
bdev              739 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
bdev              744 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
bdev              797 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
bdev              799 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
bdev              800 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
bdev              825 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
bdev               35 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c 	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
bdev               17 drivers/hid/hid-picolcd_backlight.c static int picolcd_get_brightness(struct backlight_device *bdev)
bdev               19 drivers/hid/hid-picolcd_backlight.c 	struct picolcd_data *data = bl_get_data(bdev);
bdev               23 drivers/hid/hid-picolcd_backlight.c static int picolcd_set_brightness(struct backlight_device *bdev)
bdev               25 drivers/hid/hid-picolcd_backlight.c 	struct picolcd_data *data = bl_get_data(bdev);
bdev               32 drivers/hid/hid-picolcd_backlight.c 	data->lcd_brightness = bdev->props.brightness & 0x0ff;
bdev               33 drivers/hid/hid-picolcd_backlight.c 	data->lcd_power      = bdev->props.power;
bdev               42 drivers/hid/hid-picolcd_backlight.c static int picolcd_check_bl_fb(struct backlight_device *bdev, struct fb_info *fb)
bdev               44 drivers/hid/hid-picolcd_backlight.c 	return fb && fb == picolcd_fbinfo((struct picolcd_data *)bl_get_data(bdev));
bdev               56 drivers/hid/hid-picolcd_backlight.c 	struct backlight_device *bdev;
bdev               69 drivers/hid/hid-picolcd_backlight.c 	bdev = backlight_device_register(dev_name(dev), dev, data,
bdev               71 drivers/hid/hid-picolcd_backlight.c 	if (IS_ERR(bdev)) {
bdev               73 drivers/hid/hid-picolcd_backlight.c 		return PTR_ERR(bdev);
bdev               75 drivers/hid/hid-picolcd_backlight.c 	bdev->props.brightness     = 0xff;
bdev               77 drivers/hid/hid-picolcd_backlight.c 	data->backlight            = bdev;
bdev               78 drivers/hid/hid-picolcd_backlight.c 	picolcd_set_brightness(bdev);
bdev               84 drivers/hid/hid-picolcd_backlight.c 	struct backlight_device *bdev = data->backlight;
bdev               87 drivers/hid/hid-picolcd_backlight.c 	backlight_device_unregister(bdev);
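
Note that in the hid-picolcd entries above, bdev is a struct backlight_device, not a block device. A minimal registration sketch in the same style; the example_bl_* names and the imaginary hardware write are assumptions, while bl_get_data(), backlight_device_register() and the props fields are the backlight API those lines use.

    #include <linux/backlight.h>
    #include <linux/device.h>
    #include <linux/err.h>

    struct example_bl_data {
            int level;      /* last brightness handed to the (imaginary) hardware */
    };

    static int example_bl_update_status(struct backlight_device *bdev)
    {
            struct example_bl_data *data = bl_get_data(bdev);

            data->level = bdev->props.brightness & 0xff;
            /* A real driver would program the panel here. */
            return 0;
    }

    static const struct backlight_ops example_bl_ops = {
            .update_status  = example_bl_update_status,
    };

    static int example_bl_register(struct device *dev, struct example_bl_data *data)
    {
            struct backlight_properties props = {
                    .type           = BACKLIGHT_RAW,
                    .max_brightness = 0xff,
            };
            struct backlight_device *bdev;

            bdev = backlight_device_register(dev_name(dev), dev, data,
                                             &example_bl_ops, &props);
            if (IS_ERR(bdev))
                    return PTR_ERR(bdev);

            bdev->props.brightness = 0xff;
            backlight_update_status(bdev);
            return 0;
    }
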
bdev             1609 drivers/ide/ide-cd.c static int idecd_open(struct block_device *bdev, fmode_t mode)
bdev             1614 drivers/ide/ide-cd.c 	check_disk_change(bdev);
bdev             1617 drivers/ide/ide-cd.c 	info = ide_cd_get(bdev->bd_disk);
bdev             1621 drivers/ide/ide-cd.c 	rc = cdrom_open(&info->devinfo, bdev, mode);
bdev             1679 drivers/ide/ide-cd.c static int idecd_locked_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1682 drivers/ide/ide-cd.c 	struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
bdev             1694 drivers/ide/ide-cd.c 	err = generic_ide_ioctl(info->drive, bdev, cmd, arg);
bdev             1696 drivers/ide/ide-cd.c 		err = cdrom_ioctl(&info->devinfo, bdev, mode, cmd, arg);
bdev             1701 drivers/ide/ide-cd.c static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1707 drivers/ide/ide-cd.c 	ret = idecd_locked_ioctl(bdev, mode, cmd, arg);
bdev               19 drivers/ide/ide-disk_ioctl.c int ide_disk_ioctl(ide_drive_t *drive, struct block_device *bdev, fmode_t mode,
bdev               25 drivers/ide/ide-disk_ioctl.c 	err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings);
bdev               29 drivers/ide/ide-disk_ioctl.c 	err = generic_ide_ioctl(drive, bdev, cmd, arg);
bdev              274 drivers/ide/ide-floppy_ioctl.c int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
bdev              296 drivers/ide/ide-floppy_ioctl.c 		err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
bdev              299 drivers/ide/ide-floppy_ioctl.c 		err = generic_ide_ioctl(drive, bdev, cmd, arg);
bdev              184 drivers/ide/ide-gd.c static int ide_gd_open(struct block_device *bdev, fmode_t mode)
bdev              186 drivers/ide/ide-gd.c 	struct gendisk *disk = bdev->bd_disk;
bdev              229 drivers/ide/ide-gd.c 		check_disk_change(bdev);
bdev              242 drivers/ide/ide-gd.c static int ide_gd_unlocked_open(struct block_device *bdev, fmode_t mode)
bdev              247 drivers/ide/ide-gd.c 	ret = ide_gd_open(bdev, mode);
bdev              276 drivers/ide/ide-gd.c static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              278 drivers/ide/ide-gd.c 	struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
bdev              335 drivers/ide/ide-gd.c static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
bdev              338 drivers/ide/ide-gd.c 	struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
bdev              341 drivers/ide/ide-gd.c 	return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg);
bdev               20 drivers/ide/ide-ioctls.c int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev,
bdev               43 drivers/ide/ide-ioctls.c 	if (bdev != bdev->bd_contains)
bdev              236 drivers/ide/ide-ioctls.c int generic_ide_ioctl(ide_drive_t *drive, struct block_device *bdev,
bdev              241 drivers/ide/ide-ioctls.c 	err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_ioctl_settings);
bdev              248 drivers/ide/ide-ioctls.c 		if (bdev != bdev->bd_contains)
bdev             1894 drivers/ide/ide-tape.c static int idetape_open(struct block_device *bdev, fmode_t mode)
bdev             1899 drivers/ide/ide-tape.c 	tape = ide_tape_get(bdev->bd_disk, false, 0);
bdev             1917 drivers/ide/ide-tape.c static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1920 drivers/ide/ide-tape.c 	struct ide_tape_obj *tape = ide_drv_g(bdev->bd_disk, ide_tape_obj);
bdev             1925 drivers/ide/ide-tape.c 	err = generic_ide_ioctl(drive, bdev, cmd, arg);
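
The IDE entries above implement the block_device_operations getgeo hook by deriving a CHS geometry from the disk. A generic sketch of that hook; the 255/63 defaults and the helper name are illustrative, and get_capacity() on bdev->bd_disk is the call visible elsewhere in this index.

    #include <linux/blkdev.h>
    #include <linux/genhd.h>
    #include <linux/hdreg.h>
    #include <linux/kernel.h>

    static int example_getgeo(struct block_device *bdev, struct hd_geometry *geo)
    {
            sector_t capacity = get_capacity(bdev->bd_disk);

            geo->heads = 255;
            geo->sectors = 63;
            sector_div(capacity, geo->heads * geo->sectors);
            geo->cylinders = capacity;      /* truncates for huge disks, as CHS always has */
            geo->start = 0;
            return 0;
    }
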
bdev               49 drivers/input/keyboard/gpio_keys_polled.c 	struct gpio_keys_polled_dev *bdev = dev->private;
bdev               56 drivers/input/keyboard/gpio_keys_polled.c 			__set_bit(button->code, bdev->rel_axis_seen);
bdev               61 drivers/input/keyboard/gpio_keys_polled.c 			__set_bit(button->code, bdev->abs_axis_seen);
bdev               91 drivers/input/keyboard/gpio_keys_polled.c 	struct gpio_keys_polled_dev *bdev = dev->private;
bdev               92 drivers/input/keyboard/gpio_keys_polled.c 	const struct gpio_keys_platform_data *pdata = bdev->pdata;
bdev               96 drivers/input/keyboard/gpio_keys_polled.c 	memset(bdev->rel_axis_seen, 0, sizeof(bdev->rel_axis_seen));
bdev               97 drivers/input/keyboard/gpio_keys_polled.c 	memset(bdev->abs_axis_seen, 0, sizeof(bdev->abs_axis_seen));
bdev              100 drivers/input/keyboard/gpio_keys_polled.c 		struct gpio_keys_button_data *bdata = &bdev->data[i];
bdev              113 drivers/input/keyboard/gpio_keys_polled.c 		if (!test_bit(i, bdev->rel_axis_seen))
bdev              118 drivers/input/keyboard/gpio_keys_polled.c 		if (!test_bit(i, bdev->abs_axis_seen))
bdev              127 drivers/input/keyboard/gpio_keys_polled.c 	struct gpio_keys_polled_dev *bdev = dev->private;
bdev              128 drivers/input/keyboard/gpio_keys_polled.c 	const struct gpio_keys_platform_data *pdata = bdev->pdata;
bdev              131 drivers/input/keyboard/gpio_keys_polled.c 		pdata->enable(bdev->dev);
bdev              136 drivers/input/keyboard/gpio_keys_polled.c 	struct gpio_keys_polled_dev *bdev = dev->private;
bdev              137 drivers/input/keyboard/gpio_keys_polled.c 	const struct gpio_keys_platform_data *pdata = bdev->pdata;
bdev              140 drivers/input/keyboard/gpio_keys_polled.c 		pdata->disable(bdev->dev);
bdev              234 drivers/input/keyboard/gpio_keys_polled.c 	struct gpio_keys_polled_dev *bdev;
bdev              251 drivers/input/keyboard/gpio_keys_polled.c 	bdev = devm_kzalloc(dev, struct_size(bdev, data, pdata->nbuttons),
bdev              253 drivers/input/keyboard/gpio_keys_polled.c 	if (!bdev) {
bdev              264 drivers/input/keyboard/gpio_keys_polled.c 	poll_dev->private = bdev;
bdev              286 drivers/input/keyboard/gpio_keys_polled.c 		struct gpio_keys_button_data *bdata = &bdev->data[i];
bdev              356 drivers/input/keyboard/gpio_keys_polled.c 	bdev->poll_dev = poll_dev;
bdev              357 drivers/input/keyboard/gpio_keys_polled.c 	bdev->dev = dev;
bdev              358 drivers/input/keyboard/gpio_keys_polled.c 	bdev->pdata = pdata;
bdev              370 drivers/input/keyboard/gpio_keys_polled.c 					     &bdev->data[i]);
bdev               37 drivers/input/misc/cobalt_btns.c 	struct buttons_dev *bdev = dev->private;
bdev               42 drivers/input/misc/cobalt_btns.c 	status = ~readl(bdev->reg) >> 24;
bdev               44 drivers/input/misc/cobalt_btns.c 	for (i = 0; i < ARRAY_SIZE(bdev->keymap); i++) {
bdev               46 drivers/input/misc/cobalt_btns.c 			if (++bdev->count[i] == BUTTONS_COUNT_THRESHOLD) {
bdev               48 drivers/input/misc/cobalt_btns.c 				input_report_key(input, bdev->keymap[i], 1);
bdev               52 drivers/input/misc/cobalt_btns.c 			if (bdev->count[i] >= BUTTONS_COUNT_THRESHOLD) {
bdev               54 drivers/input/misc/cobalt_btns.c 				input_report_key(input, bdev->keymap[i], 0);
bdev               57 drivers/input/misc/cobalt_btns.c 			bdev->count[i] = 0;
bdev               64 drivers/input/misc/cobalt_btns.c 	struct buttons_dev *bdev;
bdev               70 drivers/input/misc/cobalt_btns.c 	bdev = kzalloc(sizeof(struct buttons_dev), GFP_KERNEL);
bdev               72 drivers/input/misc/cobalt_btns.c 	if (!bdev || !poll_dev) {
bdev               77 drivers/input/misc/cobalt_btns.c 	memcpy(bdev->keymap, cobalt_map, sizeof(bdev->keymap));
bdev               79 drivers/input/misc/cobalt_btns.c 	poll_dev->private = bdev;
bdev               89 drivers/input/misc/cobalt_btns.c 	input->keycode = bdev->keymap;
bdev               90 drivers/input/misc/cobalt_btns.c 	input->keycodemax = ARRAY_SIZE(bdev->keymap);
bdev               96 drivers/input/misc/cobalt_btns.c 		__set_bit(bdev->keymap[i], input->keybit);
bdev              105 drivers/input/misc/cobalt_btns.c 	bdev->poll_dev = poll_dev;
bdev              106 drivers/input/misc/cobalt_btns.c 	bdev->reg = ioremap(res->start, resource_size(res));
bdev              107 drivers/input/misc/cobalt_btns.c 	dev_set_drvdata(&pdev->dev, bdev);
bdev              116 drivers/input/misc/cobalt_btns.c 	iounmap(bdev->reg);
bdev              119 drivers/input/misc/cobalt_btns.c 	kfree(bdev);
bdev              126 drivers/input/misc/cobalt_btns.c 	struct buttons_dev *bdev = dev_get_drvdata(dev);
bdev              128 drivers/input/misc/cobalt_btns.c 	input_unregister_polled_device(bdev->poll_dev);
bdev              129 drivers/input/misc/cobalt_btns.c 	input_free_polled_device(bdev->poll_dev);
bdev              130 drivers/input/misc/cobalt_btns.c 	iounmap(bdev->reg);
bdev              131 drivers/input/misc/cobalt_btns.c 	kfree(bdev);
bdev               55 drivers/input/misc/sgi_btns.c 	struct buttons_dev *bdev = dev->private;
bdev               62 drivers/input/misc/sgi_btns.c 	for (i = 0; i < ARRAY_SIZE(bdev->keymap); i++) {
bdev               64 drivers/input/misc/sgi_btns.c 			if (++bdev->count[i] == BUTTONS_COUNT_THRESHOLD) {
bdev               66 drivers/input/misc/sgi_btns.c 				input_report_key(input, bdev->keymap[i], 1);
bdev               70 drivers/input/misc/sgi_btns.c 			if (bdev->count[i] >= BUTTONS_COUNT_THRESHOLD) {
bdev               72 drivers/input/misc/sgi_btns.c 				input_report_key(input, bdev->keymap[i], 0);
bdev               75 drivers/input/misc/sgi_btns.c 			bdev->count[i] = 0;
bdev               82 drivers/input/misc/sgi_btns.c 	struct buttons_dev *bdev;
bdev               87 drivers/input/misc/sgi_btns.c 	bdev = kzalloc(sizeof(struct buttons_dev), GFP_KERNEL);
bdev               89 drivers/input/misc/sgi_btns.c 	if (!bdev || !poll_dev) {
bdev               94 drivers/input/misc/sgi_btns.c 	memcpy(bdev->keymap, sgi_map, sizeof(bdev->keymap));
bdev               96 drivers/input/misc/sgi_btns.c 	poll_dev->private = bdev;
bdev              106 drivers/input/misc/sgi_btns.c 	input->keycode = bdev->keymap;
bdev              107 drivers/input/misc/sgi_btns.c 	input->keycodemax = ARRAY_SIZE(bdev->keymap);
bdev              113 drivers/input/misc/sgi_btns.c 		__set_bit(bdev->keymap[i], input->keybit);
bdev              116 drivers/input/misc/sgi_btns.c 	bdev->poll_dev = poll_dev;
bdev              117 drivers/input/misc/sgi_btns.c 	platform_set_drvdata(pdev, bdev);
bdev              127 drivers/input/misc/sgi_btns.c 	kfree(bdev);
bdev              133 drivers/input/misc/sgi_btns.c 	struct buttons_dev *bdev = platform_get_drvdata(pdev);
bdev              135 drivers/input/misc/sgi_btns.c 	input_unregister_polled_device(bdev->poll_dev);
bdev              136 drivers/input/misc/sgi_btns.c 	input_free_polled_device(bdev->poll_dev);
bdev              137 drivers/input/misc/sgi_btns.c 	kfree(bdev);
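
In gpio_keys_polled.c, cobalt_btns.c and sgi_btns.c above, bdev is the driver's own button state hung off input_polled_dev->private. A hypothetical skeleton in the same shape; the allocation and registration calls are the input-polldev API those drivers use, while the hardware read-out is left as a comment.

    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/input-polldev.h>
    #include <linux/kernel.h>

    struct example_btns_dev {
            struct input_polled_dev *poll_dev;
            unsigned short keymap[4];
    };

    static void example_btns_poll(struct input_polled_dev *dev)
    {
            struct example_btns_dev *bdev = dev->private;
            struct input_dev *input = dev->input;
            unsigned long status = 0;       /* a real driver reads its register here */
            int i;

            for (i = 0; i < ARRAY_SIZE(bdev->keymap); i++)
                    input_report_key(input, bdev->keymap[i], status & BIT(i));
            input_sync(input);
    }

    static int example_btns_setup(struct example_btns_dev *bdev)
    {
            struct input_polled_dev *poll_dev = input_allocate_polled_device();

            if (!poll_dev)
                    return -ENOMEM;

            poll_dev->private = bdev;
            poll_dev->poll = example_btns_poll;
            poll_dev->poll_interval = 30;   /* ms */
            bdev->poll_dev = poll_dev;

            /* A real driver would also fill poll_dev->input keybits and keymap. */
            return input_register_polled_device(poll_dev);
    }
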
bdev              338 drivers/md/bcache/alloc.c 				blkdev_issue_discard(ca->bdev,
bdev              301 drivers/md/bcache/bcache.h 	struct block_device	*bdev;
bdev              413 drivers/md/bcache/bcache.h 	struct block_device	*bdev;
bdev               53 drivers/md/bcache/debug.c 	bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
bdev               39 drivers/md/bcache/io.c 	bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
bdev               58 drivers/md/bcache/journal.c 		bio_set_dev(bio, ca->bdev);
bdev              629 drivers/md/bcache/journal.c 		bio_set_dev(bio, ca->bdev);
bdev              805 drivers/md/bcache/journal.c 		bio_set_dev(bio, ca->bdev);
bdev             1024 drivers/md/bcache/request.c 		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
bdev             1130 drivers/md/bcache/request.c 	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
bdev             1211 drivers/md/bcache/request.c 	bio_set_dev(bio, dc->bdev);
bdev             1249 drivers/md/bcache/request.c 	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
bdev             1256 drivers/md/bcache/request.c 	struct request_queue *q = bdev_get_queue(dc->bdev);
bdev             1267 drivers/md/bcache/request.c 			q = bdev_get_queue(ca->bdev);
bdev             1379 drivers/md/bcache/request.c 		q = bdev_get_queue(ca->bdev);
bdev               62 drivers/md/bcache/super.c static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
bdev               67 drivers/md/bcache/super.c 	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
bdev              117 drivers/md/bcache/super.c 	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
bdev              156 drivers/md/bcache/super.c 		if (get_capacity(bdev->bd_disk) <
bdev              260 drivers/md/bcache/super.c 	bio_set_dev(bio, dc->bdev);
bdev              309 drivers/md/bcache/super.c 		bio_set_dev(bio, ca->bdev);
bdev              520 drivers/md/bcache/super.c 	bio_set_dev(bio, ca->bdev);
bdev              704 drivers/md/bcache/super.c 			bd_unlink_disk_holder(ca->bdev, d->disk);
bdev              716 drivers/md/bcache/super.c 		bd_link_disk_holder(ca->bdev, d->disk);
bdev              900 drivers/md/bcache/super.c 		sectors += bdev_sectors(dc->bdev);
bdev              917 drivers/md/bcache/super.c 		q = bdev_get_queue(dc->bdev);
bdev              984 drivers/md/bcache/super.c 	bd_link_disk_holder(dc->bdev, dc->disk.disk);
bdev             1271 drivers/md/bcache/super.c 		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
bdev             1280 drivers/md/bcache/super.c 	if (!IS_ERR_OR_NULL(dc->bdev))
bdev             1281 drivers/md/bcache/super.c 		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
bdev             1307 drivers/md/bcache/super.c 	struct request_queue *q = bdev_get_queue(dc->bdev);
bdev             1334 drivers/md/bcache/super.c 			 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
bdev             1356 drivers/md/bcache/super.c 				 struct block_device *bdev,
bdev             1363 drivers/md/bcache/super.c 	bdevname(bdev, dc->backing_dev_name);
bdev             1365 drivers/md/bcache/super.c 	dc->bdev = bdev;
bdev             1366 drivers/md/bcache/super.c 	dc->bdev->bd_holder = dc;
bdev             1377 drivers/md/bcache/super.c 	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
bdev             2142 drivers/md/bcache/super.c 	if (!IS_ERR_OR_NULL(ca->bdev))
bdev             2143 drivers/md/bcache/super.c 		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
bdev             2263 drivers/md/bcache/super.c 				struct block_device *bdev, struct cache *ca)
bdev             2268 drivers/md/bcache/super.c 	bdevname(bdev, ca->cache_dev_name);
bdev             2270 drivers/md/bcache/super.c 	ca->bdev = bdev;
bdev             2271 drivers/md/bcache/super.c 	ca->bdev->bd_holder = ca;
bdev             2277 drivers/md/bcache/super.c 	if (blk_queue_discard(bdev_get_queue(bdev)))
bdev             2288 drivers/md/bcache/super.c 		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
bdev             2299 drivers/md/bcache/super.c 			&part_to_dev(bdev->bd_part)->kobj,
bdev             2339 drivers/md/bcache/super.c static bool bch_is_open_backing(struct block_device *bdev)
bdev             2346 drivers/md/bcache/super.c 			if (dc->bdev == bdev)
bdev             2349 drivers/md/bcache/super.c 		if (dc->bdev == bdev)
bdev             2354 drivers/md/bcache/super.c static bool bch_is_open_cache(struct block_device *bdev)
bdev             2362 drivers/md/bcache/super.c 			if (ca->bdev == bdev)
bdev             2367 drivers/md/bcache/super.c static bool bch_is_open(struct block_device *bdev)
bdev             2369 drivers/md/bcache/super.c 	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
bdev             2378 drivers/md/bcache/super.c 	struct block_device *bdev = NULL;
bdev             2405 drivers/md/bcache/super.c 	bdev = blkdev_get_by_path(strim(path),
bdev             2408 drivers/md/bcache/super.c 	if (IS_ERR(bdev)) {
bdev             2409 drivers/md/bcache/super.c 		if (bdev == ERR_PTR(-EBUSY)) {
bdev             2410 drivers/md/bcache/super.c 			bdev = lookup_bdev(strim(path));
bdev             2412 drivers/md/bcache/super.c 			if (!IS_ERR(bdev) && bch_is_open(bdev))
bdev             2417 drivers/md/bcache/super.c 			if (!IS_ERR(bdev))
bdev             2418 drivers/md/bcache/super.c 				bdput(bdev);
bdev             2426 drivers/md/bcache/super.c 	if (set_blocksize(bdev, 4096))
bdev             2429 drivers/md/bcache/super.c 	err = read_super(sb, bdev, &sb_page);
bdev             2441 drivers/md/bcache/super.c 		ret = register_bdev(sb, sb_page, bdev, dc);
bdev             2445 drivers/md/bcache/super.c 			bdev = NULL;
bdev             2455 drivers/md/bcache/super.c 		if (register_cache(sb, sb_page, bdev, ca) != 0) {
bdev             2456 drivers/md/bcache/super.c 			bdev = NULL;
bdev             2471 drivers/md/bcache/super.c 	if (bdev)
bdev             2472 drivers/md/bcache/super.c 		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
bdev             1108 drivers/md/bcache/sysfs.c 		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
bdev              589 drivers/md/bcache/util.h static inline sector_t bdev_sectors(struct block_device *bdev)
bdev              591 drivers/md/bcache/util.h 	return bdev->bd_inode->i_size >> 9;
bdev               48 drivers/md/bcache/writeback.c 		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
bdev              358 drivers/md/bcache/writeback.c 		bio_set_dev(&io->bio, io->dc->bdev);
bdev              474 drivers/md/bcache/writeback.c 				    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
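
The bcache register path above claims a device by path, sets a 4 KiB block size and measures it through bd_inode. A condensed sketch of that sequence; the holder token and the error policy are simplified, and only the calls themselves come from the lines above.

    #include <linux/blkdev.h>
    #include <linux/err.h>
    #include <linux/fs.h>

    static struct block_device *example_claim_bdev(const char *path, void *holder,
                                                   sector_t *sectors)
    {
            struct block_device *bdev;

            bdev = blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                                      holder);
            if (IS_ERR(bdev))
                    return bdev;

            if (set_blocksize(bdev, 4096)) {
                    blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
                    return ERR_PTR(-EINVAL);
            }

            *sectors = i_size_read(bdev->bd_inode) >> 9;
            return bdev;
    }
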
bdev               88 drivers/md/dm-bufio.c 	struct block_device *bdev;
bdev              566 drivers/md/dm-bufio.c 		.bdev = b->c->bdev,
bdev              611 drivers/md/dm-bufio.c 	bio_set_dev(bio, b->c->bdev);
bdev             1317 drivers/md/dm-bufio.c 		.bdev = c->bdev,
bdev             1440 drivers/md/dm-bufio.c 	sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
bdev             1605 drivers/md/dm-bufio.c struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
bdev             1628 drivers/md/dm-bufio.c 	c->bdev = bdev;
bdev              108 drivers/md/dm-cache-metadata.c 	struct block_device *bdev;
bdev              337 drivers/md/dm-cache-metadata.c 	sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
bdev              452 drivers/md/dm-cache-metadata.c 	if (get_disk_ro(cmd->bdev->bd_disk))
bdev              536 drivers/md/dm-cache-metadata.c 	cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
bdev              742 drivers/md/dm-cache-metadata.c static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
bdev              760 drivers/md/dm-cache-metadata.c 	cmd->bdev = bdev;
bdev              790 drivers/md/dm-cache-metadata.c static struct dm_cache_metadata *lookup(struct block_device *bdev)
bdev              795 drivers/md/dm-cache-metadata.c 		if (cmd->bdev == bdev) {
bdev              803 drivers/md/dm-cache-metadata.c static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
bdev              812 drivers/md/dm-cache-metadata.c 	cmd = lookup(bdev);
bdev              818 drivers/md/dm-cache-metadata.c 	cmd = metadata_open(bdev, data_block_size, may_format_device,
bdev              822 drivers/md/dm-cache-metadata.c 		cmd2 = lookup(bdev);
bdev              848 drivers/md/dm-cache-metadata.c struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
bdev              854 drivers/md/dm-cache-metadata.c 	struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
bdev               59 drivers/md/dm-cache-metadata.h struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
bdev              808 drivers/md/dm-cache-target.c 	bio_set_dev(bio, cache->origin_dev->bdev);
bdev              817 drivers/md/dm-cache-target.c 	bio_set_dev(bio, cache->cache_dev->bdev);
bdev             1191 drivers/md/dm-cache-target.c 	o_region.bdev = cache->origin_dev->bdev;
bdev             1195 drivers/md/dm-cache-target.c 	c_region.bdev = cache->cache_dev->bdev;
bdev             2040 drivers/md/dm-cache-target.c 	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
bdev             2138 drivers/md/dm-cache-target.c 		       bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
bdev             2449 drivers/md/dm-cache-target.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             2538 drivers/md/dm-cache-target.c 	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
bdev             3233 drivers/md/dm-cache-target.c 		format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
bdev             3235 drivers/md/dm-cache-target.c 		format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
bdev             3237 drivers/md/dm-cache-target.c 		format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
bdev             3448 drivers/md/dm-cache-target.c 	struct block_device *origin_bdev = cache->origin_dev->bdev;
bdev             3471 drivers/md/dm-cache-target.c 	struct block_device *origin_bdev = cache->origin_dev->bdev;
bdev              118 drivers/md/dm-clone-metadata.c 	struct block_device *bdev;
bdev              450 drivers/md/dm-clone-metadata.c 	cmd->bm = dm_block_manager_create(cmd->bdev,
bdev              563 drivers/md/dm-clone-metadata.c struct dm_clone_metadata *dm_clone_metadata_open(struct block_device *bdev,
bdev              576 drivers/md/dm-clone-metadata.c 	cmd->bdev = bdev;
bdev               67 drivers/md/dm-clone-metadata.h struct dm_clone_metadata *dm_clone_metadata_open(struct block_device *bdev,
bdev              268 drivers/md/dm-clone-target.c 	bio_set_dev(bio, clone->source_dev->bdev);
bdev              273 drivers/md/dm-clone-target.c 	bio_set_dev(bio, clone->dest_dev->bdev);
bdev              824 drivers/md/dm-clone-target.c 	from.bdev = clone->source_dev->bdev;
bdev              828 drivers/md/dm-clone-target.c 	to.bdev = clone->dest_dev->bdev;
bdev             1156 drivers/md/dm-clone-target.c 	bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev);
bdev             1498 drivers/md/dm-clone-target.c 		format_dev_t(buf, clone->metadata_dev->bdev->bd_dev);
bdev             1501 drivers/md/dm-clone-target.c 		format_dev_t(buf, clone->dest_dev->bdev->bd_dev);
bdev             1504 drivers/md/dm-clone-target.c 		format_dev_t(buf, clone->source_dev->bdev->bd_dev);
bdev             1522 drivers/md/dm-clone-target.c 	source_q = bdev_get_queue(clone->source_dev->bdev);
bdev             1523 drivers/md/dm-clone-target.c 	dest_q = bdev_get_queue(clone->dest_dev->bdev);
bdev             1531 drivers/md/dm-clone-target.c 	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
bdev             1670 drivers/md/dm-clone-target.c 	if (region_size % (bdev_logical_block_size(clone->source_dev->bdev) >> 9) ||
bdev             1671 drivers/md/dm-clone-target.c 	    region_size % (bdev_logical_block_size(clone->dest_dev->bdev) >> 9)) {
bdev             1711 drivers/md/dm-clone-target.c 		       bdevname(clone->metadata_dev->bdev, b), DM_CLONE_METADATA_MAX_SECTORS);
bdev             1864 drivers/md/dm-clone-target.c 	clone->cmd = dm_clone_metadata_open(clone->metadata_dev->bdev, ti->len,
bdev             2038 drivers/md/dm-clone-target.c static bool bdev_supports_discards(struct block_device *bdev)
bdev             2040 drivers/md/dm-clone-target.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev             2051 drivers/md/dm-clone-target.c 	struct block_device *dest_dev = clone->dest_dev->bdev;
bdev             2073 drivers/md/dm-clone-target.c 	struct block_device *dest_bdev = clone->dest_dev->bdev;
bdev              107 drivers/md/dm-core.h 	struct block_device *bdev;
bdev              848 drivers/md/dm-crypt.c 	struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
bdev             1450 drivers/md/dm-crypt.c 	bio_set_dev(clone, cc->dev->bdev);
bdev             2760 drivers/md/dm-crypt.c 		bio_set_dev(bio, cc->dev->bdev);
bdev              297 drivers/md/dm-delay.c 	bio_set_dev(bio, c->dev->bdev);
bdev              214 drivers/md/dm-dust.c 	bio_set_dev(bio, dd->dev->bdev);
bdev              376 drivers/md/dm-dust.c 	sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
bdev              462 drivers/md/dm-dust.c static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
bdev              467 drivers/md/dm-dust.c 	*bdev = dev->bdev;
bdev              473 drivers/md/dm-dust.c 	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
bdev              261 drivers/md/dm-era-target.c 	struct block_device *bdev;
bdev              617 drivers/md/dm-era-target.c 	md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE,
bdev              777 drivers/md/dm-era-target.c static struct era_metadata *metadata_open(struct block_device *bdev,
bdev              787 drivers/md/dm-era-target.c 	md->bdev = bdev;
bdev             1196 drivers/md/dm-era-target.c 	bio_set_dev(bio, era->origin_dev->bdev);
bdev             1380 drivers/md/dm-era-target.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             1481 drivers/md/dm-era-target.c 	md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
bdev             1626 drivers/md/dm-era-target.c 		format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
bdev             1628 drivers/md/dm-era-target.c 		format_dev_t(buf, era->origin_dev->bdev->bd_dev);
bdev             1664 drivers/md/dm-era-target.c 	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
bdev              172 drivers/md/dm-exception-store.c 	    (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) ||
bdev              174 drivers/md/dm-exception-store.c 	    (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) {
bdev              169 drivers/md/dm-exception-store.h static inline sector_t get_dev_size(struct block_device *bdev)
bdev              171 drivers/md/dm-exception-store.h 	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
bdev              282 drivers/md/dm-flakey.c 	bio_set_dev(bio, fc->dev->bdev);
bdev              447 drivers/md/dm-flakey.c static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
bdev              451 drivers/md/dm-flakey.c 	*bdev = fc->dev->bdev;
bdev              457 drivers/md/dm-flakey.c 	    ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
bdev              470 drivers/md/dm-flakey.c 	ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector),
bdev              485 drivers/md/dm-integrity.c 	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
bdev              967 drivers/md/dm-integrity.c 	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
bdev             1081 drivers/md/dm-integrity.c 	io_loc.bdev = ic->dev->bdev;
bdev             1999 drivers/md/dm-integrity.c 	bio_set_dev(bio, ic->dev->bdev);
bdev             2421 drivers/md/dm-integrity.c 	io_loc.bdev = ic->dev->bdev;
bdev             2534 drivers/md/dm-integrity.c 		blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
bdev             3727 drivers/md/dm-integrity.c 	ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
bdev             3731 drivers/md/dm-integrity.c 		ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
bdev             3975 drivers/md/dm-integrity.c 	DEBUG_print("	data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
bdev             4012 drivers/md/dm-integrity.c 	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
bdev              306 drivers/md/dm-io.c 	struct request_queue *q = bdev_get_queue(where->bdev);
bdev              350 drivers/md/dm-io.c 		bio_set_dev(bio, where->bdev);
bdev             1486 drivers/md/dm-ioctl.c 		deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
bdev              806 drivers/md/dm-kcopyd.c 			if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
bdev              834 drivers/md/dm-kcopyd.c 			if (!bdev_write_zeroes_sectors(job->dests[i].bdev)) {
bdev               92 drivers/md/dm-linear.c 	bio_set_dev(bio, lc->dev->bdev);
bdev              122 drivers/md/dm-linear.c static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
bdev              127 drivers/md/dm-linear.c 	*bdev = dev->bdev;
bdev              133 drivers/md/dm-linear.c 	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
bdev              146 drivers/md/dm-linear.c 	ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
bdev              171 drivers/md/dm-linear.c 	struct block_device *bdev = lc->dev->bdev;
bdev              176 drivers/md/dm-linear.c 	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
bdev              186 drivers/md/dm-linear.c 	struct block_device *bdev = lc->dev->bdev;
bdev              191 drivers/md/dm-linear.c 	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
bdev              200 drivers/md/dm-linear.c 	struct block_device *bdev = lc->dev->bdev;
bdev              205 drivers/md/dm-linear.c 	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
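
The device-mapper targets indexed here (linear, flakey, delay, dust and friends) remap a bio by pointing it at the underlying dm_dev's bdev and returning DM_MAPIO_REMAPPED. A minimal map hook under that pattern; struct example_c is a hypothetical target context, the calls are the standard DM target API.

    #include <linux/bio.h>
    #include <linux/device-mapper.h>

    struct example_c {
            struct dm_dev *dev;
            sector_t start;         /* offset into the underlying device */
    };

    static int example_map(struct dm_target *ti, struct bio *bio)
    {
            struct example_c *ec = ti->private;

            bio_set_dev(bio, ec->dev->bdev);
            if (bio_sectors(bio))
                    bio->bi_iter.bi_sector =
                            ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

            return DM_MAPIO_REMAPPED;
    }
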
bdev              227 drivers/md/dm-log-writes.c 	bio_set_dev(bio, lc->logdev->bdev);
bdev              287 drivers/md/dm-log-writes.c 		bio_set_dev(bio, lc->logdev->bdev);
bdev              374 drivers/md/dm-log-writes.c 	bio_set_dev(bio, lc->logdev->bdev);
bdev              396 drivers/md/dm-log-writes.c 			bio_set_dev(bio, lc->logdev->bdev);
bdev              449 drivers/md/dm-log-writes.c 	return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
bdev              572 drivers/md/dm-log-writes.c 	lc->sectorsize = bdev_logical_block_size(lc->dev->bdev);
bdev              662 drivers/md/dm-log-writes.c 	bio_set_dev(bio, lc->dev->bdev);
bdev              841 drivers/md/dm-log-writes.c 				    struct block_device **bdev)
bdev              846 drivers/md/dm-log-writes.c 	*bdev = dev->bdev;
bdev              850 drivers/md/dm-log-writes.c 	if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
bdev              890 drivers/md/dm-log-writes.c 	struct request_queue *q = bdev_get_queue(lc->dev->bdev);
bdev              897 drivers/md/dm-log-writes.c 	limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
bdev              898 drivers/md/dm-log-writes.c 	limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
bdev              955 drivers/md/dm-log-writes.c 	ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages * PAGE_SIZE, &pgoff);
bdev              969 drivers/md/dm-log-writes.c 	if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
bdev              992 drivers/md/dm-log-writes.c 	if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
bdev              305 drivers/md/dm-log.c 		.bdev = lc->header_location.bdev,
bdev              439 drivers/md/dm-log.c 		lc->header_location.bdev = lc->log_dev->bdev;
bdev              448 drivers/md/dm-log.c 							    bdev));
bdev              450 drivers/md/dm-log.c 		if (buf_size > i_size_read(dev->bdev->bd_inode)) {
bdev              490 drivers/md/dm-mpath.c 	struct block_device *bdev;
bdev              514 drivers/md/dm-mpath.c 	bdev = pgpath->path.dev->bdev;
bdev              515 drivers/md/dm-mpath.c 	q = bdev_get_queue(bdev);
bdev              536 drivers/md/dm-mpath.c 	clone->rq_disk = bdev->bd_disk;
bdev              622 drivers/md/dm-mpath.c 	bio_set_dev(bio, pgpath->path.dev->bdev);
bdev              780 drivers/md/dm-mpath.c static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
bdev              783 drivers/md/dm-mpath.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev              816 drivers/md/dm-mpath.c 			       bdevname(bdev, b));
bdev              862 drivers/md/dm-mpath.c 	q = bdev_get_queue(p->path.dev->bdev);
bdev              866 drivers/md/dm-mpath.c 		r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
bdev             1505 drivers/md/dm-mpath.c 	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
bdev             1852 drivers/md/dm-mpath.c 				   struct block_device **bdev)
bdev             1864 drivers/md/dm-mpath.c 			*bdev = current_pgpath->path.dev->bdev;
bdev             1892 drivers/md/dm-mpath.c 	if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
bdev             1919 drivers/md/dm-mpath.c 	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
bdev              890 drivers/md/dm-raid.c 			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
bdev              892 drivers/md/dm-raid.c 		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
bdev             1260 drivers/md/dm-raid.c 			jdev->bdev = rs->journal_dev.dev->bdev;
bdev             1261 drivers/md/dm-raid.c 			jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode));
bdev             1592 drivers/md/dm-raid.c 		    rdev->bdev && rdev->sectors)
bdev             1606 drivers/md/dm-raid.c 		if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
bdev             1607 drivers/md/dm-raid.c 			ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
bdev             2684 drivers/md/dm-raid.c 	    to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
bdev             2973 drivers/md/dm-raid.c 		if (!rs->dev[i].rdev.bdev)
bdev             2976 drivers/md/dm-raid.c 		q = bdev_get_queue(rs->dev[i].rdev.bdev);
bdev             3393 drivers/md/dm-raid.c 	if (!rdev->bdev)
bdev              271 drivers/md/dm-raid1.c 		io[i].bdev = m->dev->bdev;
bdev              340 drivers/md/dm-raid1.c 	from.bdev = m->dev->bdev;
bdev              359 drivers/md/dm-raid1.c 		dest->bdev = m->dev->bdev;
bdev              460 drivers/md/dm-raid1.c 	bio_set_dev(bio, m->dev->bdev);
bdev              467 drivers/md/dm-raid1.c 	io->bdev = m->dev->bdev;
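
dm-raid1, dm-snap and dm-clone above describe I/O spans as struct dm_io_region triples (bdev, sector, count) before handing them to dm-io or dm-kcopyd. A hedged setup sketch; the kcopyd client and completion callback are assumed to exist elsewhere, and the dm_kcopyd_copy() argument order should be checked against the target kernel's dm-kcopyd.h.

    #include <linux/device-mapper.h>
    #include <linux/dm-io.h>
    #include <linux/dm-kcopyd.h>

    static void example_copy_region(struct dm_kcopyd_client *kc,
                                    struct dm_dev *src_dev, struct dm_dev *dst_dev,
                                    sector_t sector, sector_t nr_sectors,
                                    dm_kcopyd_notify_fn done, void *context)
    {
            struct dm_io_region from, to;

            from.bdev = src_dev->bdev;
            from.sector = sector;
            from.count = nr_sectors;

            to.bdev = dst_dev->bdev;
            to.sector = sector;
            to.count = nr_sectors;

            /* One source, one destination, default flags. */
            dm_kcopyd_copy(kc, &from, 1, &to, 0, done, context);
    }
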
bdev              233 drivers/md/dm-snap-persistent.c 		.bdev = dm_snap_cow(ps->store->snap)->bdev,
bdev              325 drivers/md/dm-snap-persistent.c 					    bdev) >> 9);
bdev              502 drivers/md/dm-snap-persistent.c 	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
bdev              583 drivers/md/dm-snap-persistent.c 	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
bdev              680 drivers/md/dm-snap-persistent.c 	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
bdev               43 drivers/md/dm-snap-transient.c 	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
bdev               69 drivers/md/dm-snap-transient.c 	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
bdev              313 drivers/md/dm-snap.c 	struct block_device *bdev;
bdev              380 drivers/md/dm-snap.c static unsigned origin_hash(struct block_device *bdev)
bdev              382 drivers/md/dm-snap.c 	return bdev->bd_dev & ORIGIN_MASK;
bdev              392 drivers/md/dm-snap.c 		if (bdev_equal(o->bdev, origin))
bdev              400 drivers/md/dm-snap.c 	struct list_head *sl = &_origins[origin_hash(o->bdev)];
bdev              411 drivers/md/dm-snap.c 		if (bdev_equal(o->dev->bdev, origin))
bdev              419 drivers/md/dm-snap.c 	struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
bdev              453 drivers/md/dm-snap.c 	o = __lookup_origin(snap->origin->bdev);
bdev              460 drivers/md/dm-snap.c 		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
bdev              551 drivers/md/dm-snap.c 	struct block_device *bdev = snap->origin->bdev;
bdev              566 drivers/md/dm-snap.c 	o = __lookup_origin(bdev);
bdev              575 drivers/md/dm-snap.c 		o->bdev = bdev;
bdev              593 drivers/md/dm-snap.c 	struct block_device *bdev = s->origin->bdev;
bdev              598 drivers/md/dm-snap.c 	__insert_snapshot(__lookup_origin(bdev), s);
bdev              608 drivers/md/dm-snap.c 	o = __lookup_origin(s->origin->bdev);
bdev              885 drivers/md/dm-snap.c 	cow_dev_size = get_dev_size(s->cow->bdev);
bdev             1080 drivers/md/dm-snap.c 	dest.bdev = s->origin->bdev;
bdev             1082 drivers/md/dm-snap.c 	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);
bdev             1084 drivers/md/dm-snap.c 	src.bdev = s->cow->bdev;
bdev             1275 drivers/md/dm-snap.c 	origin_dev = s->origin->bdev->bd_dev;
bdev             1791 drivers/md/dm-snap.c 	struct block_device *bdev = s->origin->bdev;
bdev             1794 drivers/md/dm-snap.c 	dev_size = get_dev_size(bdev);
bdev             1796 drivers/md/dm-snap.c 	src.bdev = bdev;
bdev             1800 drivers/md/dm-snap.c 	dest.bdev = s->cow->bdev;
bdev             1903 drivers/md/dm-snap.c 	bio_set_dev(bio, s->cow->bdev);
bdev             1925 drivers/md/dm-snap.c 	dest.bdev = s->cow->bdev;
bdev             1953 drivers/md/dm-snap.c 		bio_set_dev(bio, s->cow->bdev);
bdev             1987 drivers/md/dm-snap.c 			bio_set_dev(bio, s->origin->bdev);
bdev             2086 drivers/md/dm-snap.c 		bio_set_dev(bio, s->origin->bdev);
bdev             2120 drivers/md/dm-snap.c 			bio_set_dev(bio, s->origin->bdev);
bdev             2122 drivers/md/dm-snap.c 			bio_set_dev(bio, s->cow->bdev);
bdev             2148 drivers/md/dm-snap.c 			bio_set_dev(bio, s->origin->bdev);
bdev             2162 drivers/md/dm-snap.c 	bio_set_dev(bio, s->origin->bdev);
bdev             2229 drivers/md/dm-snap.c 	o = __lookup_dm_origin(s->origin->bdev);
bdev             2282 drivers/md/dm-snap.c static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
bdev             2287 drivers/md/dm-snap.c 	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
bdev             2305 drivers/md/dm-snap.c 	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
bdev             2379 drivers/md/dm-snap.c 		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);
bdev             2548 drivers/md/dm-snap.c 	o = __lookup_origin(origin->bdev);
bdev             2589 drivers/md/dm-snap.c 	o = __lookup_origin(merging_snap->origin->bdev);
bdev             2656 drivers/md/dm-snap.c 	bio_set_dev(bio, o->dev->bdev);
bdev             2682 drivers/md/dm-snap.c 	o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev);
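
The dm-snap.c entries above repeatedly remap a bio onto an underlying device with bio_set_dev(). A minimal sketch of that remap pattern follows; the target context, its fields and the function names are hypothetical, while bio_set_dev(), dm_target_offset() and DM_MAPIO_REMAPPED are the real device-mapper/block-layer interfaces the listing shows.

	/* Illustrative sketch only: remapping a bio in a dm target's ->map(). */
	#include <linux/device-mapper.h>
	#include <linux/bio.h>

	struct example_ctx {
		struct dm_dev *dev;	/* underlying device from dm_get_device() */
		sector_t start;		/* offset into that device */
	};

	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		struct example_ctx *ec = ti->private;

		/* Point the bio at the underlying block device ... */
		bio_set_dev(bio, ec->dev->bdev);
		/* ... and rebase the sector from the target's address space. */
		bio->bi_iter.bi_sector = ec->start +
			dm_target_offset(ti, bio->bi_iter.bi_sector);

		return DM_MAPIO_REMAPPED;
	}
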
bdev              274 drivers/md/dm-stripe.c 		bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev);
bdev              295 drivers/md/dm-stripe.c 		bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev);
bdev              311 drivers/md/dm-stripe.c 	bio_set_dev(bio, sc->stripe[stripe].dev->bdev);
bdev              323 drivers/md/dm-stripe.c 	struct block_device *bdev;
bdev              330 drivers/md/dm-stripe.c 	bdev = sc->stripe[stripe].dev->bdev;
bdev              332 drivers/md/dm-stripe.c 	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
bdev              344 drivers/md/dm-stripe.c 	struct block_device *bdev;
bdev              350 drivers/md/dm-stripe.c 	bdev = sc->stripe[stripe].dev->bdev;
bdev              352 drivers/md/dm-stripe.c 	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
bdev              363 drivers/md/dm-stripe.c 	struct block_device *bdev;
bdev              369 drivers/md/dm-stripe.c 	bdev = sc->stripe[stripe].dev->bdev;
bdev              371 drivers/md/dm-stripe.c 	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
bdev              325 drivers/md/dm-switch.c 	bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
bdev              515 drivers/md/dm-switch.c static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
bdev              522 drivers/md/dm-switch.c 	*bdev = sctx->path_list[path_nr].dmdev->bdev;
bdev              528 drivers/md/dm-switch.c 	    i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
bdev              270 drivers/md/dm-table.c 		if (dd->dm_dev->bdev->bd_dev == dev)
bdev              284 drivers/md/dm-table.c 	struct block_device *bdev = dev->bdev;
bdev              286 drivers/md/dm-table.c 		i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
bdev              296 drivers/md/dm-table.c 	q = bdev_get_queue(bdev);
bdev              300 drivers/md/dm-table.c 		       dm_device_name(ti->table->md), bdevname(bdev, b),
bdev              313 drivers/md/dm-table.c 		       dm_device_name(ti->table->md), bdevname(bdev, b),
bdev              324 drivers/md/dm-table.c 	if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
bdev              325 drivers/md/dm-table.c 		unsigned int zone_sectors = bdev_zone_sectors(bdev);
bdev              331 drivers/md/dm-table.c 			       zone_sectors, bdevname(bdev, b));
bdev              348 drivers/md/dm-table.c 			       zone_sectors, bdevname(bdev, b));
bdev              361 drivers/md/dm-table.c 		       limits->logical_block_size, bdevname(bdev, b));
bdev              370 drivers/md/dm-table.c 		       limits->logical_block_size, bdevname(bdev, b));
bdev              391 drivers/md/dm-table.c 	r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
bdev              408 drivers/md/dm-table.c 	struct block_device *bdev;
bdev              410 drivers/md/dm-table.c 	bdev = lookup_bdev(path);
bdev              411 drivers/md/dm-table.c 	if (IS_ERR(bdev))
bdev              414 drivers/md/dm-table.c 		dev = bdev->bd_dev;
bdev              415 drivers/md/dm-table.c 		bdput(bdev);
bdev              471 drivers/md/dm-table.c 	struct block_device *bdev = dev->bdev;
bdev              472 drivers/md/dm-table.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev              477 drivers/md/dm-table.c 		       dm_device_name(ti->table->md), bdevname(bdev, b));
bdev              481 drivers/md/dm-table.c 	if (bdev_stack_limits(limits, bdev, start) < 0)
bdev              485 drivers/md/dm-table.c 		       dm_device_name(ti->table->md), bdevname(bdev, b),
bdev              887 drivers/md/dm-table.c 	return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
bdev              929 drivers/md/dm-table.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             1214 drivers/md/dm-table.c 		template_disk = dd->dm_dev->bdev->bd_disk;
bdev             1422 drivers/md/dm-table.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             1452 drivers/md/dm-table.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             1635 drivers/md/dm-table.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             1702 drivers/md/dm-table.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             1710 drivers/md/dm-table.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             1738 drivers/md/dm-table.c 	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
bdev             1749 drivers/md/dm-table.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             1776 drivers/md/dm-table.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             1803 drivers/md/dm-table.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             1837 drivers/md/dm-table.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             1865 drivers/md/dm-table.c 	struct request_queue *q = bdev_get_queue(dev->bdev);
bdev             2095 drivers/md/dm-table.c 		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
bdev             2103 drivers/md/dm-table.c 				     bdevname(dd->dm_dev->bdev, b));
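
Most dm-table.c entries above fetch the underlying request queue with bdev_get_queue() inside an iterate_devices callback and test a queue flag. A sketch of that callback shape, with a hypothetical function name and blk_queue_discard() as the example capability check:

	/* Sketch of the per-device capability check used by dm-table.c. */
	#include <linux/device-mapper.h>
	#include <linux/blkdev.h>

	static int device_supports_discard(struct dm_target *ti, struct dm_dev *dev,
					   sector_t start, sector_t len, void *data)
	{
		struct request_queue *q = bdev_get_queue(dev->bdev);

		/* q can be NULL for devices without a request queue. */
		return q && blk_queue_discard(q);
	}

	/* Typical use: ti->type->iterate_devices(ti, device_supports_discard, NULL); */
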
bdev              148 drivers/md/dm-thin-metadata.c 	struct block_device *bdev;
bdev              529 drivers/md/dm-thin-metadata.c 	sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
bdev              639 drivers/md/dm-thin-metadata.c 	if (get_disk_ro(pmd->bdev->bd_disk))
bdev              738 drivers/md/dm-thin-metadata.c 	pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
bdev              892 drivers/md/dm-thin-metadata.c struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
bdev              910 drivers/md/dm-thin-metadata.c 	pmd->bdev = bdev;
bdev               44 drivers/md/dm-thin-metadata.h struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
bdev              403 drivers/md/dm-thin.c 	return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
bdev              726 drivers/md/dm-thin.c 	bio_set_dev(bio, tc->pool_dev->bdev);
bdev              738 drivers/md/dm-thin.c 	bio_set_dev(bio, tc->origin_dev->bdev);
bdev             1296 drivers/md/dm-thin.c 	to.bdev = tc->pool_dev->bdev;
bdev             1356 drivers/md/dm-thin.c 		from.bdev = origin->bdev;
bdev             1360 drivers/md/dm-thin.c 		to.bdev = tc->pool_dev->bdev;
bdev             2817 drivers/md/dm-thin.c 	q = bdev_get_queue(pt->data_dev->bdev);
bdev             2841 drivers/md/dm-thin.c 	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
bdev             2858 drivers/md/dm-thin.c 	struct block_device *data_bdev = pt->data_dev->bdev;
bdev             3230 drivers/md/dm-thin.c 	bio_set_dev(flush_bio, pt->data_dev->bdev);
bdev             3236 drivers/md/dm-thin.c static sector_t get_dev_size(struct block_device *bdev)
bdev             3238 drivers/md/dm-thin.c 	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
bdev             3241 drivers/md/dm-thin.c static void warn_if_metadata_device_too_big(struct block_device *bdev)
bdev             3243 drivers/md/dm-thin.c 	sector_t metadata_dev_size = get_dev_size(bdev);
bdev             3248 drivers/md/dm-thin.c 		       bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
bdev             3251 drivers/md/dm-thin.c static sector_t get_metadata_dev_size(struct block_device *bdev)
bdev             3253 drivers/md/dm-thin.c 	sector_t metadata_dev_size = get_dev_size(bdev);
bdev             3261 drivers/md/dm-thin.c static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
bdev             3263 drivers/md/dm-thin.c 	sector_t metadata_dev_size = get_metadata_dev_size(bdev);
bdev             3283 drivers/md/dm-thin.c 	dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
bdev             3350 drivers/md/dm-thin.c 	warn_if_metadata_device_too_big(metadata_dev->bdev);
bdev             3379 drivers/md/dm-thin.c 	pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev,
bdev             3463 drivers/md/dm-thin.c 	bio_set_dev(bio, pt->data_dev->bdev);
bdev             4040 drivers/md/dm-thin.c 		       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
bdev             4041 drivers/md/dm-thin.c 		       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
bdev             4249 drivers/md/dm-thin.c 	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
bdev             4405 drivers/md/dm-thin.c 		tc->origin_size = get_dev_size(tc->origin_dev->bdev);
bdev             4454 drivers/md/dm-thin.c 			       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
bdev             4457 drivers/md/dm-thin.c 				DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
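
The device-size idiom that recurs through the dm-thin, dm-snap and dm-verity entries above derives a device's capacity in 512-byte sectors from its backing inode, and compares a target block size against the device's logical block size. A small sketch of both helpers (get_dev_size mirrors the function listed above; block_size_ok is a hypothetical name):

	#include <linux/fs.h>
	#include <linux/blkdev.h>

	static sector_t get_dev_size(struct block_device *bdev)
	{
		/* i_size_read() returns bytes; shift down to 512-byte sectors. */
		return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	}

	static bool block_size_ok(struct block_device *bdev, unsigned int block_size)
	{
		/* Targets reject block sizes below the device's logical block size. */
		return block_size >= bdev_logical_block_size(bdev);
	}
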
bdev              138 drivers/md/dm-unstripe.c 	bio_set_dev(bio, uc->dev->bdev);
bdev              746 drivers/md/dm-verity-fec.c 	f->bufio = dm_bufio_client_create(f->dev->bdev,
bdev              760 drivers/md/dm-verity-fec.c 	f->data_bufio = dm_bufio_client_create(v->data_dev->bdev,
bdev              638 drivers/md/dm-verity-target.c 	bio_set_dev(bio, v->data_dev->bdev);
bdev              747 drivers/md/dm-verity-target.c static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
bdev              751 drivers/md/dm-verity-target.c 	*bdev = v->data_dev->bdev;
bdev              754 drivers/md/dm-verity-target.c 	    ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
bdev             1005 drivers/md/dm-verity-target.c 	    num < bdev_logical_block_size(v->data_dev->bdev) ||
bdev             1015 drivers/md/dm-verity-target.c 	    num < bdev_logical_block_size(v->hash_dev->bdev) ||
bdev             1162 drivers/md/dm-verity-target.c 	v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
bdev              470 drivers/md/dm-writecache.c 		region.bdev = wc->ssd_dev->bdev;
bdev              519 drivers/md/dm-writecache.c 	region.bdev = dev->bdev;
bdev              886 drivers/md/dm-writecache.c 	region.bdev = wc->ssd_dev->bdev;
bdev             1128 drivers/md/dm-writecache.c 			bio_set_dev(bio, wc->dev->bdev);
bdev             1205 drivers/md/dm-writecache.c 				bio_set_dev(bio, wc->ssd_dev->bdev);
bdev             1247 drivers/md/dm-writecache.c 				bio_set_dev(bio, wc->ssd_dev->bdev);
bdev             1268 drivers/md/dm-writecache.c 	bio_set_dev(bio, wc->dev->bdev);
bdev             1519 drivers/md/dm-writecache.c 		bio_set_dev(bio, wc->dev->bdev);
bdev             1573 drivers/md/dm-writecache.c 		from.bdev = wc->ssd_dev->bdev;
bdev             1576 drivers/md/dm-writecache.c 		to.bdev = wc->dev->bdev;
bdev             2000 drivers/md/dm-writecache.c 	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
bdev             2015 drivers/md/dm-writecache.c 	if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
bdev             2016 drivers/md/dm-writecache.c 	    wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
bdev              443 drivers/md/dm-zoned-metadata.c 	bio_set_dev(bio, zmd->dev->bdev);
bdev              597 drivers/md/dm-zoned-metadata.c 	bio_set_dev(bio, zmd->dev->bdev);
bdev              624 drivers/md/dm-zoned-metadata.c 	bio_set_dev(bio, zmd->dev->bdev);
bdev              664 drivers/md/dm-zoned-metadata.c 		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
bdev              706 drivers/md/dm-zoned-metadata.c 		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
bdev              775 drivers/md/dm-zoned-metadata.c 		ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
bdev             1201 drivers/md/dm-zoned-metadata.c 		ret = blkdev_report_zones(dev->bdev, sector, blkz, &nr_blkz);
bdev             1250 drivers/md/dm-zoned-metadata.c 	ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
bdev             1327 drivers/md/dm-zoned-metadata.c 		ret = blkdev_reset_zones(dev->bdev,
bdev             2475 drivers/md/dm-zoned-metadata.c 		     bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
bdev               77 drivers/md/dm-zoned-reclaim.c 	ret = blkdev_issue_zeroout(zrc->dev->bdev,
bdev              159 drivers/md/dm-zoned-reclaim.c 		src.bdev = dev->bdev;
bdev              163 drivers/md/dm-zoned-reclaim.c 		dst.bdev = dev->bdev;
bdev              128 drivers/md/dm-zoned-target.c 	bio_set_dev(clone, dmz->dev->bdev);
bdev              582 drivers/md/dm-zoned-target.c 	if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
bdev              605 drivers/md/dm-zoned-target.c 	disk = dmz_dev->bdev->bd_disk;
bdev              637 drivers/md/dm-zoned-target.c 	bio_set_dev(bio, dev->bdev);
bdev              704 drivers/md/dm-zoned-target.c 	dev->bdev = dmz->ddev->bdev;
bdev              705 drivers/md/dm-zoned-target.c 	(void)bdevname(dev->bdev, dev->name);
bdev              707 drivers/md/dm-zoned-target.c 	if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
bdev              713 drivers/md/dm-zoned-target.c 	q = bdev_get_queue(dev->bdev);
bdev              714 drivers/md/dm-zoned-target.c 	dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
bdev              730 drivers/md/dm-zoned-target.c 	dev->nr_zones = blkdev_nr_zones(dev->bdev);
bdev              923 drivers/md/dm-zoned-target.c static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
bdev              930 drivers/md/dm-zoned-target.c 	*bdev = dmz->dev->bdev;
bdev               52 drivers/md/dm-zoned.h 	struct block_device	*bdev;
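
The prepare_ioctl entries above (dm-switch, dm-verity, dm-zoned) all hand back the underlying bdev and return 1 when the target does not map the whole device. A sketch of that hook, assuming the same hypothetical example_ctx as earlier; the comment reflects how dm core treats a positive return, stated loosely rather than authoritatively:

	#include <linux/device-mapper.h>
	#include <linux/fs.h>
	#include <linux/blkdev.h>

	static int example_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
	{
		struct example_ctx *ec = ti->private;

		*bdev = ec->dev->bdev;

		/*
		 * Returning > 0 tells dm core the target maps only part of the
		 * device, so ioctl passthrough is restricted.
		 */
		if (ec->start ||
		    ti->len != i_size_read(ec->dev->bdev->bd_inode) >> SECTOR_SHIFT)
			return 1;
		return 0;
	}
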
bdev              329 drivers/md/dm.c static int dm_blk_open(struct block_device *bdev, fmode_t mode)
bdev              335 drivers/md/dm.c 	md = bdev->bd_disk->private_data;
bdev              436 drivers/md/dm.c static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              438 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
bdev              493 drivers/md/dm.c 			    struct block_device **bdev)
bdev              517 drivers/md/dm.c 	r = tgt->type->prepare_ioctl(tgt, bdev);
bdev              533 drivers/md/dm.c static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
bdev              536 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
bdev              539 drivers/md/dm.c 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
bdev              557 drivers/md/dm.c 	r =  __blkdev_driver_ioctl(bdev, mode, cmd, arg);
bdev              745 drivers/md/dm.c 	struct block_device *bdev;
bdev              749 drivers/md/dm.c 	BUG_ON(td->dm_dev.bdev);
bdev              751 drivers/md/dm.c 	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr);
bdev              752 drivers/md/dm.c 	if (IS_ERR(bdev))
bdev              753 drivers/md/dm.c 		return PTR_ERR(bdev);
bdev              755 drivers/md/dm.c 	r = bd_link_disk_holder(bdev, dm_disk(md));
bdev              757 drivers/md/dm.c 		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
bdev              761 drivers/md/dm.c 	td->dm_dev.bdev = bdev;
bdev              762 drivers/md/dm.c 	td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
bdev              771 drivers/md/dm.c 	if (!td->dm_dev.bdev)
bdev              774 drivers/md/dm.c 	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
bdev              775 drivers/md/dm.c 	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
bdev              777 drivers/md/dm.c 	td->dm_dev.bdev = NULL;
bdev              787 drivers/md/dm.c 		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
bdev              809 drivers/md/dm.c 		td->dm_dev.bdev = NULL;
bdev             1108 drivers/md/dm.c static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
bdev             1434 drivers/md/dm.c 	bio_set_dev(ci->bio, ci->io->md->bdev);
bdev             1906 drivers/md/dm.c 	if (md->bdev) {
bdev             1907 drivers/md/dm.c 		bdput(md->bdev);
bdev             1908 drivers/md/dm.c 		md->bdev = NULL;
bdev             2004 drivers/md/dm.c 	md->bdev = bdget_disk(md->disk, 0);
bdev             2005 drivers/md/dm.c 	if (!md->bdev)
bdev             2119 drivers/md/dm.c 	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
bdev             2547 drivers/md/dm.c 	md->frozen_sb = freeze_bdev(md->bdev);
bdev             2564 drivers/md/dm.c 	thaw_bdev(md->bdev, md->frozen_sb);
bdev             3054 drivers/md/dm.c static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
bdev             3057 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
bdev             3088 drivers/md/dm.c 	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
bdev             3092 drivers/md/dm.c 	return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
bdev             3095 drivers/md/dm.c static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
bdev             3106 drivers/md/dm.c 	ret = dm_call_pr(bdev, __dm_pr_register, &pr);
bdev             3113 drivers/md/dm.c 		dm_call_pr(bdev, __dm_pr_register, &pr);
bdev             3119 drivers/md/dm.c static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
bdev             3122 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
bdev             3126 drivers/md/dm.c 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
bdev             3130 drivers/md/dm.c 	ops = bdev->bd_disk->fops->pr_ops;
bdev             3132 drivers/md/dm.c 		r = ops->pr_reserve(bdev, key, type, flags);
bdev             3140 drivers/md/dm.c static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
bdev             3142 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
bdev             3146 drivers/md/dm.c 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
bdev             3150 drivers/md/dm.c 	ops = bdev->bd_disk->fops->pr_ops;
bdev             3152 drivers/md/dm.c 		r = ops->pr_release(bdev, key, type);
bdev             3160 drivers/md/dm.c static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
bdev             3163 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
bdev             3167 drivers/md/dm.c 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
bdev             3171 drivers/md/dm.c 	ops = bdev->bd_disk->fops->pr_ops;
bdev             3173 drivers/md/dm.c 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
bdev             3181 drivers/md/dm.c static int dm_pr_clear(struct block_device *bdev, u64 key)
bdev             3183 drivers/md/dm.c 	struct mapped_device *md = bdev->bd_disk->private_data;
bdev             3187 drivers/md/dm.c 	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
bdev             3191 drivers/md/dm.c 	ops = bdev->bd_disk->fops->pr_ops;
bdev             3193 drivers/md/dm.c 		r = ops->pr_clear(bdev, key);
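
The dm.c entries above around open_table_device()/close_table_device() show the exclusive-open lifecycle: claim the device with FMODE_EXCL and a holder cookie, link it under the owning gendisk, and undo both on close. A sketch under those assumptions; the claim pointer and function names are hypothetical:

	#include <linux/fs.h>
	#include <linux/blkdev.h>
	#include <linux/genhd.h>
	#include <linux/err.h>

	static void *example_claim_ptr = "example holder";

	static struct block_device *example_open_bdev(dev_t dev, fmode_t mode,
						      struct gendisk *owner)
	{
		struct block_device *bdev;
		int r;

		bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, example_claim_ptr);
		if (IS_ERR(bdev))
			return bdev;

		/* Expose the holder relationship in sysfs, as dm does. */
		r = bd_link_disk_holder(bdev, owner);
		if (r) {
			blkdev_put(bdev, mode | FMODE_EXCL);
			return ERR_PTR(r);
		}
		return bdev;
	}

	static void example_close_bdev(struct block_device *bdev, fmode_t mode,
				       struct gendisk *owner)
	{
		bd_unlink_disk_holder(bdev, owner);
		blkdev_put(bdev, mode | FMODE_EXCL);
	}
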
bdev              167 drivers/md/md-bitmap.c 				 roundup(size, bdev_logical_block_size(rdev->bdev)),
bdev              215 drivers/md/md-bitmap.c 	struct block_device *bdev;
bdev              225 drivers/md/md-bitmap.c 		bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
bdev              232 drivers/md/md-bitmap.c 				       bdev_logical_block_size(bdev));
bdev              210 drivers/md/md-faulty.c 		bio_set_dev(b, conf->rdev->bdev);
bdev              215 drivers/md/md-faulty.c 		bio_set_dev(bio, conf->rdev->bdev);
bdev              315 drivers/md/md-faulty.c 		disk_stack_limits(mddev->gendisk, rdev->bdev,
bdev               64 drivers/md/md-linear.c 		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
bdev              117 drivers/md/md-linear.c 		disk_stack_limits(mddev->gendisk, rdev->bdev,
bdev              123 drivers/md/md-linear.c 		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
bdev              274 drivers/md/md-linear.c 	bio_set_dev(bio, tmp_dev->rdev->bdev);
bdev              297 drivers/md/md-linear.c 	       bdevname(tmp_dev->rdev->bdev, b),
bdev               93 drivers/md/md-multipath.c 			bdevname(rdev->bdev,b),
bdev              128 drivers/md/md-multipath.c 	bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
bdev              163 drivers/md/md-multipath.c 			struct request_queue *q = bdev_get_queue(rdev->bdev);
bdev              207 drivers/md/md-multipath.c 	       bdevname(rdev->bdev, b),
bdev              230 drivers/md/md-multipath.c 				 bdevname(tmp->rdev->bdev,b));
bdev              250 drivers/md/md-multipath.c 			disk_stack_limits(mddev->gendisk, rdev->bdev,
bdev              347 drivers/md/md-multipath.c 			bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev);
bdev              408 drivers/md/md-multipath.c 		disk_stack_limits(mddev->gendisk, rdev->bdev,
bdev              130 drivers/md/md.c 	if (rdev->bdev->bd_queue->nr_hw_queues == 1)
bdev              517 drivers/md/md.c 			bio_set_dev(bi, rdev->bdev);
bdev              785 drivers/md/md.c 		if (rdev->bdev->bd_dev == dev)
bdev              796 drivers/md/md.c 		if (rdev->bdev->bd_dev == dev)
bdev              818 drivers/md/md.c 	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
bdev              891 drivers/md/md.c 	bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
bdev              925 drivers/md/md.c 		bio_set_dev(bio, rdev->bdev);
bdev              959 drivers/md/md.c 	       bdevname(rdev->bdev,b));
bdev             1124 drivers/md/md.c 	bdevname(rdev->bdev, b);
bdev             1177 drivers/md/md.c 				b, bdevname(refdev->bdev,b2));
bdev             1182 drivers/md/md.c 				b, bdevname(refdev->bdev, b2));
bdev             1437 drivers/md/md.c 		d->major = MAJOR(rdev2->bdev->bd_dev);
bdev             1438 drivers/md/md.c 		d->minor = MINOR(rdev2->bdev->bd_dev);
bdev             1560 drivers/md/md.c 		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
bdev             1592 drivers/md/md.c 			bdevname(rdev->bdev,b));
bdev             1597 drivers/md/md.c 			bdevname(rdev->bdev,b));
bdev             1615 drivers/md/md.c 	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
bdev             1704 drivers/md/md.c 				bdevname(rdev->bdev,b),
bdev             1705 drivers/md/md.c 				bdevname(refdev->bdev,b2));
bdev             1717 drivers/md/md.c 		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
bdev             2029 drivers/md/md.c 		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
bdev             2079 drivers/md/md.c 		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
bdev             2089 drivers/md/md.c 		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
bdev             2189 drivers/md/md.c 			if (rdev->bdev->bd_contains ==
bdev             2190 drivers/md/md.c 			    rdev2->bdev->bd_contains) {
bdev             2229 drivers/md/md.c 		if (blk_integrity_compare(reference->bdev->bd_disk,
bdev             2230 drivers/md/md.c 				rdev->bdev->bd_disk) < 0)
bdev             2233 drivers/md/md.c 	if (!reference || !bdev_get_integrity(reference->bdev))
bdev             2240 drivers/md/md.c 			       bdev_get_integrity(reference->bdev));
bdev             2269 drivers/md/md.c 	if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
bdev             2271 drivers/md/md.c 		       mdname(mddev), bdevname(rdev->bdev, name));
bdev             2286 drivers/md/md.c 	if (find_rdev(mddev, rdev->bdev->bd_dev))
bdev             2289 drivers/md/md.c 	if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
bdev             2333 drivers/md/md.c 	bdevname(rdev->bdev,b);
bdev             2345 drivers/md/md.c 	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
bdev             2351 drivers/md/md.c 	bd_link_disk_holder(rdev->bdev, mddev->gendisk);
bdev             2375 drivers/md/md.c 	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
bdev             2377 drivers/md/md.c 	pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
bdev             2402 drivers/md/md.c 	struct block_device *bdev;
bdev             2405 drivers/md/md.c 	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
bdev             2407 drivers/md/md.c 	if (IS_ERR(bdev)) {
bdev             2409 drivers/md/md.c 		return PTR_ERR(bdev);
bdev             2411 drivers/md/md.c 	rdev->bdev = bdev;
bdev             2417 drivers/md/md.c 	struct block_device *bdev = rdev->bdev;
bdev             2418 drivers/md/md.c 	rdev->bdev = NULL;
bdev             2419 drivers/md/md.c 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
bdev             2428 drivers/md/md.c 	pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
bdev             2432 drivers/md/md.c 		md_autodetect_dev(rdev->bdev->bd_dev);
bdev             2694 drivers/md/md.c 				 bdevname(rdev->bdev, b),
bdev             2707 drivers/md/md.c 				 bdevname(rdev->bdev, b));
bdev             3267 drivers/md/md.c 			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
bdev             3293 drivers/md/md.c 				if (rdev->bdev == rdev2->bdev &&
bdev             3594 drivers/md/md.c 	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
bdev             3597 drivers/md/md.c 			bdevname(rdev->bdev,b));
bdev             3607 drivers/md/md.c 				bdevname(rdev->bdev,b),
bdev             3613 drivers/md/md.c 				bdevname(rdev->bdev,b));
bdev             3621 drivers/md/md.c 	if (rdev->bdev)
bdev             3649 drivers/md/md.c 				bdevname(rdev->bdev,b));
bdev             3668 drivers/md/md.c 				mdname(mddev), bdevname(rdev->bdev, b),
bdev             3677 drivers/md/md.c 					bdevname(rdev->bdev,b));
bdev             4276 drivers/md/md.c static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
bdev             4277 drivers/md/md.c static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
bdev             5632 drivers/md/md.c 		sync_blockdev(rdev->bdev);
bdev             5633 drivers/md/md.c 		invalidate_bdev(rdev->bdev);
bdev             5635 drivers/md/md.c 		    (bdev_read_only(rdev->bdev) ||
bdev             5720 drivers/md/md.c 				    rdev->bdev->bd_contains ==
bdev             5721 drivers/md/md.c 				    rdev2->bdev->bd_contains) {
bdev             5724 drivers/md/md.c 						bdevname(rdev->bdev,b),
bdev             5725 drivers/md/md.c 						bdevname(rdev2->bdev,b2));
bdev             5795 drivers/md/md.c 			    !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
bdev             5932 drivers/md/md.c 		if (bdev_read_only(rdev->bdev))
bdev             6082 drivers/md/md.c static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
bdev             6109 drivers/md/md.c 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
bdev             6145 drivers/md/md.c 		      struct block_device *bdev)
bdev             6170 drivers/md/md.c 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
bdev             6246 drivers/md/md.c 		pr_cont("<%s>", bdevname(rdev->bdev,b));
bdev             6283 drivers/md/md.c 		pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
bdev             6288 drivers/md/md.c 					 bdevname(rdev->bdev,b));
bdev             6306 drivers/md/md.c 				bdevname(rdev0->bdev, b), rdev0->preferred_minor);
bdev             6322 drivers/md/md.c 				mdname(mddev), bdevname(rdev0->bdev,b));
bdev             6465 drivers/md/md.c 		info.major = MAJOR(rdev->bdev->bd_dev);
bdev             6466 drivers/md/md.c 		info.minor = MINOR(rdev->bdev->bd_dev);
bdev             6527 drivers/md/md.c 					bdevname(rdev->bdev,b),
bdev             6528 drivers/md/md.c 					bdevname(rdev0->bdev,b2));
bdev             6686 drivers/md/md.c 			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
bdev             6737 drivers/md/md.c 		 bdevname(rdev->bdev,b), mdname(mddev));
bdev             6771 drivers/md/md.c 		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
bdev             6777 drivers/md/md.c 			bdevname(rdev->bdev,b), mdname(mddev));
bdev             7240 drivers/md/md.c static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev             7242 drivers/md/md.c 	struct mddev *mddev = bdev->bd_disk->private_data;
bdev             7276 drivers/md/md.c static int md_ioctl(struct block_device *bdev, fmode_t mode,
bdev             7320 drivers/md/md.c 	mddev = bdev->bd_disk->private_data;
bdev             7377 drivers/md/md.c 		sync_blockdev(bdev);
bdev             7442 drivers/md/md.c 		err = do_md_stop(mddev, 0, bdev);
bdev             7446 drivers/md/md.c 		err = md_set_readonly(mddev, bdev);
bdev             7573 drivers/md/md.c static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
bdev             7588 drivers/md/md.c 	return md_ioctl(bdev, mode, cmd, arg);
bdev             7592 drivers/md/md.c static int md_open(struct block_device *bdev, fmode_t mode)
bdev             7598 drivers/md/md.c 	struct mddev *mddev = mddev_find(bdev->bd_dev);
bdev             7604 drivers/md/md.c 	if (mddev->gendisk != bdev->bd_disk) {
bdev             7614 drivers/md/md.c 	BUG_ON(mddev != bdev->bd_disk->private_data);
bdev             7629 drivers/md/md.c 	check_disk_change(bdev);
bdev             7803 drivers/md/md.c 			      bdevname(rdev->bdev,b));
bdev             8041 drivers/md/md.c 				bdevname(rdev->bdev,b), rdev->desc_nr);
bdev             8226 drivers/md/md.c 		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
bdev             9385 drivers/md/md.c 				pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
bdev             9403 drivers/md/md.c 					bdevname(rdev2->bdev,b));
bdev               51 drivers/md/md.h 	struct block_device *bdev;	/* block device handle */
bdev              537 drivers/md/md.h static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
bdev              539 drivers/md/md.h 	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
bdev              747 drivers/md/md.h 	int flags = rdev->bdev->bd_disk->flags;
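
Several md.c entries above compute superblock placement from the bdev size (i_size_read(rdev->bdev->bd_inode) >> 9, then "- 8*2"). The sketch below reproduces that arithmetic for a v1.0-style superblock near the end of the device; the masking constant follows the same style of constants seen in md.c and should be read as an illustration, not the authoritative layout code.

	#include <linux/fs.h>
	#include <linux/blkdev.h>

	static sector_t example_sb_start_v1_0(struct block_device *bdev)
	{
		/* Device size in 512-byte sectors. */
		sector_t dev_sectors = i_size_read(bdev->bd_inode) >> 9;
		sector_t sb_start;

		/* Superblock sits 8KiB (16 sectors) from the end ... */
		sb_start = dev_sectors - 8 * 2;
		/* ... rounded down to a 4KiB boundary. */
		sb_start &= ~(sector_t)(4 * 2 - 1);
		return sb_start;
	}
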
bdev              378 drivers/md/persistent-data/dm-block-manager.c struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
bdev              391 drivers/md/persistent-data/dm-block-manager.c 	bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
bdev               35 drivers/md/persistent-data/dm-block-manager.h 	struct block_device *bdev, unsigned block_size,
bdev               40 drivers/md/raid0.c 		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
bdev               68 drivers/md/raid0.c 							       + k]->bdev, b));
bdev               98 drivers/md/raid0.c 			 bdevname(rdev1->bdev, b));
bdev              107 drivers/md/raid0.c 				      rdev1->bdev->bd_disk->queue));
bdev              113 drivers/md/raid0.c 				 bdevname(rdev1->bdev,b),
bdev              115 drivers/md/raid0.c 				 bdevname(rdev2->bdev,b2),
bdev              260 drivers/md/raid0.c 					 bdevname(rdev->bdev, b));
bdev              266 drivers/md/raid0.c 				 bdevname(rdev->bdev, b), c);
bdev              410 drivers/md/raid0.c 			disk_stack_limits(mddev->gendisk, rdev->bdev,
bdev              412 drivers/md/raid0.c 			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
bdev              551 drivers/md/raid0.c 		if (__blkdev_issue_discard(rdev->bdev,
bdev              559 drivers/md/raid0.c 			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
bdev              627 drivers/md/raid0.c 	bio_set_dev(bio, tmp_dev->bdev);
bdev              384 drivers/md/raid1.c 				   bdevname(rdev->bdev, b),
bdev              688 drivers/md/raid1.c 		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
bdev              699 drivers/md/raid1.c 			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
bdev              785 drivers/md/raid1.c 			struct request_queue *q = bdev_get_queue(rdev->bdev);
bdev              812 drivers/md/raid1.c 		bio_set_dev(bio, rdev->bdev);
bdev             1239 drivers/md/raid1.c 			bdevname(rdev->bdev, b);
bdev             1280 drivers/md/raid1.c 				    bdevname(mirror->rdev->bdev, b));
bdev             1311 drivers/md/raid1.c 	bio_set_dev(read_bio, mirror->rdev->bdev);
bdev             1525 drivers/md/raid1.c 		bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
bdev             1649 drivers/md/raid1.c 		mdname(mddev), bdevname(rdev->bdev, b),
bdev             1673 drivers/md/raid1.c 				 bdevname(rdev->bdev,b));
bdev             1774 drivers/md/raid1.c 				disk_stack_limits(mddev->gendisk, rdev->bdev,
bdev             1800 drivers/md/raid1.c 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
bdev             2133 drivers/md/raid1.c 		bio_set_dev(b, conf->mirrors[i].rdev->bdev);
bdev             2228 drivers/md/raid1.c 		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
bdev             2329 drivers/md/raid1.c 						bdevname(rdev->bdev, b));
bdev             2367 drivers/md/raid1.c 				bdev_logical_block_size(rdev->bdev) >> 9);
bdev             2394 drivers/md/raid1.c 		bio_set_dev(wbio, rdev->bdev);
bdev             2787 drivers/md/raid1.c 			bio_set_dev(bio, rdev->bdev);
bdev             3119 drivers/md/raid1.c 		disk_stack_limits(mddev->gendisk, rdev->bdev,
bdev             3121 drivers/md/raid1.c 		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
bdev              395 drivers/md/raid10.c 				   bdevname(rdev->bdev, b),
bdev              798 drivers/md/raid10.c 		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
bdev              867 drivers/md/raid10.c 			struct request_queue *q = bdev_get_queue(rdev->bdev);
bdev              912 drivers/md/raid10.c 			bio_set_dev(bio, rdev->bdev);
bdev             1097 drivers/md/raid10.c 		bio_set_dev(bio, rdev->bdev);
bdev             1167 drivers/md/raid10.c 			bdevname(err_rdev->bdev, b);
bdev             1190 drivers/md/raid10.c 				   bdevname(rdev->bdev, b),
bdev             1212 drivers/md/raid10.c 	bio_set_dev(read_bio, rdev->bdev);
bdev             1261 drivers/md/raid10.c 	bio_set_dev(mbio, rdev->bdev);
bdev             1668 drivers/md/raid10.c 		mdname(mddev), bdevname(rdev->bdev, b),
bdev             1694 drivers/md/raid10.c 				 bdevname(rdev->bdev,b));
bdev             1795 drivers/md/raid10.c 				disk_stack_limits(mddev->gendisk, rdev->bdev,
bdev             1803 drivers/md/raid10.c 			disk_stack_limits(mddev->gendisk, rdev->bdev,
bdev             1815 drivers/md/raid10.c 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
bdev             2096 drivers/md/raid10.c 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
bdev             2101 drivers/md/raid10.c 		bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
bdev             2119 drivers/md/raid10.c 		md_sync_acct(conf->mirrors[d].replacement->bdev,
bdev             2251 drivers/md/raid10.c 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
bdev             2256 drivers/md/raid10.c 		md_sync_acct(conf->mirrors[d].replacement->bdev,
bdev             2352 drivers/md/raid10.c 		bdevname(rdev->bdev, b);
bdev             2454 drivers/md/raid10.c 					  bdevname(rdev->bdev, b));
bdev             2457 drivers/md/raid10.c 					  bdevname(rdev->bdev, b));
bdev             2490 drivers/md/raid10.c 				       bdevname(rdev->bdev, b));
bdev             2493 drivers/md/raid10.c 				       bdevname(rdev->bdev, b));
bdev             2501 drivers/md/raid10.c 				       bdevname(rdev->bdev, b));
bdev             2542 drivers/md/raid10.c 				bdev_logical_block_size(rdev->bdev) >> 9);
bdev             2559 drivers/md/raid10.c 		bio_set_dev(wbio, rdev->bdev);
bdev             3175 drivers/md/raid10.c 				bio_set_dev(bio, rdev->bdev);
bdev             3197 drivers/md/raid10.c 					bio_set_dev(bio, mrdev->bdev);
bdev             3218 drivers/md/raid10.c 				bio_set_dev(bio, mreplace->bdev);
bdev             3373 drivers/md/raid10.c 			bio_set_dev(bio, rdev->bdev);
bdev             3395 drivers/md/raid10.c 			bio_set_dev(bio, rdev->bdev);
bdev             3805 drivers/md/raid10.c 			disk_stack_limits(mddev->gendisk, rdev->bdev,
bdev             3810 drivers/md/raid10.c 		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
bdev             4562 drivers/md/raid10.c 	bio_set_dev(read_bio, rdev->bdev);
bdev             4622 drivers/md/raid10.c 		bio_set_dev(b, rdev2->bdev);
bdev              743 drivers/md/raid5-cache.c 	bio_set_dev(bio, log->rdev->bdev);
bdev             1308 drivers/md/raid5-cache.c 	bio_set_dev(&log->flush_bio, log->rdev->bdev);
bdev             1318 drivers/md/raid5-cache.c 	struct block_device *bdev = log->rdev->bdev;
bdev             1323 drivers/md/raid5-cache.c 	if (!blk_queue_discard(bdev_get_queue(bdev)))
bdev             1347 drivers/md/raid5-cache.c 		blkdev_issue_discard(bdev,
bdev             1351 drivers/md/raid5-cache.c 		blkdev_issue_discard(bdev,
bdev             1355 drivers/md/raid5-cache.c 		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
bdev             1684 drivers/md/raid5-cache.c 	bio_set_dev(ctx->ra_bio, log->rdev->bdev);
bdev             3066 drivers/md/raid5-cache.c 	struct request_queue *q = bdev_get_queue(rdev->bdev);
bdev             3072 drivers/md/raid5-cache.c 		 mdname(conf->mddev), bdevname(rdev->bdev, b));
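
The raid5-cache.c entries above first test the queue's discard capability and then trim ranges with blkdev_issue_discard(). A hedged sketch of that sequence; the range arguments and function name are illustrative only:

	#include <linux/blkdev.h>

	static void example_discard_range(struct block_device *bdev,
					  sector_t start, sector_t nr_sects)
	{
		struct request_queue *q = bdev_get_queue(bdev);

		/* Nothing to do if the device does not support discard. */
		if (!q || !blk_queue_discard(q))
			return;

		/* May sleep; GFP_NOIO avoids recursing into the block layer. */
		blkdev_issue_discard(bdev, start, nr_sects, GFP_NOIO, 0);
	}
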
bdev              469 drivers/md/raid5-ppl.c 	bio_set_dev(bio, log->rdev->bdev);
bdev              628 drivers/md/raid5-ppl.c 		struct block_device *bdev = NULL;
bdev              633 drivers/md/raid5-ppl.c 			bdev = rdev->bdev;
bdev              636 drivers/md/raid5-ppl.c 		if (bdev) {
bdev              641 drivers/md/raid5-ppl.c 			bio_set_dev(bio, bdev);
bdev              905 drivers/md/raid5-ppl.c 				 __func__, indent, "", bdevname(rdev->bdev, b),
bdev              948 drivers/md/raid5-ppl.c 		BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
bdev              952 drivers/md/raid5-ppl.c 			 bdevname(parity_rdev->bdev, b));
bdev             1040 drivers/md/raid5-ppl.c 	ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL);
bdev             1062 drivers/md/raid5-ppl.c 	blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
bdev             1281 drivers/md/raid5-ppl.c 			mdname(rdev->mddev), bdevname(rdev->bdev, b));
bdev             1292 drivers/md/raid5-ppl.c 			mdname(rdev->mddev), bdevname(rdev->bdev, b));
bdev             1300 drivers/md/raid5-ppl.c 			mdname(rdev->mddev), bdevname(rdev->bdev, b));
bdev             1326 drivers/md/raid5-ppl.c 	q = bdev_get_queue(rdev->bdev);
bdev             1477 drivers/md/raid5-ppl.c 		 bdevname(rdev->bdev, b));
bdev             1092 drivers/md/raid5.c 				md_sync_acct(rdev->bdev, STRIPE_SECTORS);
bdev             1096 drivers/md/raid5.c 			bio_set_dev(bi, rdev->bdev);
bdev             1159 drivers/md/raid5.c 				md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
bdev             1163 drivers/md/raid5.c 			bio_set_dev(rbi, rrdev->bdev);
bdev             2507 drivers/md/raid5.c 				bdevname(rdev->bdev, b));
bdev             2524 drivers/md/raid5.c 		const char *bdn = bdevname(rdev->bdev, b);
bdev             2703 drivers/md/raid5.c 		bdevname(rdev->bdev, b),
bdev             5261 drivers/md/raid5.c 		bio_set_dev(align_bi, rdev->bdev);
bdev             6958 drivers/md/raid5.c 		if (blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
bdev             7043 drivers/md/raid5.c 				mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
bdev             7457 drivers/md/raid5.c 			disk_stack_limits(mddev->gendisk, rdev->bdev,
bdev             7459 drivers/md/raid5.c 			disk_stack_limits(mddev->gendisk, rdev->bdev,
bdev             7548 drivers/md/raid5.c 			       bdevname(tmp->rdev->bdev, b));
bdev               99 drivers/media/pci/bt8xx/dvb-bt8xx.c static int is_pci_slot_eq(struct pci_dev* adev, struct pci_dev* bdev)
bdev              101 drivers/media/pci/bt8xx/dvb-bt8xx.c 	if ((adev->subsystem_vendor == bdev->subsystem_vendor) &&
bdev              102 drivers/media/pci/bt8xx/dvb-bt8xx.c 		(adev->subsystem_device == bdev->subsystem_device) &&
bdev              103 drivers/media/pci/bt8xx/dvb-bt8xx.c 		(adev->bus->number == bdev->bus->number) &&
bdev              104 drivers/media/pci/bt8xx/dvb-bt8xx.c 		(PCI_SLOT(adev->devfn) == PCI_SLOT(bdev->devfn)))
bdev             1937 drivers/memstick/core/ms_block.c static int msb_bd_open(struct block_device *bdev, fmode_t mode)
bdev             1939 drivers/memstick/core/ms_block.c 	struct gendisk *disk = bdev->bd_disk;
bdev             1989 drivers/memstick/core/ms_block.c static int msb_bd_getgeo(struct block_device *bdev,
bdev             1992 drivers/memstick/core/ms_block.c 	struct msb_data *msb = bdev->bd_disk->private_data;
bdev              181 drivers/memstick/core/mspro_block.c static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode)
bdev              183 drivers/memstick/core/mspro_block.c 	struct gendisk *disk = bdev->bd_disk;
bdev              230 drivers/memstick/core/mspro_block.c static int mspro_block_bd_getgeo(struct block_device *bdev,
bdev              233 drivers/memstick/core/mspro_block.c 	struct mspro_block_data *msb = bdev->bd_disk->private_data;
bdev             2085 drivers/message/fusion/mptscsih.c mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev,
bdev              125 drivers/message/fusion/mptscsih.h extern int mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev, sector_t capacity, int geom[]);
bdev              304 drivers/mmc/core/block.c static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
bdev              306 drivers/mmc/core/block.c 	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
bdev              312 drivers/mmc/core/block.c 			check_disk_change(bdev);
bdev              335 drivers/mmc/core/block.c mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              337 drivers/mmc/core/block.c 	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
bdev              769 drivers/mmc/core/block.c static int mmc_blk_check_blkdev(struct block_device *bdev)
bdev              776 drivers/mmc/core/block.c 	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
bdev              781 drivers/mmc/core/block.c static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
bdev              789 drivers/mmc/core/block.c 		ret = mmc_blk_check_blkdev(bdev);
bdev              792 drivers/mmc/core/block.c 		md = mmc_blk_get(bdev->bd_disk);
bdev              801 drivers/mmc/core/block.c 		ret = mmc_blk_check_blkdev(bdev);
bdev              804 drivers/mmc/core/block.c 		md = mmc_blk_get(bdev->bd_disk);
bdev              818 drivers/mmc/core/block.c static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
bdev              821 drivers/mmc/core/block.c 	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
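
The memstick and mmc entries above show the common block_device_operations shape: ->open() reaches driver state through bdev->bd_disk->private_data, and ->getgeo() fabricates a CHS geometry from the capacity (mmc_blk_getgeo uses 4 heads x 16 sectors). A sketch with hypothetical structure and function names:

	#include <linux/module.h>
	#include <linux/blkdev.h>
	#include <linux/genhd.h>
	#include <linux/hdreg.h>

	struct example_disk_data {
		int usage;
	};

	static int example_open(struct block_device *bdev, fmode_t mode)
	{
		struct example_disk_data *d = bdev->bd_disk->private_data;

		if (!d)
			return -ENXIO;
		d->usage++;
		return 0;
	}

	static int example_getgeo(struct block_device *bdev, struct hd_geometry *geo)
	{
		/* Synthesize a geometry: 4 heads x 16 sectors per track. */
		geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
		geo->heads = 4;
		geo->sectors = 16;
		return 0;
	}

	static const struct block_device_operations example_fops = {
		.owner	= THIS_MODULE,
		.open	= example_open,
		.getgeo	= example_getgeo,
	};
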
bdev              223 drivers/mtd/devices/block2mtd.c 	struct block_device *bdev;
bdev              235 drivers/mtd/devices/block2mtd.c 	bdev = blkdev_get_by_path(devname, mode, dev);
bdev              242 drivers/mtd/devices/block2mtd.c 	for (i = 0; IS_ERR(bdev) && i <= timeout; i++) {
bdev              257 drivers/mtd/devices/block2mtd.c 		bdev = blkdev_get_by_dev(devt, mode, dev);
bdev              261 drivers/mtd/devices/block2mtd.c 	if (IS_ERR(bdev)) {
bdev              265 drivers/mtd/devices/block2mtd.c 	dev->blkdev = bdev;
bdev              267 drivers/mtd/devices/block2mtd.c 	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
bdev              204 drivers/mtd/mtd_blkdevs.c static int blktrans_open(struct block_device *bdev, fmode_t mode)
bdev              206 drivers/mtd/mtd_blkdevs.c 	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
bdev              281 drivers/mtd/mtd_blkdevs.c static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              283 drivers/mtd/mtd_blkdevs.c 	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
bdev              301 drivers/mtd/mtd_blkdevs.c static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
bdev              304 drivers/mtd/mtd_blkdevs.c 	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
bdev              123 drivers/mtd/mtdsuper.c 	struct block_device *bdev;
bdev              172 drivers/mtd/mtdsuper.c 	bdev = lookup_bdev(fc->source);
bdev              173 drivers/mtd/mtdsuper.c 	if (IS_ERR(bdev)) {
bdev              174 drivers/mtd/mtdsuper.c 		ret = PTR_ERR(bdev);
bdev              180 drivers/mtd/mtdsuper.c 	major = MAJOR(bdev->bd_dev);
bdev              181 drivers/mtd/mtdsuper.c 	mtdnr = MINOR(bdev->bd_dev);
bdev              182 drivers/mtd/mtdsuper.c 	bdput(bdev);
bdev              219 drivers/mtd/ubi/block.c static int ubiblock_open(struct block_device *bdev, fmode_t mode)
bdev              221 drivers/mtd/ubi/block.c 	struct ubiblock *dev = bdev->bd_disk->private_data;
bdev              275 drivers/mtd/ubi/block.c static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev              280 drivers/mtd/ubi/block.c 	geo->sectors = get_capacity(bdev->bd_disk);
bdev             1023 drivers/net/ethernet/amd/declance.c static int dec_lance_probe(struct device *bdev, const int type)
bdev             1040 drivers/net/ethernet/amd/declance.c 	if (bdev)
bdev             1041 drivers/net/ethernet/amd/declance.c 		snprintf(name, sizeof(name), "%s", dev_name(bdev));
bdev             1113 drivers/net/ethernet/amd/declance.c 		dev_set_drvdata(bdev, dev);
bdev             1115 drivers/net/ethernet/amd/declance.c 		start = to_tc_dev(bdev)->resource.start;
bdev             1116 drivers/net/ethernet/amd/declance.c 		len = to_tc_dev(bdev)->resource.end - start + 1;
bdev             1117 drivers/net/ethernet/amd/declance.c 		if (!request_mem_region(start, len, dev_name(bdev))) {
bdev             1120 drivers/net/ethernet/amd/declance.c 			       dev_name(bdev));
bdev             1128 drivers/net/ethernet/amd/declance.c 		dev->irq = to_tc_dev(bdev)->interrupt;
bdev             1265 drivers/net/ethernet/amd/declance.c 	if (!bdev) {
bdev             1274 drivers/net/ethernet/amd/declance.c 	if (bdev)
bdev             1342 drivers/net/ethernet/amd/declance.c static void dec_lance_remove(struct device *bdev)
bdev             1344 drivers/net/ethernet/amd/declance.c 	struct net_device *dev = dev_get_drvdata(bdev);
bdev             1348 drivers/net/ethernet/amd/declance.c 	start = to_tc_dev(bdev)->resource.start;
bdev             1349 drivers/net/ethernet/amd/declance.c 	len = to_tc_dev(bdev)->resource.end - start + 1;
bdev             11299 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct pci_dev *bdev, *vfdev;
bdev             11308 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	bdev = pdev->bus->self;
bdev             11309 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
bdev             11310 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 		bdev = bdev->bus->self;
bdev             11312 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	if (!bdev)
bdev             11315 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
bdev             2642 drivers/net/ethernet/sun/sunhme.c 	struct pci_dev *bdev = pdev->bus->self;
bdev             2645 drivers/net/ethernet/sun/sunhme.c 	if (!bdev) return NULL;
bdev             2649 drivers/net/ethernet/sun/sunhme.c 		if (qpdev == bdev)
bdev             2659 drivers/net/ethernet/sun/sunhme.c 		qp->quattro_dev = bdev;
bdev              376 drivers/net/fddi/defxx.c 	struct device __maybe_unused *bdev = bp->bus_dev;
bdev              377 drivers/net/fddi/defxx.c 	int dfx_bus_tc = DFX_BUS_TC(bdev);
bdev              400 drivers/net/fddi/defxx.c 	struct device __maybe_unused *bdev = bp->bus_dev;
bdev              401 drivers/net/fddi/defxx.c 	int dfx_bus_tc = DFX_BUS_TC(bdev);
bdev              434 drivers/net/fddi/defxx.c static void dfx_get_bars(struct device *bdev,
bdev              437 drivers/net/fddi/defxx.c 	int dfx_bus_pci = dev_is_pci(bdev);
bdev              438 drivers/net/fddi/defxx.c 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
bdev              439 drivers/net/fddi/defxx.c 	int dfx_bus_tc = DFX_BUS_TC(bdev);
bdev              445 drivers/net/fddi/defxx.c 		bar_start[0] = pci_resource_start(to_pci_dev(bdev), num);
bdev              446 drivers/net/fddi/defxx.c 		bar_len[0] = pci_resource_len(to_pci_dev(bdev), num);
bdev              451 drivers/net/fddi/defxx.c 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
bdev              481 drivers/net/fddi/defxx.c 		bar_start[0] = to_tc_dev(bdev)->resource.start +
bdev              526 drivers/net/fddi/defxx.c static int dfx_register(struct device *bdev)
bdev              529 drivers/net/fddi/defxx.c 	int dfx_bus_pci = dev_is_pci(bdev);
bdev              530 drivers/net/fddi/defxx.c 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
bdev              531 drivers/net/fddi/defxx.c 	int dfx_bus_tc = DFX_BUS_TC(bdev);
bdev              533 drivers/net/fddi/defxx.c 	const char *print_name = dev_name(bdev);
bdev              556 drivers/net/fddi/defxx.c 		err = pci_enable_device(to_pci_dev(bdev));
bdev              564 drivers/net/fddi/defxx.c 	SET_NETDEV_DEV(dev, bdev);
bdev              567 drivers/net/fddi/defxx.c 	bp->bus_dev = bdev;
bdev              568 drivers/net/fddi/defxx.c 	dev_set_drvdata(bdev, dev);
bdev              570 drivers/net/fddi/defxx.c 	dfx_get_bars(bdev, bar_start, bar_len);
bdev              632 drivers/net/fddi/defxx.c 		pci_set_master(to_pci_dev(bdev));
bdev              655 drivers/net/fddi/defxx.c 		dma_free_coherent(bdev, alloc_size,
bdev              678 drivers/net/fddi/defxx.c 		pci_disable_device(to_pci_dev(bdev));
bdev              720 drivers/net/fddi/defxx.c 	struct device *bdev = bp->bus_dev;
bdev              721 drivers/net/fddi/defxx.c 	int dfx_bus_pci = dev_is_pci(bdev);
bdev              722 drivers/net/fddi/defxx.c 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
bdev              723 drivers/net/fddi/defxx.c 	int dfx_bus_tc = DFX_BUS_TC(bdev);
bdev              735 drivers/net/fddi/defxx.c 		dev->irq = to_tc_dev(bdev)->interrupt;
bdev              737 drivers/net/fddi/defxx.c 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
bdev              829 drivers/net/fddi/defxx.c 		struct pci_dev *pdev = to_pci_dev(bdev);
bdev              880 drivers/net/fddi/defxx.c 	struct device *bdev = bp->bus_dev;
bdev              881 drivers/net/fddi/defxx.c 	int dfx_bus_pci = dev_is_pci(bdev);
bdev              882 drivers/net/fddi/defxx.c 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
bdev              890 drivers/net/fddi/defxx.c 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
bdev              942 drivers/net/fddi/defxx.c 	struct device __maybe_unused *bdev = bp->bus_dev;
bdev              943 drivers/net/fddi/defxx.c 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
bdev              959 drivers/net/fddi/defxx.c 		if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
bdev             1040 drivers/net/fddi/defxx.c 	struct device *bdev = bp->bus_dev;
bdev             1041 drivers/net/fddi/defxx.c 	int dfx_bus_pci = dev_is_pci(bdev);
bdev             1042 drivers/net/fddi/defxx.c 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
bdev             1043 drivers/net/fddi/defxx.c 	int dfx_bus_tc = DFX_BUS_TC(bdev);
bdev             1949 drivers/net/fddi/defxx.c 	struct device *bdev = bp->bus_dev;
bdev             1950 drivers/net/fddi/defxx.c 	int dfx_bus_pci = dev_is_pci(bdev);
bdev             1951 drivers/net/fddi/defxx.c 	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
bdev             1952 drivers/net/fddi/defxx.c 	int dfx_bus_tc = DFX_BUS_TC(bdev);
bdev             1982 drivers/net/fddi/defxx.c 		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
bdev             3693 drivers/net/fddi/defxx.c static void dfx_unregister(struct device *bdev)
bdev             3695 drivers/net/fddi/defxx.c 	struct net_device *dev = dev_get_drvdata(bdev);
bdev             3697 drivers/net/fddi/defxx.c 	int dfx_bus_pci = dev_is_pci(bdev);
bdev             3698 drivers/net/fddi/defxx.c 	int dfx_bus_tc = DFX_BUS_TC(bdev);
bdev             3714 drivers/net/fddi/defxx.c 		dma_free_coherent(bdev, alloc_size,
bdev             3719 drivers/net/fddi/defxx.c 	dfx_get_bars(bdev, bar_start, bar_len);
bdev             3731 drivers/net/fddi/defxx.c 		pci_disable_device(to_pci_dev(bdev));
bdev              703 drivers/net/fddi/defza.c 		dma_sync_single_for_cpu(fp->bdev,
bdev              717 drivers/net/fddi/defza.c 			newdma = dma_map_single(fp->bdev, newskb->data,
bdev              720 drivers/net/fddi/defza.c 			if (dma_mapping_error(fp->bdev, newdma)) {
bdev              730 drivers/net/fddi/defza.c 			dma_unmap_single(fp->bdev, dma, FZA_RX_BUFFER_SIZE,
bdev             1163 drivers/net/fddi/defza.c 			dma = dma_map_single(fp->bdev, skb->data,
bdev             1166 drivers/net/fddi/defza.c 			if (dma_mapping_error(fp->bdev, dma)) {
bdev             1173 drivers/net/fddi/defza.c 				dma_unmap_single(fp->bdev, fp->rx_dma[i],
bdev             1256 drivers/net/fddi/defza.c 			dma_unmap_single(fp->bdev, fp->rx_dma[i],
bdev             1273 drivers/net/fddi/defza.c static int fza_probe(struct device *bdev)
bdev             1285 drivers/net/fddi/defza.c 	struct tc_dev *tdev = to_tc_dev(bdev);
bdev             1303 drivers/net/fddi/defza.c 	SET_NETDEV_DEV(dev, bdev);
bdev             1306 drivers/net/fddi/defza.c 	dev_set_drvdata(bdev, dev);
bdev             1308 drivers/net/fddi/defza.c 	fp->bdev = bdev;
bdev             1309 drivers/net/fddi/defza.c 	fp->name = dev_name(bdev);
bdev             1314 drivers/net/fddi/defza.c 	if (!request_mem_region(start, len, dev_name(bdev))) {
bdev             1492 drivers/net/fddi/defza.c 	get_device(bdev);
bdev             1513 drivers/net/fddi/defza.c static int fza_remove(struct device *bdev)
bdev             1515 drivers/net/fddi/defza.c 	struct net_device *dev = dev_get_drvdata(bdev);
bdev             1517 drivers/net/fddi/defza.c 	struct tc_dev *tdev = to_tc_dev(bdev);
bdev             1520 drivers/net/fddi/defza.c 	put_device(bdev);
bdev              665 drivers/net/fddi/defza.h 	struct device *bdev;		/* pointer to the bus device */
bdev               32 drivers/net/wireless/broadcom/b43/bus.c 	return bcma_core_is_enabled(dev->bdev);
bdev               37 drivers/net/wireless/broadcom/b43/bus.c 	bcma_core_enable(dev->bdev, core_specific_flags);
bdev               42 drivers/net/wireless/broadcom/b43/bus.c 	bcma_core_disable(dev->bdev, core_specific_flags);
bdev               46 drivers/net/wireless/broadcom/b43/bus.c 	return bcma_read16(dev->bdev, offset);
bdev               50 drivers/net/wireless/broadcom/b43/bus.c 	return bcma_read32(dev->bdev, offset);
bdev               55 drivers/net/wireless/broadcom/b43/bus.c 	bcma_write16(dev->bdev, offset, value);
bdev               60 drivers/net/wireless/broadcom/b43/bus.c 	bcma_write32(dev->bdev, offset, value);
bdev               66 drivers/net/wireless/broadcom/b43/bus.c 	bcma_block_read(dev->bdev, buffer, count, offset, reg_width);
bdev               72 drivers/net/wireless/broadcom/b43/bus.c 	bcma_block_write(dev->bdev, buffer, count, offset, reg_width);
bdev               82 drivers/net/wireless/broadcom/b43/bus.c 	dev->bdev = core;
bdev              228 drivers/net/wireless/broadcom/b43/bus.c 		return bcma_get_drvdata(dev->bdev);
bdev              243 drivers/net/wireless/broadcom/b43/bus.c 		bcma_set_drvdata(dev->bdev, wldev);
bdev               17 drivers/net/wireless/broadcom/b43/bus.h 		struct bcma_device *bdev;
bdev               71 drivers/net/wireless/broadcom/b43/bus.h 		return (dev->bdev->bus->hosttype == BCMA_HOSTTYPE_PCI);
bdev              808 drivers/net/wireless/broadcom/b43/dma.c 		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
bdev             1068 drivers/net/wireless/broadcom/b43/dma.c 		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
bdev             1210 drivers/net/wireless/broadcom/b43/main.c 		bcma_cc = &dev->dev->bdev->bus->drv_cc;
bdev             1237 drivers/net/wireless/broadcom/b43/main.c 	flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
bdev             1240 drivers/net/wireless/broadcom/b43/main.c 	bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
bdev             1262 drivers/net/wireless/broadcom/b43/main.c 		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
bdev             1265 drivers/net/wireless/broadcom/b43/main.c 		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
bdev             1267 drivers/net/wireless/broadcom/b43/main.c 		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
bdev             1269 drivers/net/wireless/broadcom/b43/main.c 		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
bdev             1271 drivers/net/wireless/broadcom/b43/main.c 		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
bdev             1273 drivers/net/wireless/broadcom/b43/main.c 		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
bdev             1276 drivers/net/wireless/broadcom/b43/main.c 	bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
bdev             1278 drivers/net/wireless/broadcom/b43/main.c 	bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
bdev             2905 drivers/net/wireless/broadcom/b43/main.c 		bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc, mask, set);
bdev             2932 drivers/net/wireless/broadcom/b43/main.c 		bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc, ~0, 0);
bdev             3015 drivers/net/wireless/broadcom/b43/main.c 		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
bdev             3020 drivers/net/wireless/broadcom/b43/main.c 		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
bdev             3923 drivers/net/wireless/broadcom/b43/main.c 		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
bdev             3928 drivers/net/wireless/broadcom/b43/main.c 		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
bdev             4783 drivers/net/wireless/broadcom/b43/main.c 		bcma_host_pci_down(dev->dev->bdev->bus);
bdev             4830 drivers/net/wireless/broadcom/b43/main.c 		bcma_host_pci_irq_ctl(dev->dev->bdev->bus,
bdev             4831 drivers/net/wireless/broadcom/b43/main.c 				      dev->dev->bdev, true);
bdev             4832 drivers/net/wireless/broadcom/b43/main.c 		bcma_host_pci_up(dev->dev->bdev->bus);
bdev             5297 drivers/net/wireless/broadcom/b43/main.c 	    dev->dev->bdev->bus->hosttype == BCMA_HOSTTYPE_PCI)
bdev             5298 drivers/net/wireless/broadcom/b43/main.c 		dev_id = dev->dev->bdev->bus->host_pci->device;
bdev             5390 drivers/net/wireless/broadcom/b43/main.c 		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
bdev              339 drivers/net/wireless/broadcom/b43/phy_common.c 		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
bdev              343 drivers/net/wireless/broadcom/b43/phy_common.c 		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
bdev              346 drivers/net/wireless/broadcom/b43/phy_common.c 		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
bdev              348 drivers/net/wireless/broadcom/b43/phy_common.c 		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
bdev              379 drivers/net/wireless/broadcom/b43/phy_common.c 		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
bdev              383 drivers/net/wireless/broadcom/b43/phy_common.c 		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
bdev              387 drivers/net/wireless/broadcom/b43/phy_common.c 		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
bdev              390 drivers/net/wireless/broadcom/b43/phy_common.c 		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
bdev              574 drivers/net/wireless/broadcom/b43/phy_common.c 		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
bdev              579 drivers/net/wireless/broadcom/b43/phy_common.c 		bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
bdev             2144 drivers/net/wireless/broadcom/b43/phy_g.c 	struct b43_bus_dev *bdev = dev->dev;
bdev             2175 drivers/net/wireless/broadcom/b43/phy_g.c 				if (bdev->board_vendor == SSB_BOARDVENDOR_BCM
bdev             2176 drivers/net/wireless/broadcom/b43/phy_g.c 				    && bdev->board_type == SSB_BOARD_BCM4309G
bdev             2177 drivers/net/wireless/broadcom/b43/phy_g.c 				    && bdev->board_rev >= 30)
bdev             2179 drivers/net/wireless/broadcom/b43/phy_g.c 				else if (bdev->board_vendor ==
bdev             2181 drivers/net/wireless/broadcom/b43/phy_g.c 					 && bdev->board_type ==
bdev             2187 drivers/net/wireless/broadcom/b43/phy_g.c 				if (bdev->board_vendor == SSB_BOARDVENDOR_BCM
bdev             2188 drivers/net/wireless/broadcom/b43/phy_g.c 				    && bdev->board_type == SSB_BOARD_BCM4309G
bdev             2189 drivers/net/wireless/broadcom/b43/phy_g.c 				    && bdev->board_rev >= 30)
bdev             2197 drivers/net/wireless/broadcom/b43/phy_g.c 				if (bdev->board_vendor == SSB_BOARDVENDOR_BCM
bdev             2198 drivers/net/wireless/broadcom/b43/phy_g.c 				    && bdev->board_type == SSB_BOARD_BCM4309G
bdev             2199 drivers/net/wireless/broadcom/b43/phy_g.c 				    && bdev->board_rev >= 30)
bdev             2201 drivers/net/wireless/broadcom/b43/phy_g.c 				else if (bdev->board_vendor ==
bdev             2203 drivers/net/wireless/broadcom/b43/phy_g.c 					 && bdev->board_type ==
bdev             2206 drivers/net/wireless/broadcom/b43/phy_g.c 				else if (bdev->chip_id == 0x4320)
bdev              736 drivers/net/wireless/broadcom/b43/phy_ht.c 	struct bcma_device *core = dev->dev->bdev;
bdev              589 drivers/net/wireless/broadcom/b43/phy_lcn.c 	struct bcma_drv_cc *cc = &dev->dev->bdev->bus->drv_cc;
bdev              700 drivers/net/wireless/broadcom/b43/phy_lcn.c 	struct bcma_drv_cc *cc = &dev->dev->bdev->bus->drv_cc;
bdev             5997 drivers/net/wireless/broadcom/b43/phy_n.c 			bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc,
bdev             6043 drivers/net/wireless/broadcom/b43/phy_n.c 			bcma_cc_set32(&dev->dev->bdev->bus->drv_cc,
bdev             6255 drivers/net/wireless/broadcom/b43/phy_n.c 		bcma_pmu_spuravoid_pllupdate(&dev->dev->bdev->bus->drv_cc,
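Most of the b43 main.c and phy_common.c hits above are the same read-modify-write sequence on the BCMA agent register: read BCMA_IOCTL through the core handle in dev->dev->bdev, flip a few flag bits, and write the value back. A sketch of that sequence, with a hypothetical helper name (b43_bcma_update_ioctl is not a real kernel symbol):

/*
 * Illustrative only: read-modify-write of the per-core IOCTL register,
 * as repeated throughout the b43 entries above.
 */
#include <linux/bcma/bcma.h>
#include <linux/bcma/bcma_regs.h>

static void b43_bcma_update_ioctl(struct bcma_device *core, u32 set, u32 clear)
{
	u32 tmp;

	tmp = bcma_aread32(core, BCMA_IOCTL);	/* current core control flags */
	tmp &= ~clear;				/* drop the requested bits */
	tmp |= set;				/* raise the requested bits */
	bcma_awrite32(core, BCMA_IOCTL, tmp);	/* write the flags back */
}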
bdev             1486 drivers/nvdimm/btt.c static int btt_rw_page(struct block_device *bdev, sector_t sector,
bdev             1489 drivers/nvdimm/btt.c 	struct btt *btt = bdev->bd_disk->private_data;
bdev              221 drivers/nvdimm/pmem.c static int pmem_rw_page(struct block_device *bdev, sector_t sector,
bdev              224 drivers/nvdimm/pmem.c 	struct pmem_device *pmem = bdev->bd_queue->queuedata;
bdev             1560 drivers/nvme/host/core.c static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1568 drivers/nvme/host/core.c 	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
bdev             1605 drivers/nvme/host/core.c static int nvme_open(struct block_device *bdev, fmode_t mode)
bdev             1607 drivers/nvme/host/core.c 	struct nvme_ns *ns = bdev->bd_disk->private_data;
bdev             1635 drivers/nvme/host/core.c static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev             1640 drivers/nvme/host/core.c 	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
bdev             1932 drivers/nvme/host/core.c static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
bdev             1941 drivers/nvme/host/core.c 	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
bdev             1958 drivers/nvme/host/core.c static int nvme_pr_register(struct block_device *bdev, u64 old,
bdev             1969 drivers/nvme/host/core.c 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
bdev             1972 drivers/nvme/host/core.c static int nvme_pr_reserve(struct block_device *bdev, u64 key,
bdev             1982 drivers/nvme/host/core.c 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
bdev             1985 drivers/nvme/host/core.c static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
bdev             1989 drivers/nvme/host/core.c 	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
bdev             1992 drivers/nvme/host/core.c static int nvme_pr_clear(struct block_device *bdev, u64 key)
bdev             1995 drivers/nvme/host/core.c 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register);
bdev             1998 drivers/nvme/host/core.c static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
bdev             2001 drivers/nvme/host/core.c 	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
bdev             2046 drivers/nvme/host/core.c static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
bdev             2048 drivers/nvme/host/core.c 	struct nvme_ns_head *head = bdev->bd_disk->private_data;
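The nvme_pr_*() helpers and the open/ioctl/getgeo handlers listed above are the pieces a block driver wires into its pr_ops and block_device_operations tables. A sketch of that wiring, using illustrative table names (the driver's actual tables live in drivers/nvme/host/core.c and are not part of this listing):

/*
 * Sketch only: how the callbacks listed above are typically exposed to the
 * block layer.  example_pr_ops/example_fops are placeholder names.
 */
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/pr.h>

static const struct pr_ops example_pr_ops = {
	.pr_register	= nvme_pr_register,
	.pr_reserve	= nvme_pr_reserve,
	.pr_release	= nvme_pr_release,
	.pr_preempt	= nvme_pr_preempt,
	.pr_clear	= nvme_pr_clear,
};

static const struct block_device_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= nvme_open,
	.ioctl	= nvme_ioctl,
	.getgeo	= nvme_getgeo,
	.pr_ops	= &example_pr_ops,	/* persistent reservation hooks */
};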
bdev               78 drivers/nvme/target/admin-cmd.c 	if (!ns->bdev)
bdev               81 drivers/nvme/target/admin-cmd.c 	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
bdev               82 drivers/nvme/target/admin-cmd.c 	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
bdev               84 drivers/nvme/target/admin-cmd.c 	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
bdev               85 drivers/nvme/target/admin-cmd.c 	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
bdev              111 drivers/nvme/target/admin-cmd.c 		if (!ns->bdev)
bdev              113 drivers/nvme/target/admin-cmd.c 		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
bdev              115 drivers/nvme/target/admin-cmd.c 			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
bdev              116 drivers/nvme/target/admin-cmd.c 		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
bdev              118 drivers/nvme/target/admin-cmd.c 			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
bdev              445 drivers/nvme/target/admin-cmd.c 	if (ns->bdev)
bdev              446 drivers/nvme/target/admin-cmd.c 		nvmet_bdev_set_limits(ns->bdev, id);
bdev              438 drivers/nvme/target/core.c 	if (!ns->bdev) {
bdev              443 drivers/nvme/target/core.c 	if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
bdev               11 drivers/nvme/target/io-cmd-bdev.c void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
bdev               13 drivers/nvme/target/io-cmd-bdev.c 	const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
bdev               54 drivers/nvme/target/io-cmd-bdev.c 	ns->bdev = blkdev_get_by_path(ns->device_path,
bdev               56 drivers/nvme/target/io-cmd-bdev.c 	if (IS_ERR(ns->bdev)) {
bdev               57 drivers/nvme/target/io-cmd-bdev.c 		ret = PTR_ERR(ns->bdev);
bdev               60 drivers/nvme/target/io-cmd-bdev.c 					ns->device_path, PTR_ERR(ns->bdev));
bdev               62 drivers/nvme/target/io-cmd-bdev.c 		ns->bdev = NULL;
bdev               65 drivers/nvme/target/io-cmd-bdev.c 	ns->size = i_size_read(ns->bdev->bd_inode);
bdev               66 drivers/nvme/target/io-cmd-bdev.c 	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
bdev               72 drivers/nvme/target/io-cmd-bdev.c 	if (ns->bdev) {
bdev               73 drivers/nvme/target/io-cmd-bdev.c 		blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
bdev               74 drivers/nvme/target/io-cmd-bdev.c 		ns->bdev = NULL;
bdev              179 drivers/nvme/target/io-cmd-bdev.c 	bio_set_dev(bio, req->ns->bdev);
bdev              191 drivers/nvme/target/io-cmd-bdev.c 			bio_set_dev(bio, req->ns->bdev);
bdev              211 drivers/nvme/target/io-cmd-bdev.c 	bio_set_dev(bio, req->ns->bdev);
bdev              221 drivers/nvme/target/io-cmd-bdev.c 	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
bdev              232 drivers/nvme/target/io-cmd-bdev.c 	ret = __blkdev_issue_discard(ns->bdev,
bdev              303 drivers/nvme/target/io-cmd-bdev.c 	ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
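The nvmet bdev backend entries above show the usual open-by-path lifecycle: blkdev_get_by_path() to take a reference, i_size_read() and bdev_logical_block_size() to size the namespace, and blkdev_put() to release it. A condensed sketch of that lifecycle, with a hypothetical function name (example_probe_bdev is not driver code):

/*
 * Sketch only: open a backing block device by path, read its size and
 * logical block size, then drop the reference.
 */
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/printk.h>

static int example_probe_bdev(const char *path)
{
	struct block_device *bdev;
	loff_t size;
	unsigned int blksize_shift;

	bdev = blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	size = i_size_read(bdev->bd_inode);	/* capacity in bytes */
	blksize_shift = blksize_bits(bdev_logical_block_size(bdev));
	pr_info("%s: %lld bytes, logical block shift %u\n",
		path, size, blksize_shift);

	blkdev_put(bdev, FMODE_READ | FMODE_WRITE);	/* drop the reference */
	return 0;
}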
bdev               55 drivers/nvme/target/nvmet.h 	struct block_device	*bdev;
bdev               88 drivers/nvme/target/nvmet.h 	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
bdev              368 drivers/nvme/target/nvmet.h void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
bdev               29 drivers/pci/controller/pcie-iproc-bcma.c 	struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev);
bdev               31 drivers/pci/controller/pcie-iproc-bcma.c 	return bcma_core_irq(bdev, 5);
bdev               34 drivers/pci/controller/pcie-iproc-bcma.c static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
bdev               36 drivers/pci/controller/pcie-iproc-bcma.c 	struct device *dev = &bdev->dev;
bdev               51 drivers/pci/controller/pcie-iproc-bcma.c 	pcie->base = bdev->io_addr;
bdev               57 drivers/pci/controller/pcie-iproc-bcma.c 	pcie->base_addr = bdev->addr;
bdev               59 drivers/pci/controller/pcie-iproc-bcma.c 	pcie->mem.start = bdev->addr_s[0];
bdev               60 drivers/pci/controller/pcie-iproc-bcma.c 	pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
bdev               74 drivers/pci/controller/pcie-iproc-bcma.c 	bcma_set_drvdata(bdev, pcie);
bdev               78 drivers/pci/controller/pcie-iproc-bcma.c static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
bdev               80 drivers/pci/controller/pcie-iproc-bcma.c 	struct iproc_pcie *pcie = bcma_get_drvdata(bdev);
bdev               53 drivers/platform/x86/apple-gmux.c 	struct backlight_device *bdev;
bdev              607 drivers/platform/x86/apple-gmux.c 	struct backlight_device *bdev;
bdev              683 drivers/platform/x86/apple-gmux.c 	bdev = backlight_device_register("gmux_backlight", &pnp->dev,
bdev              685 drivers/platform/x86/apple-gmux.c 	if (IS_ERR(bdev)) {
bdev              686 drivers/platform/x86/apple-gmux.c 		ret = PTR_ERR(bdev);
bdev              690 drivers/platform/x86/apple-gmux.c 	gmux_data->bdev = bdev;
bdev              691 drivers/platform/x86/apple-gmux.c 	bdev->props.brightness = gmux_get_brightness(bdev);
bdev              692 drivers/platform/x86/apple-gmux.c 	backlight_update_status(bdev);
bdev              783 drivers/platform/x86/apple-gmux.c 	backlight_device_unregister(bdev);
bdev              804 drivers/platform/x86/apple-gmux.c 	backlight_device_unregister(gmux_data->bdev);
bdev              445 drivers/s390/block/dasd.c 		disk = device->block->bdev->bd_disk;
bdev              472 drivers/s390/block/dasd.c 		disk = device->block->bdev->bd_disk;
bdev             3304 drivers/s390/block/dasd.c static int dasd_open(struct block_device *bdev, fmode_t mode)
bdev             3309 drivers/s390/block/dasd.c 	base = dasd_device_from_gendisk(bdev->bd_disk);
bdev             3370 drivers/s390/block/dasd.c static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev             3374 drivers/s390/block/dasd.c 	base = dasd_device_from_gendisk(bdev->bd_disk);
bdev             3384 drivers/s390/block/dasd.c 	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
bdev             3661 drivers/s390/block/dasd.c 		max_count = device->block->bdev ? 0 : -1;
bdev             3708 drivers/s390/block/dasd.c 			rc = fsync_bdev(device->block->bdev);
bdev              101 drivers/s390/block/dasd_genhd.c 	struct block_device *bdev;
bdev              104 drivers/s390/block/dasd_genhd.c 	bdev = bdget_disk(block->gdp, 0);
bdev              105 drivers/s390/block/dasd_genhd.c 	if (!bdev) {
bdev              111 drivers/s390/block/dasd_genhd.c 	rc = blkdev_get(bdev, FMODE_READ, NULL);
bdev              119 drivers/s390/block/dasd_genhd.c 	rc = blkdev_reread_part(bdev);
bdev              134 drivers/s390/block/dasd_genhd.c 	block->bdev = bdev;
bdev              147 drivers/s390/block/dasd_genhd.c 	struct block_device *bdev;
bdev              153 drivers/s390/block/dasd_genhd.c 	bdev = block->bdev;
bdev              154 drivers/s390/block/dasd_genhd.c 	block->bdev = NULL;
bdev              166 drivers/s390/block/dasd_genhd.c 		ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg);
bdev              170 drivers/s390/block/dasd_genhd.c 	blkdev_put(bdev, FMODE_READ);
bdev              558 drivers/s390/block/dasd_int.h 	struct block_device *bdev;
bdev               44 drivers/s390/block/dasd_ioctl.c dasd_ioctl_enable(struct block_device *bdev)
bdev               51 drivers/s390/block/dasd_ioctl.c 	base = dasd_device_from_gendisk(bdev->bd_disk);
bdev               57 drivers/s390/block/dasd_ioctl.c 	mutex_lock(&bdev->bd_mutex);
bdev               58 drivers/s390/block/dasd_ioctl.c 	i_size_write(bdev->bd_inode,
bdev               60 drivers/s390/block/dasd_ioctl.c 	mutex_unlock(&bdev->bd_mutex);
bdev               70 drivers/s390/block/dasd_ioctl.c dasd_ioctl_disable(struct block_device *bdev)
bdev               77 drivers/s390/block/dasd_ioctl.c 	base = dasd_device_from_gendisk(bdev->bd_disk);
bdev               93 drivers/s390/block/dasd_ioctl.c 	mutex_lock(&bdev->bd_mutex);
bdev               94 drivers/s390/block/dasd_ioctl.c 	i_size_write(bdev->bd_inode, 0);
bdev               95 drivers/s390/block/dasd_ioctl.c 	mutex_unlock(&bdev->bd_mutex);
bdev              229 drivers/s390/block/dasd_ioctl.c 		struct block_device *bdev = bdget_disk(block->gdp, 0);
bdev              230 drivers/s390/block/dasd_ioctl.c 		bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
bdev              231 drivers/s390/block/dasd_ioctl.c 		bdput(bdev);
bdev              262 drivers/s390/block/dasd_ioctl.c dasd_ioctl_format(struct block_device *bdev, void __user *argp)
bdev              272 drivers/s390/block/dasd_ioctl.c 	base = dasd_device_from_gendisk(bdev->bd_disk);
bdev              284 drivers/s390/block/dasd_ioctl.c 	if (bdev != bdev->bd_contains) {
bdev              299 drivers/s390/block/dasd_ioctl.c static int dasd_ioctl_check_format(struct block_device *bdev, void __user *argp)
bdev              308 drivers/s390/block/dasd_ioctl.c 	base = dasd_device_from_gendisk(bdev->bd_disk);
bdev              311 drivers/s390/block/dasd_ioctl.c 	if (bdev != bdev->bd_contains) {
bdev              350 drivers/s390/block/dasd_ioctl.c static int dasd_ioctl_release_space(struct block_device *bdev, void __user *argp)
bdev              361 drivers/s390/block/dasd_ioctl.c 	base = dasd_device_from_gendisk(bdev->bd_disk);
bdev              369 drivers/s390/block/dasd_ioctl.c 	if (bdev != bdev->bd_contains) {
bdev              503 drivers/s390/block/dasd_ioctl.c 	if (!block->bdev)
bdev              538 drivers/s390/block/dasd_ioctl.c dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
bdev              545 drivers/s390/block/dasd_ioctl.c 	if (bdev != bdev->bd_contains)
bdev              550 drivers/s390/block/dasd_ioctl.c 	base = dasd_device_from_gendisk(bdev->bd_disk);
bdev              557 drivers/s390/block/dasd_ioctl.c 	set_disk_ro(bdev->bd_disk, intval);
bdev              576 drivers/s390/block/dasd_ioctl.c int dasd_ioctl(struct block_device *bdev, fmode_t mode,
bdev              594 drivers/s390/block/dasd_ioctl.c 	base = dasd_device_from_gendisk(bdev->bd_disk);
bdev              601 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_disable(bdev);
bdev              604 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_enable(bdev);
bdev              619 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_format(bdev, argp);
bdev              622 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_check_format(bdev, argp);
bdev              637 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_set_ro(bdev, argp);
bdev              652 drivers/s390/block/dasd_ioctl.c 		rc = dasd_ioctl_release_space(bdev, argp);
bdev               32 drivers/s390/block/dcssblk.c static int dcssblk_open(struct block_device *bdev, fmode_t mode);
bdev              809 drivers/s390/block/dcssblk.c dcssblk_open(struct block_device *bdev, fmode_t mode)
bdev              814 drivers/s390/block/dcssblk.c 	dev_info = bdev->bd_disk->private_data;
bdev              820 drivers/s390/block/dcssblk.c 	bdev->bd_block_size = 4096;
bdev              146 drivers/s390/block/scm_blk.c static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
bdev              148 drivers/s390/block/scm_blk.c 	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
bdev              181 drivers/s390/block/scm_blk.c 	struct scm_blk_dev *bdev = scmrq->bdev;
bdev              182 drivers/s390/block/scm_blk.c 	struct scm_device *scmdev = bdev->gendisk->private_data;
bdev              218 drivers/s390/block/scm_blk.c static inline void scm_request_init(struct scm_blk_dev *bdev,
bdev              227 drivers/s390/block/scm_blk.c 	aobrq->scmdev = bdev->scmdev;
bdev              230 drivers/s390/block/scm_blk.c 	scmrq->bdev = bdev;
bdev              239 drivers/s390/block/scm_blk.c 	struct scm_blk_dev *bdev = scmrq->bdev;
bdev              245 drivers/s390/block/scm_blk.c 	atomic_dec(&bdev->queued_reqs);
bdev              247 drivers/s390/block/scm_blk.c 	blk_mq_kick_requeue_list(bdev->rq);
bdev              252 drivers/s390/block/scm_blk.c 	struct scm_blk_dev *bdev = scmrq->bdev;
bdev              262 drivers/s390/block/scm_blk.c 	atomic_dec(&bdev->queued_reqs);
bdev              268 drivers/s390/block/scm_blk.c 	struct scm_blk_dev *bdev = scmrq->bdev;
bdev              270 drivers/s390/block/scm_blk.c 	atomic_inc(&bdev->queued_reqs);
bdev              286 drivers/s390/block/scm_blk.c 	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
bdev              292 drivers/s390/block/scm_blk.c 	if (!scm_permit_request(bdev, req)) {
bdev              305 drivers/s390/block/scm_blk.c 		scm_request_init(bdev, scmrq);
bdev              373 drivers/s390/block/scm_blk.c 	struct scm_blk_dev *bdev = scmrq->bdev;
bdev              382 drivers/s390/block/scm_blk.c 		spin_lock_irqsave(&bdev->lock, flags);
bdev              383 drivers/s390/block/scm_blk.c 		if (bdev->state != SCM_WR_PROHIBIT)
bdev              385 drivers/s390/block/scm_blk.c 				(unsigned long) bdev->scmdev->address);
bdev              386 drivers/s390/block/scm_blk.c 		bdev->state = SCM_WR_PROHIBIT;
bdev              387 drivers/s390/block/scm_blk.c 		spin_unlock_irqrestore(&bdev->lock, flags);
bdev              435 drivers/s390/block/scm_blk.c int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
bdev              448 drivers/s390/block/scm_blk.c 	bdev->scmdev = scmdev;
bdev              449 drivers/s390/block/scm_blk.c 	bdev->state = SCM_OPER;
bdev              450 drivers/s390/block/scm_blk.c 	spin_lock_init(&bdev->lock);
bdev              451 drivers/s390/block/scm_blk.c 	atomic_set(&bdev->queued_reqs, 0);
bdev              453 drivers/s390/block/scm_blk.c 	bdev->tag_set.ops = &scm_mq_ops;
bdev              454 drivers/s390/block/scm_blk.c 	bdev->tag_set.cmd_size = sizeof(blk_status_t);
bdev              455 drivers/s390/block/scm_blk.c 	bdev->tag_set.nr_hw_queues = nr_requests;
bdev              456 drivers/s390/block/scm_blk.c 	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
bdev              457 drivers/s390/block/scm_blk.c 	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
bdev              458 drivers/s390/block/scm_blk.c 	bdev->tag_set.numa_node = NUMA_NO_NODE;
bdev              460 drivers/s390/block/scm_blk.c 	ret = blk_mq_alloc_tag_set(&bdev->tag_set);
bdev              464 drivers/s390/block/scm_blk.c 	rq = blk_mq_init_queue(&bdev->tag_set);
bdev              469 drivers/s390/block/scm_blk.c 	bdev->rq = rq;
bdev              479 drivers/s390/block/scm_blk.c 	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
bdev              480 drivers/s390/block/scm_blk.c 	if (!bdev->gendisk) {
bdev              485 drivers/s390/block/scm_blk.c 	bdev->gendisk->private_data = scmdev;
bdev              486 drivers/s390/block/scm_blk.c 	bdev->gendisk->fops = &scm_blk_devops;
bdev              487 drivers/s390/block/scm_blk.c 	bdev->gendisk->queue = rq;
bdev              488 drivers/s390/block/scm_blk.c 	bdev->gendisk->major = scm_major;
bdev              489 drivers/s390/block/scm_blk.c 	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;
bdev              491 drivers/s390/block/scm_blk.c 	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
bdev              493 drivers/s390/block/scm_blk.c 		len += snprintf(bdev->gendisk->disk_name + len,
bdev              498 drivers/s390/block/scm_blk.c 	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
bdev              502 drivers/s390/block/scm_blk.c 	set_capacity(bdev->gendisk, scmdev->size >> 9);
bdev              503 drivers/s390/block/scm_blk.c 	device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
bdev              509 drivers/s390/block/scm_blk.c 	blk_mq_free_tag_set(&bdev->tag_set);
bdev              515 drivers/s390/block/scm_blk.c void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
bdev              517 drivers/s390/block/scm_blk.c 	del_gendisk(bdev->gendisk);
bdev              518 drivers/s390/block/scm_blk.c 	blk_cleanup_queue(bdev->gendisk->queue);
bdev              519 drivers/s390/block/scm_blk.c 	blk_mq_free_tag_set(&bdev->tag_set);
bdev              520 drivers/s390/block/scm_blk.c 	put_disk(bdev->gendisk);
bdev              523 drivers/s390/block/scm_blk.c void scm_blk_set_available(struct scm_blk_dev *bdev)
bdev              527 drivers/s390/block/scm_blk.c 	spin_lock_irqsave(&bdev->lock, flags);
bdev              528 drivers/s390/block/scm_blk.c 	if (bdev->state == SCM_WR_PROHIBIT)
bdev              530 drivers/s390/block/scm_blk.c 			(unsigned long) bdev->scmdev->address);
bdev              531 drivers/s390/block/scm_blk.c 	bdev->state = SCM_OPER;
bdev              532 drivers/s390/block/scm_blk.c 	spin_unlock_irqrestore(&bdev->lock, flags);
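The scm_blk.c entries above walk through the standard blk-mq bring-up: fill a blk_mq_tag_set, allocate it, build a request queue from it, then attach a gendisk and publish it with device_add_disk(). A condensed sketch of that sequence under assumed placeholder values (example_blk_dev_setup and the queue-depth numbers are illustrative, not the driver's):

/*
 * Sketch only: minimal blk-mq queue and gendisk setup, mirroring the
 * scm_blk_dev_setup() entries above.
 */
#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/genhd.h>

static int example_blk_dev_setup(struct device *parent,
				 struct blk_mq_tag_set *set,
				 const struct blk_mq_ops *mq_ops,
				 const struct block_device_operations *fops,
				 int major, sector_t capacity_sectors)
{
	struct request_queue *q;
	struct gendisk *disk;
	int ret;

	set->ops = mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 64;			/* placeholder value */
	set->cmd_size = sizeof(blk_status_t);	/* per-request driver data */
	set->numa_node = NUMA_NO_NODE;
	set->flags = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;

	q = blk_mq_init_queue(set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(set);
		return PTR_ERR(q);
	}

	disk = alloc_disk(1);			/* one minor, no partitions */
	if (!disk) {
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(set);
		return -ENOMEM;
	}

	disk->major = major;
	disk->first_minor = 0;
	disk->fops = fops;
	disk->queue = q;
	snprintf(disk->disk_name, DISK_NAME_LEN, "example0");
	set_capacity(disk, capacity_sectors);
	device_add_disk(parent, disk, NULL);	/* makes the disk visible */
	return 0;
}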
bdev               30 drivers/s390/block/scm_blk.h 	struct scm_blk_dev *bdev;
bdev               19 drivers/s390/block/scm_drv.c 	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
bdev               31 drivers/s390/block/scm_drv.c 		scm_blk_set_available(bdev);
bdev               38 drivers/s390/block/scm_drv.c 	struct scm_blk_dev *bdev;
bdev               47 drivers/s390/block/scm_drv.c 	bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
bdev               48 drivers/s390/block/scm_drv.c 	if (!bdev)
bdev               51 drivers/s390/block/scm_drv.c 	dev_set_drvdata(&scmdev->dev, bdev);
bdev               52 drivers/s390/block/scm_drv.c 	ret = scm_blk_dev_setup(bdev, scmdev);
bdev               55 drivers/s390/block/scm_drv.c 		kfree(bdev);
bdev               65 drivers/s390/block/scm_drv.c 	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
bdev               67 drivers/s390/block/scm_drv.c 	scm_blk_dev_cleanup(bdev);
bdev               69 drivers/s390/block/scm_drv.c 	kfree(bdev);
bdev              233 drivers/s390/block/xpram.c static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
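Several of the getgeo handlers above (nvme, dasd, xpram, and the SCSI bios_param paths) report a fabricated 64-head, 32-sector geometry and derive the cylinder count from the disk capacity. A sketch of that convention, with a hypothetical handler name (example_getgeo is not one of the listed drivers):

/*
 * Sketch only: the common fake-geometry convention used by the getgeo
 * handlers listed above.
 */
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>

static int example_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	sector_t capacity = get_capacity(bdev->bd_disk);

	geo->heads = 64;
	geo->sectors = 32;
	geo->cylinders = capacity >> 11;	/* capacity / (heads * sectors) */
	geo->start = get_start_sect(bdev);	/* partition offset, as dasd_getgeo reports */
	return 0;
}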
bdev             1698 drivers/scsi/3w-9xxx.c static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
bdev             1408 drivers/scsi/3w-sas.c static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
bdev             1343 drivers/scsi/3w-xxxx.c static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev,
bdev              296 drivers/scsi/aacraid/linit.c static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
bdev              328 drivers/scsi/aacraid/linit.c 	buf = scsi_bios_ptable(bdev);
bdev             7185 drivers/scsi/advansys.c advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
bdev             1223 drivers/scsi/aha152x.c static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev,
bdev             1238 drivers/scsi/aha152x.c 		if (scsicam_bios_param(bdev, capacity, info) < 0 ||
bdev              982 drivers/scsi/aha1542.c 		struct block_device *bdev, sector_t capacity, int geom[])
bdev              723 drivers/scsi/aic7xxx/aic79xx_osm.c ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
bdev              736 drivers/scsi/aic7xxx/aic79xx_osm.c 	bh = scsi_bios_ptable(bdev);
bdev              695 drivers/scsi/aic7xxx/aic7xxx_osm.c ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
bdev              710 drivers/scsi/aic7xxx/aic7xxx_osm.c 	bh = scsi_bios_ptable(bdev);
bdev              112 drivers/scsi/arcmsr/arcmsr_hba.c 		struct block_device *bdev, sector_t capacity, int *info);
bdev              354 drivers/scsi/arcmsr/arcmsr_hba.c 		struct block_device *bdev, sector_t capacity, int *geom)
bdev              359 drivers/scsi/arcmsr/arcmsr_hba.c 	buffer = scsi_bios_ptable(bdev);
bdev             1060 drivers/scsi/dc395x.c 		struct block_device *bdev, sector_t capacity, int *info)
bdev             1083 drivers/scsi/dc395x.c 	return scsicam_bios_param(bdev, capacity, info);
bdev              463 drivers/scsi/fdomain.c 			     struct block_device *bdev,	sector_t capacity,
bdev              466 drivers/scsi/fdomain.c 	unsigned char *p = scsi_bios_ptable(bdev);
bdev             3359 drivers/scsi/gdth.c static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
bdev             1140 drivers/scsi/ips.c static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
bdev              401 drivers/scsi/ips.h    static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
bdev              877 drivers/scsi/libsas/sas_scsi_host.c 			  struct block_device *bdev,
bdev             2794 drivers/scsi/megaraid.c megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
bdev             2829 drivers/scsi/megaraid.c 		bh = scsi_bios_ptable(bdev);
bdev             3073 drivers/scsi/megaraid/megaraid_sas_base.c megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
bdev             2482 drivers/scsi/mpt3sas/mpt3sas_scsih.c scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
bdev             2147 drivers/scsi/mvumi.c mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
bdev             1777 drivers/scsi/myrb.c static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
bdev             1034 drivers/scsi/qla1280.c qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
bdev               40 drivers/scsi/scsicam.c 		struct block_device *bdev = dev->bd_contains;
bdev               42 drivers/scsi/scsicam.c 		void *data = read_dev_sector(bdev, 0, &sect);
bdev               68 drivers/scsi/scsicam.c int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip)
bdev               74 drivers/scsi/scsicam.c 	p = scsi_bios_ptable(bdev);
bdev             1334 drivers/scsi/sd.c static int sd_open(struct block_device *bdev, fmode_t mode)
bdev             1336 drivers/scsi/sd.c 	struct scsi_disk *sdkp = scsi_disk_get(bdev->bd_disk);
bdev             1356 drivers/scsi/sd.c 		check_disk_change(bdev);
bdev             1423 drivers/scsi/sd.c static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
bdev             1425 drivers/scsi/sd.c 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
bdev             1438 drivers/scsi/sd.c 		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
bdev             1440 drivers/scsi/sd.c 		scsicam_bios_param(bdev, capacity, diskinfo);
bdev             1462 drivers/scsi/sd.c static int sd_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1465 drivers/scsi/sd.c 	struct gendisk *disk = bdev->bd_disk;
bdev             1474 drivers/scsi/sd.c 	error = scsi_verify_blk_ioctl(bdev, cmd);
bdev             1503 drivers/scsi/sd.c 			error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
bdev             1694 drivers/scsi/sd.c static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
bdev             1697 drivers/scsi/sd.c 	struct gendisk *disk = bdev->bd_disk;
bdev             1703 drivers/scsi/sd.c 	error = scsi_verify_blk_ioctl(bdev, cmd);
bdev             1744 drivers/scsi/sd.c static int sd_pr_command(struct block_device *bdev, u8 sa,
bdev             1747 drivers/scsi/sd.c 	struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
bdev             1774 drivers/scsi/sd.c static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
bdev             1779 drivers/scsi/sd.c 	return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
bdev             1784 drivers/scsi/sd.c static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
bdev             1789 drivers/scsi/sd.c 	return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
bdev             1792 drivers/scsi/sd.c static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
bdev             1794 drivers/scsi/sd.c 	return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
bdev             1797 drivers/scsi/sd.c static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
bdev             1800 drivers/scsi/sd.c 	return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
bdev             1804 drivers/scsi/sd.c static int sd_pr_clear(struct block_device *bdev, u64 key)
bdev             1806 drivers/scsi/sd.c 	return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
bdev              524 drivers/scsi/sr.c static int sr_block_open(struct block_device *bdev, fmode_t mode)
bdev              530 drivers/scsi/sr.c 	cd = scsi_cd_get(bdev->bd_disk);
bdev              536 drivers/scsi/sr.c 	check_disk_change(bdev);
bdev              539 drivers/scsi/sr.c 	ret = cdrom_open(&cd->cdi, bdev, mode);
bdev              559 drivers/scsi/sr.c static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
bdev              562 drivers/scsi/sr.c 	struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
bdev              587 drivers/scsi/sr.c 	ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
bdev             1459 drivers/scsi/stex.c 	struct block_device *bdev, sector_t capacity, int geom[])
bdev             1448 drivers/scsi/storvsc_drv.c static int storvsc_get_chs(struct scsi_device *sdev, struct block_device * bdev,
bdev              545 drivers/scsi/wd719x.c static int wd719x_biosparam(struct scsi_device *sdev, struct block_device *bdev,
bdev               81 drivers/staging/comedi/drivers/comedi_bond.c 		struct bonded_device *bdev = *devs++;
bdev               83 drivers/staging/comedi/drivers/comedi_bond.c 		if (base_chan < bdev->nchans) {
bdev               92 drivers/staging/comedi/drivers/comedi_bond.c 			b_chans = bdev->nchans - base_chan;
bdev              100 drivers/staging/comedi/drivers/comedi_bond.c 			ret = comedi_dio_bitfield2(bdev->dev, bdev->subdev,
bdev              117 drivers/staging/comedi/drivers/comedi_bond.c 			base_chan -= bdev->nchans;
bdev              131 drivers/staging/comedi/drivers/comedi_bond.c 	struct bonded_device *bdev;
bdev              138 drivers/staging/comedi/drivers/comedi_bond.c 	for (bdev = *devs++; chan >= bdev->nchans; bdev = *devs++)
bdev              139 drivers/staging/comedi/drivers/comedi_bond.c 		chan -= bdev->nchans;
bdev              154 drivers/staging/comedi/drivers/comedi_bond.c 		ret = comedi_dio_config(bdev->dev, bdev->subdev, chan, data[0]);
bdev              157 drivers/staging/comedi/drivers/comedi_bond.c 		ret = comedi_dio_get_config(bdev->dev, bdev->subdev, chan,
bdev              186 drivers/staging/comedi/drivers/comedi_bond.c 		struct bonded_device *bdev;
bdev              226 drivers/staging/comedi/drivers/comedi_bond.c 			bdev = kmalloc(sizeof(*bdev), GFP_KERNEL);
bdev              227 drivers/staging/comedi/drivers/comedi_bond.c 			if (!bdev)
bdev              230 drivers/staging/comedi/drivers/comedi_bond.c 			bdev->dev = d;
bdev              231 drivers/staging/comedi/drivers/comedi_bond.c 			bdev->minor = minor;
bdev              232 drivers/staging/comedi/drivers/comedi_bond.c 			bdev->subdev = sdev;
bdev              233 drivers/staging/comedi/drivers/comedi_bond.c 			bdev->nchans = nchans;
bdev              248 drivers/staging/comedi/drivers/comedi_bond.c 				kfree(bdev);
bdev              252 drivers/staging/comedi/drivers/comedi_bond.c 			devpriv->devs[devpriv->ndevs++] = bdev;
bdev              258 drivers/staging/comedi/drivers/comedi_bond.c 					 bdev->minor, bdev->subdev);
bdev              323 drivers/staging/comedi/drivers/comedi_bond.c 			struct bonded_device *bdev;
bdev              325 drivers/staging/comedi/drivers/comedi_bond.c 			bdev = devpriv->devs[devpriv->ndevs];
bdev              326 drivers/staging/comedi/drivers/comedi_bond.c 			if (!bdev)
bdev              328 drivers/staging/comedi/drivers/comedi_bond.c 			if (!test_and_set_bit(bdev->minor, devs_closed))
bdev              329 drivers/staging/comedi/drivers/comedi_bond.c 				comedi_close(bdev->dev);
bdev              330 drivers/staging/comedi/drivers/comedi_bond.c 			kfree(bdev);
bdev             3974 drivers/staging/exfat/exfat_super.c 	struct block_device *bdev = sb->s_bdev;
bdev             3992 drivers/staging/exfat/exfat_super.c 			invalidate_bdev(bdev);
bdev              562 drivers/target/target_core_file.c 		struct block_device *bdev = inode->i_bdev;
bdev              565 drivers/target/target_core_file.c 		ret = blkdev_issue_discard(bdev,
bdev              394 drivers/target/target_core_iblock.c 	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
bdev              398 drivers/target/target_core_iblock.c 	ret = blkdev_issue_discard(bdev,
bdev              411 drivers/target/target_core_iblock.c iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
bdev              431 drivers/target/target_core_iblock.c 	ret = blkdev_issue_zeroout(bdev,
bdev              446 drivers/target/target_core_iblock.c 	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
bdev              471 drivers/target/target_core_iblock.c 	if (bdev_write_zeroes_sectors(bdev)) {
bdev              472 drivers/target/target_core_iblock.c 		if (!iblock_execute_zero_out(bdev, cmd))
bdev             2124 drivers/video/fbdev/sh_mobile_lcdcfb.c static int sh_mobile_lcdc_update_bl(struct backlight_device *bdev)
bdev             2126 drivers/video/fbdev/sh_mobile_lcdcfb.c 	struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev);
bdev             2127 drivers/video/fbdev/sh_mobile_lcdcfb.c 	int brightness = bdev->props.brightness;
bdev             2129 drivers/video/fbdev/sh_mobile_lcdcfb.c 	if (bdev->props.power != FB_BLANK_UNBLANK ||
bdev             2130 drivers/video/fbdev/sh_mobile_lcdcfb.c 	    bdev->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
bdev             2137 drivers/video/fbdev/sh_mobile_lcdcfb.c static int sh_mobile_lcdc_get_brightness(struct backlight_device *bdev)
bdev             2139 drivers/video/fbdev/sh_mobile_lcdcfb.c 	struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev);
bdev             2144 drivers/video/fbdev/sh_mobile_lcdcfb.c static int sh_mobile_lcdc_check_fb(struct backlight_device *bdev,
bdev             2147 drivers/video/fbdev/sh_mobile_lcdcfb.c 	return (info->bl_dev == bdev);
bdev             2177 drivers/video/fbdev/sh_mobile_lcdcfb.c static void sh_mobile_lcdc_bl_remove(struct backlight_device *bdev)
bdev             2179 drivers/video/fbdev/sh_mobile_lcdcfb.c 	backlight_device_unregister(bdev);
bdev              505 drivers/video/fbdev/ssd1307fb.c static int ssd1307fb_update_bl(struct backlight_device *bdev)
bdev              507 drivers/video/fbdev/ssd1307fb.c 	struct ssd1307fb_par *par = bl_get_data(bdev);
bdev              509 drivers/video/fbdev/ssd1307fb.c 	int brightness = bdev->props.brightness;
bdev              522 drivers/video/fbdev/ssd1307fb.c static int ssd1307fb_get_brightness(struct backlight_device *bdev)
bdev              524 drivers/video/fbdev/ssd1307fb.c 	struct ssd1307fb_par *par = bl_get_data(bdev);
bdev              529 drivers/video/fbdev/ssd1307fb.c static int ssd1307fb_check_fb(struct backlight_device *bdev,
bdev              532 drivers/video/fbdev/ssd1307fb.c 	return (info->bl_dev == bdev);
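The ssd1307fb_*() callbacks above follow the stock backlight pattern: an update_status/get_brightness/check_fb trio registered through backlight_device_register(). A sketch of that wiring, using illustrative names (the driver's real table is in ssd1307fb.c and is not part of this listing):

/*
 * Sketch only: tying the callbacks listed above into a backlight_ops table
 * and registering it.  example_bl_ops/example_register_bl are placeholders.
 */
#include <linux/backlight.h>

static const struct backlight_ops example_bl_ops = {
	.update_status	= ssd1307fb_update_bl,		/* push props.brightness to hardware */
	.get_brightness	= ssd1307fb_get_brightness,	/* report the current level */
	.check_fb	= ssd1307fb_check_fb,		/* restrict to the matching fb_info */
};

static struct backlight_device *example_register_bl(struct device *parent,
						    void *drvdata)
{
	/* Registration pattern, as also seen in apple-gmux and sh_mobile_lcdcfb above. */
	return backlight_device_register("example_bl", parent, drvdata,
					 &example_bl_ops, NULL);
}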
bdev              845 drivers/visorbus/visorbus_main.c 	struct visor_device *bdev;
bdev              855 drivers/visorbus/visorbus_main.c 	bdev = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
bdev              856 drivers/visorbus/visorbus_main.c 	if (!bdev)
bdev              858 drivers/visorbus/visorbus_main.c 	hdr_info = (struct visor_vbus_headerinfo *)bdev->vbus_hdr_info;
bdev              877 drivers/visorbus/visorbus_main.c 	write_vbus_dev_info(bdev->visorchannel, hdr_info, &dev_info, dev_no);
bdev              878 drivers/visorbus/visorbus_main.c 	write_vbus_chp_info(bdev->visorchannel, hdr_info, &chipset_driverinfo);
bdev              879 drivers/visorbus/visorbus_main.c 	write_vbus_bus_info(bdev->visorchannel, hdr_info,
bdev               41 fs/block_dev.c 	struct block_device bdev;
bdev               54 fs/block_dev.c 	return &BDEV_I(inode)->bdev;
bdev               58 fs/block_dev.c static void bdev_write_inode(struct block_device *bdev)
bdev               60 fs/block_dev.c 	struct inode *inode = bdev->bd_inode;
bdev               71 fs/block_dev.c 					    bdevname(bdev, name), ret);
bdev               79 fs/block_dev.c void kill_bdev(struct block_device *bdev)
bdev               81 fs/block_dev.c 	struct address_space *mapping = bdev->bd_inode->i_mapping;
bdev               92 fs/block_dev.c void invalidate_bdev(struct block_device *bdev)
bdev               94 fs/block_dev.c 	struct address_space *mapping = bdev->bd_inode->i_mapping;
bdev              108 fs/block_dev.c static void set_init_blocksize(struct block_device *bdev)
bdev              110 fs/block_dev.c 	unsigned bsize = bdev_logical_block_size(bdev);
bdev              111 fs/block_dev.c 	loff_t size = i_size_read(bdev->bd_inode);
bdev              118 fs/block_dev.c 	bdev->bd_block_size = bsize;
bdev              119 fs/block_dev.c 	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
bdev              122 fs/block_dev.c int set_blocksize(struct block_device *bdev, int size)
bdev              129 fs/block_dev.c 	if (size < bdev_logical_block_size(bdev))
bdev              133 fs/block_dev.c 	if (bdev->bd_block_size != size) {
bdev              134 fs/block_dev.c 		sync_blockdev(bdev);
bdev              135 fs/block_dev.c 		bdev->bd_block_size = size;
bdev              136 fs/block_dev.c 		bdev->bd_inode->i_blkbits = blksize_bits(size);
bdev              137 fs/block_dev.c 		kill_bdev(bdev);
bdev              207 fs/block_dev.c 	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
bdev              216 fs/block_dev.c 	    (bdev_logical_block_size(bdev) - 1))
bdev              229 fs/block_dev.c 	bio_set_dev(&bio, bdev);
bdev              258 fs/block_dev.c 		    !blk_poll(bdev_get_queue(bdev), qc, true))
bdev              293 fs/block_dev.c 	struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
bdev              294 fs/block_dev.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev              343 fs/block_dev.c 	struct block_device *bdev = I_BDEV(inode);
bdev              354 fs/block_dev.c 	    (bdev_logical_block_size(bdev) - 1))
bdev              380 fs/block_dev.c 		bio_set_dev(bio, bdev);
bdev              452 fs/block_dev.c 		    !blk_poll(bdev_get_queue(bdev), qc, true))
bdev              486 fs/block_dev.c int __sync_blockdev(struct block_device *bdev, int wait)
bdev              488 fs/block_dev.c 	if (!bdev)
bdev              491 fs/block_dev.c 		return filemap_flush(bdev->bd_inode->i_mapping);
bdev              492 fs/block_dev.c 	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
bdev              499 fs/block_dev.c int sync_blockdev(struct block_device *bdev)
bdev              501 fs/block_dev.c 	return __sync_blockdev(bdev, 1);
bdev              510 fs/block_dev.c int fsync_bdev(struct block_device *bdev)
bdev              512 fs/block_dev.c 	struct super_block *sb = get_super(bdev);
bdev              518 fs/block_dev.c 	return sync_blockdev(bdev);
bdev              534 fs/block_dev.c struct super_block *freeze_bdev(struct block_device *bdev)
bdev              539 fs/block_dev.c 	mutex_lock(&bdev->bd_fsfreeze_mutex);
bdev              540 fs/block_dev.c 	if (++bdev->bd_fsfreeze_count > 1) {
bdev              546 fs/block_dev.c 		sb = get_super(bdev);
bdev              549 fs/block_dev.c 		mutex_unlock(&bdev->bd_fsfreeze_mutex);
bdev              553 fs/block_dev.c 	sb = get_active_super(bdev);
bdev              562 fs/block_dev.c 		bdev->bd_fsfreeze_count--;
bdev              563 fs/block_dev.c 		mutex_unlock(&bdev->bd_fsfreeze_mutex);
bdev              568 fs/block_dev.c 	sync_blockdev(bdev);
bdev              569 fs/block_dev.c 	mutex_unlock(&bdev->bd_fsfreeze_mutex);
bdev              581 fs/block_dev.c int thaw_bdev(struct block_device *bdev, struct super_block *sb)
bdev              585 fs/block_dev.c 	mutex_lock(&bdev->bd_fsfreeze_mutex);
bdev              586 fs/block_dev.c 	if (!bdev->bd_fsfreeze_count)
bdev              590 fs/block_dev.c 	if (--bdev->bd_fsfreeze_count > 0)
bdev              601 fs/block_dev.c 		bdev->bd_fsfreeze_count++;
bdev              603 fs/block_dev.c 	mutex_unlock(&bdev->bd_fsfreeze_mutex);
bdev              664 fs/block_dev.c 	struct block_device *bdev = I_BDEV(bd_inode);
bdev              676 fs/block_dev.c 	error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
bdev              700 fs/block_dev.c int bdev_read_page(struct block_device *bdev, sector_t sector,
bdev              703 fs/block_dev.c 	const struct block_device_operations *ops = bdev->bd_disk->fops;
bdev              706 fs/block_dev.c 	if (!ops->rw_page || bdev_get_integrity(bdev))
bdev              709 fs/block_dev.c 	result = blk_queue_enter(bdev->bd_queue, 0);
bdev              712 fs/block_dev.c 	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
bdev              714 fs/block_dev.c 	blk_queue_exit(bdev->bd_queue);
bdev              738 fs/block_dev.c int bdev_write_page(struct block_device *bdev, sector_t sector,
bdev              742 fs/block_dev.c 	const struct block_device_operations *ops = bdev->bd_disk->fops;
bdev              744 fs/block_dev.c 	if (!ops->rw_page || bdev_get_integrity(bdev))
bdev              746 fs/block_dev.c 	result = blk_queue_enter(bdev->bd_queue, 0);
bdev              751 fs/block_dev.c 	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
bdev              759 fs/block_dev.c 	blk_queue_exit(bdev->bd_queue);
bdev              787 fs/block_dev.c 	struct block_device *bdev = &ei->bdev;
bdev              789 fs/block_dev.c 	memset(bdev, 0, sizeof(*bdev));
bdev              790 fs/block_dev.c 	mutex_init(&bdev->bd_mutex);
bdev              791 fs/block_dev.c 	INIT_LIST_HEAD(&bdev->bd_list);
bdev              793 fs/block_dev.c 	INIT_LIST_HEAD(&bdev->bd_holder_disks);
bdev              795 fs/block_dev.c 	bdev->bd_bdi = &noop_backing_dev_info;
bdev              798 fs/block_dev.c 	mutex_init(&bdev->bd_fsfreeze_mutex);
bdev              803 fs/block_dev.c 	struct block_device *bdev = &BDEV_I(inode)->bdev;
bdev              808 fs/block_dev.c 	list_del_init(&bdev->bd_list);
bdev              812 fs/block_dev.c 	if (bdev->bd_bdi != &noop_backing_dev_info) {
bdev              813 fs/block_dev.c 		bdi_put(bdev->bd_bdi);
bdev              814 fs/block_dev.c 		bdev->bd_bdi = &noop_backing_dev_info;
bdev              875 fs/block_dev.c 	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
bdev              880 fs/block_dev.c 	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
bdev              903 fs/block_dev.c 	struct block_device *bdev;
bdev              912 fs/block_dev.c 	bdev = &BDEV_I(inode)->bdev;
bdev              915 fs/block_dev.c 		bdev->bd_contains = NULL;
bdev              916 fs/block_dev.c 		bdev->bd_super = NULL;
bdev              917 fs/block_dev.c 		bdev->bd_inode = inode;
bdev              918 fs/block_dev.c 		bdev->bd_block_size = i_blocksize(inode);
bdev              919 fs/block_dev.c 		bdev->bd_part_count = 0;
bdev              920 fs/block_dev.c 		bdev->bd_invalidated = 0;
bdev              923 fs/block_dev.c 		inode->i_bdev = bdev;
bdev              927 fs/block_dev.c 		list_add(&bdev->bd_list, &all_bdevs);
bdev              931 fs/block_dev.c 	return bdev;
bdev              940 fs/block_dev.c struct block_device *bdgrab(struct block_device *bdev)
bdev              942 fs/block_dev.c 	ihold(bdev->bd_inode);
bdev              943 fs/block_dev.c 	return bdev;
bdev              949 fs/block_dev.c 	struct block_device *bdev;
bdev              952 fs/block_dev.c 	list_for_each_entry(bdev, &all_bdevs, bd_list) {
bdev              953 fs/block_dev.c 		ret += bdev->bd_inode->i_mapping->nrpages;
bdev              959 fs/block_dev.c void bdput(struct block_device *bdev)
bdev              961 fs/block_dev.c 	iput(bdev->bd_inode);
bdev              968 fs/block_dev.c 	struct block_device *bdev;
bdev              971 fs/block_dev.c 	bdev = inode->i_bdev;
bdev              972 fs/block_dev.c 	if (bdev && !inode_unhashed(bdev->bd_inode)) {
bdev              973 fs/block_dev.c 		bdgrab(bdev);
bdev              975 fs/block_dev.c 		return bdev;
bdev              985 fs/block_dev.c 	if (bdev)
bdev              988 fs/block_dev.c 	bdev = bdget(inode->i_rdev);
bdev              989 fs/block_dev.c 	if (bdev) {
bdev              998 fs/block_dev.c 			bdgrab(bdev);
bdev              999 fs/block_dev.c 			inode->i_bdev = bdev;
bdev             1000 fs/block_dev.c 			inode->i_mapping = bdev->bd_inode->i_mapping;
bdev             1004 fs/block_dev.c 	return bdev;
bdev             1011 fs/block_dev.c 	struct block_device *bdev = NULL;
bdev             1015 fs/block_dev.c 		bdev = inode->i_bdev;
bdev             1020 fs/block_dev.c 	if (bdev)
bdev             1021 fs/block_dev.c 		bdput(bdev);
bdev             1038 fs/block_dev.c static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
bdev             1041 fs/block_dev.c 	if (bdev->bd_holder == holder)
bdev             1043 fs/block_dev.c 	else if (bdev->bd_holder != NULL)
bdev             1045 fs/block_dev.c 	else if (whole == bdev)
bdev             1074 fs/block_dev.c static int bd_prepare_to_claim(struct block_device *bdev,
bdev             1079 fs/block_dev.c 	if (!bd_may_claim(bdev, whole, holder))
bdev             1099 fs/block_dev.c static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
bdev             1101 fs/block_dev.c 	struct gendisk *disk = get_gendisk(bdev->bd_dev, partno);
bdev             1113 fs/block_dev.c 	if (inode_unhashed(bdev->bd_inode)) {
bdev             1143 fs/block_dev.c struct block_device *bd_start_claiming(struct block_device *bdev, void *holder)
bdev             1155 fs/block_dev.c 	disk = bdev_get_gendisk(bdev, &partno);
bdev             1170 fs/block_dev.c 		whole = bdgrab(bdev);
bdev             1179 fs/block_dev.c 	err = bd_prepare_to_claim(bdev, whole, holder);
bdev             1210 fs/block_dev.c void bd_finish_claiming(struct block_device *bdev, struct block_device *whole,
bdev             1214 fs/block_dev.c 	BUG_ON(!bd_may_claim(bdev, whole, holder));
bdev             1221 fs/block_dev.c 	bdev->bd_holders++;
bdev             1222 fs/block_dev.c 	bdev->bd_holder = holder;
bdev             1238 fs/block_dev.c void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
bdev             1254 fs/block_dev.c static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
bdev             1259 fs/block_dev.c 	list_for_each_entry(holder, &bdev->bd_holder_disks, list)
bdev             1303 fs/block_dev.c int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
bdev             1308 fs/block_dev.c 	mutex_lock(&bdev->bd_mutex);
bdev             1310 fs/block_dev.c 	WARN_ON_ONCE(!bdev->bd_holder);
bdev             1313 fs/block_dev.c 	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
bdev             1316 fs/block_dev.c 	holder = bd_find_holder_disk(bdev, disk);
bdev             1332 fs/block_dev.c 	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
bdev             1336 fs/block_dev.c 	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
bdev             1343 fs/block_dev.c 	kobject_get(bdev->bd_part->holder_dir);
bdev             1345 fs/block_dev.c 	list_add(&holder->list, &bdev->bd_holder_disks);
bdev             1349 fs/block_dev.c 	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
bdev             1353 fs/block_dev.c 	mutex_unlock(&bdev->bd_mutex);
bdev             1368 fs/block_dev.c void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
bdev             1372 fs/block_dev.c 	mutex_lock(&bdev->bd_mutex);
bdev             1374 fs/block_dev.c 	holder = bd_find_holder_disk(bdev, disk);
bdev             1377 fs/block_dev.c 		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
bdev             1378 fs/block_dev.c 		del_symlink(bdev->bd_part->holder_dir,
bdev             1380 fs/block_dev.c 		kobject_put(bdev->bd_part->holder_dir);
bdev             1385 fs/block_dev.c 	mutex_unlock(&bdev->bd_mutex);
bdev             1400 fs/block_dev.c static void flush_disk(struct block_device *bdev, bool kill_dirty)
bdev             1402 fs/block_dev.c 	if (__invalidate_device(bdev, kill_dirty)) {
bdev             1405 fs/block_dev.c 		       bdev->bd_disk ? bdev->bd_disk->disk_name : "");
bdev             1407 fs/block_dev.c 	bdev->bd_invalidated = 1;
bdev             1420 fs/block_dev.c void check_disk_size_change(struct gendisk *disk, struct block_device *bdev,
bdev             1426 fs/block_dev.c 	bdev_size = i_size_read(bdev->bd_inode);
bdev             1433 fs/block_dev.c 		i_size_write(bdev->bd_inode, disk_size);
bdev             1435 fs/block_dev.c 			flush_disk(bdev, false);
bdev             1459 fs/block_dev.c 		struct block_device *bdev = bdget_disk(disk, 0);
bdev             1461 fs/block_dev.c 		if (!bdev)
bdev             1464 fs/block_dev.c 		mutex_lock(&bdev->bd_mutex);
bdev             1465 fs/block_dev.c 		check_disk_size_change(disk, bdev, ret == 0);
bdev             1466 fs/block_dev.c 		bdev->bd_invalidated = 0;
bdev             1467 fs/block_dev.c 		mutex_unlock(&bdev->bd_mutex);
bdev             1468 fs/block_dev.c 		bdput(bdev);
bdev             1483 fs/block_dev.c int check_disk_change(struct block_device *bdev)
bdev             1485 fs/block_dev.c 	struct gendisk *disk = bdev->bd_disk;
bdev             1494 fs/block_dev.c 	flush_disk(bdev, true);
bdev             1496 fs/block_dev.c 		bdops->revalidate_disk(bdev->bd_disk);
bdev             1502 fs/block_dev.c void bd_set_size(struct block_device *bdev, loff_t size)
bdev             1504 fs/block_dev.c 	inode_lock(bdev->bd_inode);
bdev             1505 fs/block_dev.c 	i_size_write(bdev->bd_inode, size);
bdev             1506 fs/block_dev.c 	inode_unlock(bdev->bd_inode);
bdev             1510 fs/block_dev.c static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
bdev             1512 fs/block_dev.c static void bdev_disk_changed(struct block_device *bdev, bool invalidate)
bdev             1514 fs/block_dev.c 	if (disk_part_scan_enabled(bdev->bd_disk)) {
bdev             1516 fs/block_dev.c 			invalidate_partitions(bdev->bd_disk, bdev);
bdev             1518 fs/block_dev.c 			rescan_partitions(bdev->bd_disk, bdev);
bdev             1520 fs/block_dev.c 		check_disk_size_change(bdev->bd_disk, bdev, !invalidate);
bdev             1521 fs/block_dev.c 		bdev->bd_invalidated = 0;
bdev             1532 fs/block_dev.c static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev             1548 fs/block_dev.c 		ret = devcgroup_inode_permission(bdev->bd_inode, perm);
bdev             1550 fs/block_dev.c 			bdput(bdev);
bdev             1558 fs/block_dev.c 	disk = bdev_get_gendisk(bdev, &partno);
bdev             1563 fs/block_dev.c 	mutex_lock_nested(&bdev->bd_mutex, for_part);
bdev             1564 fs/block_dev.c 	if (!bdev->bd_openers) {
bdev             1566 fs/block_dev.c 		bdev->bd_disk = disk;
bdev             1567 fs/block_dev.c 		bdev->bd_queue = disk->queue;
bdev             1568 fs/block_dev.c 		bdev->bd_contains = bdev;
bdev             1569 fs/block_dev.c 		bdev->bd_partno = partno;
bdev             1573 fs/block_dev.c 			bdev->bd_part = disk_get_part(disk, partno);
bdev             1574 fs/block_dev.c 			if (!bdev->bd_part)
bdev             1579 fs/block_dev.c 				ret = disk->fops->open(bdev, mode);
bdev             1585 fs/block_dev.c 					disk_put_part(bdev->bd_part);
bdev             1586 fs/block_dev.c 					bdev->bd_part = NULL;
bdev             1587 fs/block_dev.c 					bdev->bd_disk = NULL;
bdev             1588 fs/block_dev.c 					bdev->bd_queue = NULL;
bdev             1589 fs/block_dev.c 					mutex_unlock(&bdev->bd_mutex);
bdev             1597 fs/block_dev.c 				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
bdev             1598 fs/block_dev.c 				set_init_blocksize(bdev);
bdev             1607 fs/block_dev.c 			if (bdev->bd_invalidated &&
bdev             1609 fs/block_dev.c 				bdev_disk_changed(bdev, ret == -ENOMEDIUM);
bdev             1623 fs/block_dev.c 			bdev->bd_contains = whole;
bdev             1624 fs/block_dev.c 			bdev->bd_part = disk_get_part(disk, partno);
bdev             1626 fs/block_dev.c 			    !bdev->bd_part || !bdev->bd_part->nr_sects) {
bdev             1630 fs/block_dev.c 			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
bdev             1631 fs/block_dev.c 			set_init_blocksize(bdev);
bdev             1634 fs/block_dev.c 		if (bdev->bd_bdi == &noop_backing_dev_info)
bdev             1635 fs/block_dev.c 			bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
bdev             1637 fs/block_dev.c 		if (bdev->bd_contains == bdev) {
bdev             1639 fs/block_dev.c 			if (bdev->bd_disk->fops->open)
bdev             1640 fs/block_dev.c 				ret = bdev->bd_disk->fops->open(bdev, mode);
bdev             1642 fs/block_dev.c 			if (bdev->bd_invalidated &&
bdev             1644 fs/block_dev.c 				bdev_disk_changed(bdev, ret == -ENOMEDIUM);
bdev             1649 fs/block_dev.c 	bdev->bd_openers++;
bdev             1651 fs/block_dev.c 		bdev->bd_part_count++;
bdev             1652 fs/block_dev.c 	mutex_unlock(&bdev->bd_mutex);
bdev             1660 fs/block_dev.c 	disk_put_part(bdev->bd_part);
bdev             1661 fs/block_dev.c 	bdev->bd_disk = NULL;
bdev             1662 fs/block_dev.c 	bdev->bd_part = NULL;
bdev             1663 fs/block_dev.c 	bdev->bd_queue = NULL;
bdev             1664 fs/block_dev.c 	if (bdev != bdev->bd_contains)
bdev             1665 fs/block_dev.c 		__blkdev_put(bdev->bd_contains, mode, 1);
bdev             1666 fs/block_dev.c 	bdev->bd_contains = NULL;
bdev             1668 fs/block_dev.c 	mutex_unlock(&bdev->bd_mutex);
bdev             1672 fs/block_dev.c 	bdput(bdev);
bdev             1696 fs/block_dev.c int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
bdev             1704 fs/block_dev.c 		whole = bd_start_claiming(bdev, holder);
bdev             1706 fs/block_dev.c 			bdput(bdev);
bdev             1711 fs/block_dev.c 	res = __blkdev_get(bdev, mode, 0);
bdev             1717 fs/block_dev.c 		mutex_lock(&bdev->bd_mutex);
bdev             1719 fs/block_dev.c 			bd_finish_claiming(bdev, whole, holder);
bdev             1721 fs/block_dev.c 			bd_abort_claiming(bdev, whole, holder);
bdev             1729 fs/block_dev.c 		if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
bdev             1731 fs/block_dev.c 			bdev->bd_write_holder = true;
bdev             1735 fs/block_dev.c 		mutex_unlock(&bdev->bd_mutex);
bdev             1763 fs/block_dev.c 	struct block_device *bdev;
bdev             1766 fs/block_dev.c 	bdev = lookup_bdev(path);
bdev             1767 fs/block_dev.c 	if (IS_ERR(bdev))
bdev             1768 fs/block_dev.c 		return bdev;
bdev             1770 fs/block_dev.c 	err = blkdev_get(bdev, mode, holder);
bdev             1774 fs/block_dev.c 	if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
bdev             1775 fs/block_dev.c 		blkdev_put(bdev, mode);
bdev             1779 fs/block_dev.c 	return bdev;
bdev             1807 fs/block_dev.c 	struct block_device *bdev;
bdev             1810 fs/block_dev.c 	bdev = bdget(dev);
bdev             1811 fs/block_dev.c 	if (!bdev)
bdev             1814 fs/block_dev.c 	err = blkdev_get(bdev, mode, holder);
bdev             1818 fs/block_dev.c 	return bdev;
bdev             1824 fs/block_dev.c 	struct block_device *bdev;
bdev             1843 fs/block_dev.c 	bdev = bd_acquire(inode);
bdev             1844 fs/block_dev.c 	if (bdev == NULL)
bdev             1847 fs/block_dev.c 	filp->f_mapping = bdev->bd_inode->i_mapping;
bdev             1850 fs/block_dev.c 	return blkdev_get(bdev, filp->f_mode, filp);
bdev             1853 fs/block_dev.c static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
bdev             1855 fs/block_dev.c 	struct gendisk *disk = bdev->bd_disk;
bdev             1858 fs/block_dev.c 	mutex_lock_nested(&bdev->bd_mutex, for_part);
bdev             1860 fs/block_dev.c 		bdev->bd_part_count--;
bdev             1862 fs/block_dev.c 	if (!--bdev->bd_openers) {
bdev             1863 fs/block_dev.c 		WARN_ON_ONCE(bdev->bd_holders);
bdev             1864 fs/block_dev.c 		sync_blockdev(bdev);
bdev             1865 fs/block_dev.c 		kill_bdev(bdev);
bdev             1867 fs/block_dev.c 		bdev_write_inode(bdev);
bdev             1869 fs/block_dev.c 	if (bdev->bd_contains == bdev) {
bdev             1873 fs/block_dev.c 	if (!bdev->bd_openers) {
bdev             1874 fs/block_dev.c 		disk_put_part(bdev->bd_part);
bdev             1875 fs/block_dev.c 		bdev->bd_part = NULL;
bdev             1876 fs/block_dev.c 		bdev->bd_disk = NULL;
bdev             1877 fs/block_dev.c 		if (bdev != bdev->bd_contains)
bdev             1878 fs/block_dev.c 			victim = bdev->bd_contains;
bdev             1879 fs/block_dev.c 		bdev->bd_contains = NULL;
bdev             1883 fs/block_dev.c 	mutex_unlock(&bdev->bd_mutex);
bdev             1884 fs/block_dev.c 	bdput(bdev);
bdev             1889 fs/block_dev.c void blkdev_put(struct block_device *bdev, fmode_t mode)
bdev             1891 fs/block_dev.c 	mutex_lock(&bdev->bd_mutex);
bdev             1903 fs/block_dev.c 		WARN_ON_ONCE(--bdev->bd_holders < 0);
bdev             1904 fs/block_dev.c 		WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);
bdev             1907 fs/block_dev.c 		if ((bdev_free = !bdev->bd_holders))
bdev             1908 fs/block_dev.c 			bdev->bd_holder = NULL;
bdev             1909 fs/block_dev.c 		if (!bdev->bd_contains->bd_holders)
bdev             1910 fs/block_dev.c 			bdev->bd_contains->bd_holder = NULL;
bdev             1918 fs/block_dev.c 		if (bdev_free && bdev->bd_write_holder) {
bdev             1919 fs/block_dev.c 			disk_unblock_events(bdev->bd_disk);
bdev             1920 fs/block_dev.c 			bdev->bd_write_holder = false;
bdev             1929 fs/block_dev.c 	disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);
bdev             1931 fs/block_dev.c 	mutex_unlock(&bdev->bd_mutex);
bdev             1933 fs/block_dev.c 	__blkdev_put(bdev, mode, 0);
bdev             1939 fs/block_dev.c 	struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
bdev             1940 fs/block_dev.c 	blkdev_put(bdev, filp->f_mode);
bdev             1946 fs/block_dev.c 	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
bdev             1958 fs/block_dev.c 	return blkdev_ioctl(bdev, mode, cmd, arg);
bdev             2025 fs/block_dev.c 	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;
bdev             2059 fs/block_dev.c 	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
bdev             2070 fs/block_dev.c 	isize = i_size_read(bdev->bd_inode);
bdev             2084 fs/block_dev.c 	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
bdev             2088 fs/block_dev.c 	mapping = bdev->bd_inode->i_mapping;
bdev             2094 fs/block_dev.c 		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
bdev             2098 fs/block_dev.c 		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
bdev             2102 fs/block_dev.c 		error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
bdev             2139 fs/block_dev.c int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
bdev             2144 fs/block_dev.c 	res = blkdev_ioctl(bdev, 0, cmd, arg);
bdev             2161 fs/block_dev.c 	struct block_device *bdev;
bdev             2181 fs/block_dev.c 	bdev = bd_acquire(inode);
bdev             2182 fs/block_dev.c 	if (!bdev)
bdev             2186 fs/block_dev.c 	return bdev;
bdev             2188 fs/block_dev.c 	bdev = ERR_PTR(error);
bdev             2193 fs/block_dev.c int __invalidate_device(struct block_device *bdev, bool kill_dirty)
bdev             2195 fs/block_dev.c 	struct super_block *sb = get_super(bdev);
bdev             2209 fs/block_dev.c 	invalidate_bdev(bdev);
bdev             2221 fs/block_dev.c 		struct block_device *bdev;
bdev             2242 fs/block_dev.c 		bdev = I_BDEV(inode);
bdev             2244 fs/block_dev.c 		mutex_lock(&bdev->bd_mutex);
bdev             2245 fs/block_dev.c 		if (bdev->bd_openers)
bdev             2246 fs/block_dev.c 			func(bdev, arg);
bdev             2247 fs/block_dev.c 		mutex_unlock(&bdev->bd_mutex);
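
The fs/block_dev.c entries above all revolve around one pairing: a successful blkdev_get_by_path()/blkdev_get_by_dev() must be balanced by blkdev_put() with the same mode. As a minimal editor-added sketch (not taken from the indexed sources; demo_probe_bdev() and the read-only mode are assumptions for a kernel of roughly this vintage):

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/printk.h>

static int demo_probe_bdev(const char *path)
{
	const fmode_t mode = FMODE_READ;
	struct block_device *bdev;

	/* Takes a reference on the device; must be paired with blkdev_put(). */
	bdev = blkdev_get_by_path(path, mode, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	pr_info("%s: %lld bytes, %u-byte logical blocks\n", path,
		(long long)i_size_read(bdev->bd_inode),
		(unsigned int)bdev_logical_block_size(bdev));

	blkdev_put(bdev, mode);		/* same mode as the matching get */
	return 0;
}

blkdev_get_by_dev() follows the same pairing, taking a dev_t instead of a path.
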
bdev              188 fs/btrfs/check-integrity.c 	struct block_device *bdev;
bdev              265 fs/btrfs/check-integrity.c 		struct block_device *bdev,
bdev              460 fs/btrfs/check-integrity.c 	ds->bdev = NULL;
bdev              501 fs/btrfs/check-integrity.c 	     ((unsigned int)((uintptr_t)b->dev_state->bdev))) &
bdev              513 fs/btrfs/check-integrity.c 		struct block_device *bdev,
bdev              519 fs/btrfs/check-integrity.c 	     ((unsigned int)((uintptr_t)bdev))) &
bdev              524 fs/btrfs/check-integrity.c 		if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
bdev              547 fs/btrfs/check-integrity.c 	     ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^
bdev              548 fs/btrfs/check-integrity.c 	     ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev)))
bdev              579 fs/btrfs/check-integrity.c 		if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
bdev              581 fs/btrfs/check-integrity.c 		    l->block_ref_from->dev_state->bdev == bdev_ref_from &&
bdev              603 fs/btrfs/check-integrity.c 	    (((unsigned int)((uintptr_t)ds->bdev->bd_dev)) &
bdev              622 fs/btrfs/check-integrity.c 		if (ds->bdev->bd_dev == dev)
bdev              650 fs/btrfs/check-integrity.c 		if (!device->bdev || !device->name)
bdev              653 fs/btrfs/check-integrity.c 		dev_state = btrfsic_dev_state_lookup(device->bdev->bd_dev);
bdev              725 fs/btrfs/check-integrity.c 					tmp_next_block_ctx.dev->bdev,
bdev              731 fs/btrfs/check-integrity.c 					tmp_next_block_ctx.dev->bdev,
bdev              734 fs/btrfs/check-integrity.c 					bdev,
bdev              774 fs/btrfs/check-integrity.c 	struct block_device *const superblock_bdev = device->bdev;
bdev             1309 fs/btrfs/check-integrity.c 				next_block_ctx->dev->bdev,
bdev             1311 fs/btrfs/check-integrity.c 				block_ctx->dev->bdev,
bdev             1542 fs/btrfs/check-integrity.c 	    !device->bdev || !device->name)
bdev             1546 fs/btrfs/check-integrity.c 							device->bdev->bd_dev);
bdev             1629 fs/btrfs/check-integrity.c 		bio_set_dev(bio, block_ctx->dev->bdev);
bdev             1755 fs/btrfs/check-integrity.c 	struct block_device *bdev = dev_state->bdev;
bdev             1769 fs/btrfs/check-integrity.c 	block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
bdev             2455 fs/btrfs/check-integrity.c 		    state->latest_superblock->dev_state->bdev ==
bdev             2456 fs/btrfs/check-integrity.c 		    l->block_ref_from->dev_state->bdev)
bdev             2503 fs/btrfs/check-integrity.c 	    state->latest_superblock->dev_state->bdev == block->dev_state->bdev)
bdev             2590 fs/btrfs/check-integrity.c 	l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev,
bdev             2592 fs/btrfs/check-integrity.c 						from_block->dev_state->bdev,
bdev             2637 fs/btrfs/check-integrity.c 	block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev,
bdev             2648 fs/btrfs/check-integrity.c 		dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev->bd_dev);
bdev             2702 fs/btrfs/check-integrity.c 		if (dev_state->bdev == block_ctx.dev->bdev &&
bdev             2936 fs/btrfs/check-integrity.c 		if (!device->bdev || !device->name)
bdev             2945 fs/btrfs/check-integrity.c 		ds->bdev = device->bdev;
bdev             2947 fs/btrfs/check-integrity.c 		bdevname(ds->bdev, ds->name);
bdev             2987 fs/btrfs/check-integrity.c 		if (!device->bdev || !device->name)
bdev             2991 fs/btrfs/check-integrity.c 				device->bdev->bd_dev,
bdev              323 fs/btrfs/compression.c 	struct block_device *bdev;
bdev              342 fs/btrfs/compression.c 	bdev = fs_info->fs_devices->latest_bdev;
bdev              345 fs/btrfs/compression.c 	bio_set_dev(bio, bdev);
bdev              388 fs/btrfs/compression.c 			bio_set_dev(bio, bdev);
bdev              556 fs/btrfs/compression.c 	struct block_device *bdev;
bdev              607 fs/btrfs/compression.c 	bdev = fs_info->fs_devices->latest_bdev;
bdev              627 fs/btrfs/compression.c 	bio_set_dev(comp_bio, bdev);
bdev              678 fs/btrfs/compression.c 			bio_set_dev(comp_bio, bdev);
bdev              186 fs/btrfs/dev-replace.c 	struct block_device *bdev;
bdev              198 fs/btrfs/dev-replace.c 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
bdev              200 fs/btrfs/dev-replace.c 	if (IS_ERR(bdev)) {
bdev              202 fs/btrfs/dev-replace.c 		return PTR_ERR(bdev);
bdev              205 fs/btrfs/dev-replace.c 	sync_blockdev(bdev);
bdev              209 fs/btrfs/dev-replace.c 		if (device->bdev == bdev) {
bdev              218 fs/btrfs/dev-replace.c 	if (i_size_read(bdev->bd_inode) <
bdev              252 fs/btrfs/dev-replace.c 	device->bdev = bdev;
bdev              257 fs/btrfs/dev-replace.c 	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
bdev              270 fs/btrfs/dev-replace.c 	blkdev_put(bdev, FMODE_EXCL);
bdev              931 fs/btrfs/dev-replace.c 	if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
bdev              216 fs/btrfs/disk-io.c 		em->bdev = fs_info->fs_devices->latest_bdev;
bdev              231 fs/btrfs/disk-io.c 	em->bdev = fs_info->fs_devices->latest_bdev;
bdev             1621 fs/btrfs/disk-io.c 		if (!device->bdev)
bdev             1623 fs/btrfs/disk-io.c 		bdi = device->bdev->bd_bdi;
bdev             3407 fs/btrfs/disk-io.c int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
bdev             3415 fs/btrfs/disk-io.c 	if (bytenr + BTRFS_SUPER_INFO_SIZE >= i_size_read(bdev->bd_inode))
bdev             3418 fs/btrfs/disk-io.c 	bh = __bread(bdev, bytenr / BTRFS_BDEV_BLOCKSIZE, BTRFS_SUPER_INFO_SIZE);
bdev             3438 fs/btrfs/disk-io.c struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
bdev             3453 fs/btrfs/disk-io.c 		ret = btrfs_read_dev_one_super(bdev, i, &bh);
bdev             3515 fs/btrfs/disk-io.c 		bh = __getblk(device->bdev, bytenr / BTRFS_BDEV_BLOCKSIZE,
bdev             3573 fs/btrfs/disk-io.c 		bh = __find_get_block(device->bdev,
bdev             3621 fs/btrfs/disk-io.c 	struct request_queue *q = bdev_get_queue(device->bdev);
bdev             3629 fs/btrfs/disk-io.c 	bio_set_dev(bio, device->bdev);
bdev             3678 fs/btrfs/disk-io.c 		if (!dev->bdev)
bdev             3692 fs/btrfs/disk-io.c 		if (!dev->bdev) {
bdev             3790 fs/btrfs/disk-io.c 		if (!dev->bdev) {
bdev             3841 fs/btrfs/disk-io.c 		if (!dev->bdev)
bdev               57 fs/btrfs/disk-io.h struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
bdev               58 fs/btrfs/disk-io.h int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
bdev             1234 fs/btrfs/extent-tree.c static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
bdev             1281 fs/btrfs/extent-tree.c 			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
bdev             1298 fs/btrfs/extent-tree.c 		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
bdev             1332 fs/btrfs/extent-tree.c 			if (!stripe->dev->bdev) {
bdev             1336 fs/btrfs/extent-tree.c 			req_q = bdev_get_queue(stripe->dev->bdev);
bdev             1340 fs/btrfs/extent-tree.c 			ret = btrfs_issue_discard(stripe->dev->bdev,
bdev             5565 fs/btrfs/extent-tree.c 	if (!blk_queue_discard(bdev_get_queue(device->bdev)))
bdev             5609 fs/btrfs/extent-tree.c 		ret = btrfs_issue_discard(device->bdev, start, len,
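
The extent-tree.c lines above issue discards straight at stripe->dev->bdev after checking the request queue. A hedged sketch of that check-then-discard pattern (the helper name and the byte-based interface are illustrative assumptions, not btrfs code):

#include <linux/blkdev.h>
#include <linux/errno.h>

static int demo_discard_range(struct block_device *bdev, u64 start, u64 len)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q || !blk_queue_discard(q))
		return -EOPNOTSUPP;	/* device cannot discard */

	/* start/len are bytes; the block layer wants 512-byte sectors. */
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}
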
bdev             2233 fs/btrfs/extent_io.c 	if (!dev || !dev->bdev ||
bdev             2239 fs/btrfs/extent_io.c 	bio_set_dev(bio, dev->bdev);
bdev             2938 fs/btrfs/extent_io.c 			      struct block_device *bdev,
bdev             2984 fs/btrfs/extent_io.c 	bio_set_dev(bio, bdev);
bdev             3072 fs/btrfs/extent_io.c 	struct block_device *bdev;
bdev             3149 fs/btrfs/extent_io.c 		bdev = em->bdev;
bdev             3239 fs/btrfs/extent_io.c 					 pg_offset, bdev, bio,
bdev             3427 fs/btrfs/extent_io.c 	struct block_device *bdev;
bdev             3485 fs/btrfs/extent_io.c 		bdev = em->bdev;
bdev             3527 fs/btrfs/extent_io.c 					 bdev, &epd->bio,
bdev             3856 fs/btrfs/extent_io.c 	struct block_device *bdev = fs_info->fs_devices->latest_bdev;
bdev             3891 fs/btrfs/extent_io.c 					 p, offset, PAGE_SIZE, 0, bdev,
bdev              219 fs/btrfs/extent_map.c 	    prev->bdev == next->bdev &&
bdev               46 fs/btrfs/extent_map.h 		struct block_device *bdev;
bdev              952 fs/btrfs/file-item.c 	em->bdev = fs_info->fs_devices->latest_bdev;
bdev              670 fs/btrfs/file.c 			split->bdev = em->bdev;
bdev              683 fs/btrfs/file.c 			split->bdev = em->bdev;
bdev             2374 fs/btrfs/file.c 		hole_em->bdev = fs_info->fs_devices->latest_bdev;
bdev             5265 fs/btrfs/inode.c 			hole_em->bdev = fs_info->fs_devices->latest_bdev;
bdev             7042 fs/btrfs/inode.c 		em->bdev = fs_info->fs_devices->latest_bdev;
bdev             7058 fs/btrfs/inode.c 	em->bdev = fs_info->fs_devices->latest_bdev;
bdev             7317 fs/btrfs/inode.c 		em->bdev = NULL;
bdev             7695 fs/btrfs/inode.c 	em->bdev = root->fs_info->fs_devices->latest_bdev;
bdev             7743 fs/btrfs/inode.c 	bh_result->b_bdev = em->bdev;
bdev             7826 fs/btrfs/inode.c 	bh_result->b_bdev = em->bdev;
bdev             10557 fs/btrfs/inode.c 		em->bdev = fs_info->fs_devices->latest_bdev;
bdev             11046 fs/btrfs/inode.c 		sis->bdev = device->bdev;
bdev              509 fs/btrfs/ioctl.c 		if (!device->bdev)
bdev              511 fs/btrfs/ioctl.c 		q = bdev_get_queue(device->bdev);
bdev             1667 fs/btrfs/ioctl.c 		new_size = device->bdev->bd_inode->i_size;
bdev             1708 fs/btrfs/ioctl.c 	if (new_size > device->bdev->bd_inode->i_size) {
bdev             1100 fs/btrfs/raid56.c 	if (!stripe->dev->bdev)
bdev             1112 fs/btrfs/raid56.c 		if (last_end == disk_start && stripe->dev->bdev &&
bdev             1114 fs/btrfs/raid56.c 		    last->bi_disk == stripe->dev->bdev->bd_disk &&
bdev             1115 fs/btrfs/raid56.c 		    last->bi_partno == stripe->dev->bdev->bd_partno) {
bdev             1125 fs/btrfs/raid56.c 	bio_set_dev(bio, stripe->dev->bdev);
bdev             1375 fs/btrfs/raid56.c 		    stripe->dev->bdev &&
bdev             1376 fs/btrfs/raid56.c 		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
bdev             1377 fs/btrfs/raid56.c 		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
bdev              358 fs/btrfs/reada.c 		if (!dev->bdev)
bdev              421 fs/btrfs/reada.c 		if (!dev->bdev)
bdev             3267 fs/btrfs/relocation.c 	em->bdev = fs_info->fs_devices->latest_bdev;
bdev             1428 fs/btrfs/scrub.c 	if (!first_page->dev->bdev)
bdev             1432 fs/btrfs/scrub.c 	bio_set_dev(bio, first_page->dev->bdev);
bdev             1481 fs/btrfs/scrub.c 		if (page->dev->bdev == NULL) {
bdev             1489 fs/btrfs/scrub.c 		bio_set_dev(bio, page->dev->bdev);
bdev             1563 fs/btrfs/scrub.c 		if (!page_bad->dev->bdev) {
bdev             1570 fs/btrfs/scrub.c 		bio_set_dev(bio, page_bad->dev->bdev);
bdev             1663 fs/btrfs/scrub.c 		bio_set_dev(bio, sbio->dev->bdev);
bdev             2072 fs/btrfs/scrub.c 		bio_set_dev(bio, sbio->dev->bdev);
bdev             3454 fs/btrfs/scrub.c 		if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
bdev             4066 fs/btrfs/scrub.c 	    !bbio->stripes[0].dev->bdev) {
bdev             1479 fs/btrfs/super.c 	struct block_device *bdev = NULL;
bdev             1543 fs/btrfs/super.c 	bdev = fs_devices->latest_bdev;
bdev             1557 fs/btrfs/super.c 		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
bdev             1951 fs/btrfs/super.c 		    !device->bdev ||
bdev              936 fs/btrfs/sysfs.c 	if (one_device && one_device->bdev) {
bdev              937 fs/btrfs/sysfs.c 		disk = one_device->bdev->bd_part;
bdev              949 fs/btrfs/sysfs.c 		if (!one_device->bdev)
bdev              951 fs/btrfs/sysfs.c 		disk = one_device->bdev->bd_part;
bdev              983 fs/btrfs/sysfs.c 		if (!dev->bdev)
bdev              989 fs/btrfs/sysfs.c 		disk = dev->bdev->bd_part;
bdev             1001 fs/btrfs/sysfs.c void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action)
bdev             1005 fs/btrfs/sysfs.c 	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
bdev             1008 fs/btrfs/sysfs.c 			action, kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
bdev             1009 fs/btrfs/sysfs.c 			&disk_to_dev(bdev->bd_disk)->kobj);
bdev               29 fs/btrfs/sysfs.h void btrfs_kobject_uevent(struct block_device *bdev, enum kobject_action action);
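
btrfs_kobject_uevent() above notifies user space through the gendisk that backs the bdev. A small editor sketch of the same call, with a hypothetical helper name:

#include <linux/genhd.h>
#include <linux/kobject.h>
#include <linux/printk.h>

static void demo_bdev_uevent(struct block_device *bdev)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	if (ret)
		pr_warn("uevent for %s failed: %d\n",
			bdev->bd_disk->disk_name, ret);
}
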
bdev              469 fs/btrfs/volumes.c 		      int flush, struct block_device **bdev,
bdev              474 fs/btrfs/volumes.c 	*bdev = blkdev_get_by_path(device_path, flags, holder);
bdev              476 fs/btrfs/volumes.c 	if (IS_ERR(*bdev)) {
bdev              477 fs/btrfs/volumes.c 		ret = PTR_ERR(*bdev);
bdev              482 fs/btrfs/volumes.c 		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
bdev              483 fs/btrfs/volumes.c 	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
bdev              485 fs/btrfs/volumes.c 		blkdev_put(*bdev, flags);
bdev              488 fs/btrfs/volumes.c 	invalidate_bdev(*bdev);
bdev              489 fs/btrfs/volumes.c 	*bh = btrfs_read_dev_super(*bdev);
bdev              492 fs/btrfs/volumes.c 		blkdev_put(*bdev, flags);
bdev              499 fs/btrfs/volumes.c 	*bdev = NULL;
bdev              553 fs/btrfs/volumes.c 	bdi = device->bdev->bd_bdi;
bdev              786 fs/btrfs/volumes.c 	struct block_device *bdev;
bdev              792 fs/btrfs/volumes.c 	if (device->bdev)
bdev              798 fs/btrfs/volumes.c 				    &bdev, &bh);
bdev              823 fs/btrfs/volumes.c 		if (bdev_read_only(bdev))
bdev              829 fs/btrfs/volumes.c 	q = bdev_get_queue(bdev);
bdev              833 fs/btrfs/volumes.c 	device->bdev = bdev;
bdev              849 fs/btrfs/volumes.c 	blkdev_put(bdev, flags);
bdev             1102 fs/btrfs/volumes.c 		if (device->bdev) {
bdev             1111 fs/btrfs/volumes.c 			if (device->bdev != path_bdev) {
bdev             1249 fs/btrfs/volumes.c 		if (device->bdev) {
bdev             1250 fs/btrfs/volumes.c 			blkdev_put(device->bdev, device->mode);
bdev             1251 fs/btrfs/volumes.c 			device->bdev = NULL;
bdev             1271 fs/btrfs/volumes.c 	fs_devices->latest_bdev = latest_dev->bdev;
bdev             1278 fs/btrfs/volumes.c 	if (!device->bdev)
bdev             1282 fs/btrfs/volumes.c 		sync_blockdev(device->bdev);
bdev             1283 fs/btrfs/volumes.c 		invalidate_bdev(device->bdev);
bdev             1286 fs/btrfs/volumes.c 	blkdev_put(device->bdev, device->mode);
bdev             1295 fs/btrfs/volumes.c 	if (device->bdev)
bdev             1393 fs/btrfs/volumes.c 	fs_devices->latest_bdev = latest_dev->bdev;
bdev             1439 fs/btrfs/volumes.c static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
bdev             1447 fs/btrfs/volumes.c 	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
bdev             1460 fs/btrfs/volumes.c 	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
bdev             1506 fs/btrfs/volumes.c 	struct block_device *bdev;
bdev             1521 fs/btrfs/volumes.c 	bdev = blkdev_get_by_path(path, flags, holder);
bdev             1522 fs/btrfs/volumes.c 	if (IS_ERR(bdev))
bdev             1523 fs/btrfs/volumes.c 		return ERR_CAST(bdev);
bdev             1525 fs/btrfs/volumes.c 	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
bdev             1539 fs/btrfs/volumes.c 	blkdev_put(bdev, flags);
bdev             2083 fs/btrfs/volumes.c 		    && next_device->bdev)
bdev             2110 fs/btrfs/volumes.c 			(fs_info->sb->s_bdev == device->bdev))
bdev             2111 fs/btrfs/volumes.c 		fs_info->sb->s_bdev = next_device->bdev;
bdev             2113 fs/btrfs/volumes.c 	if (fs_info->fs_devices->latest_bdev == device->bdev)
bdev             2114 fs/btrfs/volumes.c 		fs_info->fs_devices->latest_bdev = next_device->bdev;
bdev             2237 fs/btrfs/volumes.c 	if (device->bdev) {
bdev             2253 fs/btrfs/volumes.c 		btrfs_scratch_superblocks(device->bdev, device->name->str);
bdev             2310 fs/btrfs/volumes.c 	if (srcdev->bdev)
bdev             2321 fs/btrfs/volumes.c 		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
bdev             2363 fs/btrfs/volumes.c 	if (tgtdev->bdev)
bdev             2381 fs/btrfs/volumes.c 	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);
bdev             2395 fs/btrfs/volumes.c 	struct block_device *bdev;
bdev             2400 fs/btrfs/volumes.c 				    fs_info->bdev_holder, 0, &bdev, &bh);
bdev             2416 fs/btrfs/volumes.c 	blkdev_put(bdev, FMODE_READ);
bdev             2445 fs/btrfs/volumes.c 				     &device->dev_state) && !device->bdev)
bdev             2598 fs/btrfs/volumes.c 	struct block_device *bdev;
bdev             2611 fs/btrfs/volumes.c 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
bdev             2613 fs/btrfs/volumes.c 	if (IS_ERR(bdev))
bdev             2614 fs/btrfs/volumes.c 		return PTR_ERR(bdev);
bdev             2622 fs/btrfs/volumes.c 	filemap_write_and_wait(bdev->bd_inode->i_mapping);
bdev             2626 fs/btrfs/volumes.c 		if (device->bdev == bdev) {
bdev             2655 fs/btrfs/volumes.c 	q = bdev_get_queue(bdev);
bdev             2661 fs/btrfs/volumes.c 	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
bdev             2666 fs/btrfs/volumes.c 	device->bdev = bdev;
bdev             2671 fs/btrfs/volumes.c 	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
bdev             2802 fs/btrfs/volumes.c 	blkdev_put(bdev, FMODE_EXCL);
bdev             5546 fs/btrfs/volumes.c 		if (map->stripes[preferred_mirror].dev->bdev &&
bdev             5550 fs/btrfs/volumes.c 			if (map->stripes[i].dev->bdev &&
bdev             6423 fs/btrfs/volumes.c 			if (dev->bdev) {
bdev             6525 fs/btrfs/volumes.c 		(u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
bdev             6527 fs/btrfs/volumes.c 	bio_set_dev(bio, dev->bdev);
bdev             6610 fs/btrfs/volumes.c 		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
bdev             6996 fs/btrfs/volumes.c 		if (!device->bdev) {
bdev             7006 fs/btrfs/volumes.c 		if (!device->bdev &&
bdev             7210 fs/btrfs/volumes.c 			if (!dev || !dev->bdev ||
bdev             7610 fs/btrfs/volumes.c void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
bdev             7616 fs/btrfs/volumes.c 	if (!bdev)
bdev             7622 fs/btrfs/volumes.c 		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
bdev             7634 fs/btrfs/volumes.c 	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
bdev               78 fs/btrfs/volumes.h 	struct block_device *bdev;
bdev              487 fs/btrfs/volumes.h void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path);
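
Much of the volumes.c traffic indexed above is the open-and-prepare sequence run on every member device: exclusive open with a holder cookie, flush and drop any stale page cache, pin the block size. A sketch of that sequence, assuming a fixed 4 KiB block size (the names are illustrative, not the btrfs helpers themselves):

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct block_device *demo_open_member(const char *path, void *holder)
{
	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	struct block_device *bdev;
	int ret;

	bdev = blkdev_get_by_path(path, mode, holder);
	if (IS_ERR(bdev))
		return bdev;

	/* Push out anything a previous user left dirty, then drop the cache. */
	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	ret = set_blocksize(bdev, 4096);
	if (ret) {
		blkdev_put(bdev, mode);
		return ERR_PTR(ret);
	}
	invalidate_bdev(bdev);
	return bdev;
}
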
bdev              194 fs/buffer.c    __find_get_block_slow(struct block_device *bdev, sector_t block)
bdev              196 fs/buffer.c    	struct inode *bd_inode = bdev->bd_inode;
bdev              239 fs/buffer.c    		       bh->b_state, bh->b_size, bdev,
bdev              534 fs/buffer.c    void write_boundary_block(struct block_device *bdev,
bdev              537 fs/buffer.c    	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
bdev              878 fs/buffer.c    static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
bdev              881 fs/buffer.c    	loff_t sz = i_size_read(bdev->bd_inode);
bdev              894 fs/buffer.c    init_page_buffers(struct page *page, struct block_device *bdev,
bdev              900 fs/buffer.c    	sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);
bdev              906 fs/buffer.c    			bh->b_bdev = bdev;
bdev              929 fs/buffer.c    grow_dev_page(struct block_device *bdev, sector_t block,
bdev              932 fs/buffer.c    	struct inode *inode = bdev->bd_inode;
bdev              956 fs/buffer.c    			end_block = init_page_buffers(page, bdev,
bdev              977 fs/buffer.c    	end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
bdev              993 fs/buffer.c    grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
bdev             1013 fs/buffer.c    			bdev);
bdev             1018 fs/buffer.c    	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
bdev             1022 fs/buffer.c    __getblk_slow(struct block_device *bdev, sector_t block,
bdev             1026 fs/buffer.c    	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
bdev             1031 fs/buffer.c    					bdev_logical_block_size(bdev));
bdev             1041 fs/buffer.c    		bh = __find_get_block(bdev, block, size);
bdev             1045 fs/buffer.c    		ret = grow_buffers(bdev, block, size, gfp);
bdev             1255 fs/buffer.c    lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
bdev             1265 fs/buffer.c    		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
bdev             1290 fs/buffer.c    __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
bdev             1292 fs/buffer.c    	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
bdev             1296 fs/buffer.c    		bh = __find_get_block_slow(bdev, block);
bdev             1315 fs/buffer.c    __getblk_gfp(struct block_device *bdev, sector_t block,
bdev             1318 fs/buffer.c    	struct buffer_head *bh = __find_get_block(bdev, block, size);
bdev             1322 fs/buffer.c    		bh = __getblk_slow(bdev, block, size, gfp);
bdev             1330 fs/buffer.c    void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
bdev             1332 fs/buffer.c    	struct buffer_head *bh = __getblk(bdev, block, size);
bdev             1340 fs/buffer.c    void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
bdev             1343 fs/buffer.c    	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
bdev             1364 fs/buffer.c    __bread_gfp(struct block_device *bdev, sector_t block,
bdev             1367 fs/buffer.c    	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
bdev             1574 fs/buffer.c    void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
bdev             1576 fs/buffer.c    	struct inode *bd_inode = bdev->bd_inode;
bdev             1892 fs/buffer.c    	bh->b_bdev = iomap->bdev;
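
The fs/buffer.c entries are the buffer-cache side of the picture: __find_get_block()/__getblk()/__bread() return buffer_heads keyed by (bdev, block, size). A minimal read through that path, assuming a 4 KiB block size and an arbitrary block number:

#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int demo_read_block0(struct block_device *bdev)
{
	struct buffer_head *bh;

	bh = __bread(bdev, 0, 4096);	/* block number, block size */
	if (!bh)
		return -EIO;

	pr_info("first bytes: %*ph\n", 8, bh->b_data);
	brelse(bh);			/* drop the reference __bread() took */
	return 0;
}
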
bdev              683 fs/dax.c       static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
bdev              692 fs/dax.c       	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
bdev              940 fs/dax.c       		struct block_device *bdev, struct writeback_control *wbc)
bdev              956 fs/dax.c       	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
bdev              999 fs/dax.c       	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
bdev             1047 fs/dax.c       static bool dax_range_is_aligned(struct block_device *bdev,
bdev             1050 fs/dax.c       	unsigned short sector_size = bdev_logical_block_size(bdev);
bdev             1060 fs/dax.c       int __dax_zero_page_range(struct block_device *bdev,
bdev             1064 fs/dax.c       	if (dax_range_is_aligned(bdev, offset, size)) {
bdev             1067 fs/dax.c       		return blkdev_issue_zeroout(bdev, start_sector,
bdev             1074 fs/dax.c       		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
bdev             1096 fs/dax.c       	struct block_device *bdev = iomap->bdev;
bdev             1141 fs/dax.c       		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
bdev             1320 fs/dax.c       			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
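
fs/dax.c above keeps translating (bdev, sector) pairs into page offsets on the backing dax_device. An editor sketch of that translation plus a direct mapping, assuming CONFIG_FS_DAX and a dax_dev obtained elsewhere (for example from an iomap); the helper name is hypothetical:

#include <linux/dax.h>
#include <linux/blkdev.h>

static void *demo_dax_kaddr(struct block_device *bdev,
			    struct dax_device *dax_dev, sector_t sector)
{
	pgoff_t pgoff;
	void *kaddr;
	long nr;
	int id;

	if (bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff))
		return NULL;

	id = dax_read_lock();
	nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
	dax_read_unlock(id);

	return nr < 1 ? NULL : kaddr;	/* NULL on error or no pages */
}
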
bdev              429 fs/direct-io.c 	      struct block_device *bdev,
bdev              440 fs/direct-io.c 	bio_set_dev(bio, bdev);
bdev             1165 fs/direct-io.c 		      struct block_device *bdev, struct iov_iter *iter,
bdev             1188 fs/direct-io.c 		if (bdev)
bdev             1189 fs/direct-io.c 			blkbits = blksize_bits(bdev_logical_block_size(bdev));
bdev             1395 fs/direct-io.c 			     struct block_device *bdev, struct iov_iter *iter,
bdev             1408 fs/direct-io.c 	prefetch(&bdev->bd_disk->part_tbl);
bdev             1409 fs/direct-io.c 	prefetch(bdev->bd_queue);
bdev             1410 fs/direct-io.c 	prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
bdev             1412 fs/direct-io.c 	return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block,
bdev              823 fs/ext2/inode.c 	iomap->bdev = inode->i_sb->s_bdev;
bdev             3544 fs/ext4/inode.c 	iomap->bdev = inode->i_sb->s_bdev;
bdev              241 fs/ext4/readpage.c 	struct block_device *bdev = inode->i_sb->s_bdev;
bdev              386 fs/ext4/readpage.c 			bio_set_dev(bio, bdev);
bdev              886 fs/ext4/super.c 	struct block_device *bdev;
bdev              889 fs/ext4/super.c 	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
bdev              890 fs/ext4/super.c 	if (IS_ERR(bdev))
bdev              892 fs/ext4/super.c 	return bdev;
bdev              896 fs/ext4/super.c 			__bdevname(dev, b), PTR_ERR(bdev));
bdev              903 fs/ext4/super.c static void ext4_blkdev_put(struct block_device *bdev)
bdev              905 fs/ext4/super.c 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
bdev              910 fs/ext4/super.c 	struct block_device *bdev;
bdev              911 fs/ext4/super.c 	bdev = sbi->journal_bdev;
bdev              912 fs/ext4/super.c 	if (bdev) {
bdev              913 fs/ext4/super.c 		ext4_blkdev_put(bdev);
bdev             4840 fs/ext4/super.c 	struct block_device *bdev;
bdev             4844 fs/ext4/super.c 	bdev = ext4_blkdev_get(j_dev, sb);
bdev             4845 fs/ext4/super.c 	if (bdev == NULL)
bdev             4849 fs/ext4/super.c 	hblock = bdev_logical_block_size(bdev);
bdev             4858 fs/ext4/super.c 	set_blocksize(bdev, blocksize);
bdev             4859 fs/ext4/super.c 	if (!(bh = __bread(bdev, sb_block, blocksize))) {
bdev             4894 fs/ext4/super.c 	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
bdev             4913 fs/ext4/super.c 	EXT4_SB(sb)->journal_bdev = bdev;
bdev             4920 fs/ext4/super.c 	ext4_blkdev_put(bdev);
bdev              243 fs/f2fs/data.c 	struct block_device *bdev = sbi->sb->s_bdev;
bdev              251 fs/f2fs/data.c 				bdev = FDEV(i).bdev;
bdev              257 fs/f2fs/data.c 		bio_set_dev(bio, bdev);
bdev              260 fs/f2fs/data.c 	return bdev;
bdev             2754 fs/f2fs/data.c 	struct block_device *bdev = inode->i_sb->s_bdev;
bdev             2757 fs/f2fs/data.c 		if (bdev)
bdev             2758 fs/f2fs/data.c 			blkbits = blksize_bits(bdev_logical_block_size(bdev));
bdev              299 fs/f2fs/f2fs.h 	struct block_device *bdev;	/* bdev */
bdev             1086 fs/f2fs/f2fs.h 	struct block_device *bdev;
bdev             3628 fs/f2fs/f2fs.h static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
bdev             3630 fs/f2fs/f2fs.h 	return blk_queue_discard(bdev_get_queue(bdev)) ||
bdev             3631 fs/f2fs/f2fs.h 	       bdev_is_zoned(bdev);
bdev             3642 fs/f2fs/f2fs.h 		if (f2fs_bdev_support_discard(FDEV(i).bdev))
bdev             3661 fs/f2fs/f2fs.h 		if (bdev_read_only(FDEV(i).bdev))
bdev              559 fs/f2fs/segment.c 				struct block_device *bdev)
bdev              569 fs/f2fs/segment.c 	bio_set_dev(bio, bdev);
bdev              573 fs/f2fs/segment.c 	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
bdev              589 fs/f2fs/segment.c 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
bdev              764 fs/f2fs/segment.c 		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
bdev              935 fs/f2fs/segment.c 		struct block_device *bdev, block_t lstart,
bdev              948 fs/f2fs/segment.c 	dc->bdev = bdev;
bdev              967 fs/f2fs/segment.c 				struct block_device *bdev, block_t lstart,
bdev              975 fs/f2fs/segment.c 	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);
bdev             1004 fs/f2fs/segment.c 	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
bdev             1110 fs/f2fs/segment.c 				struct block_device *bdev, block_t lstart,
bdev             1118 fs/f2fs/segment.c 	struct block_device *bdev = dc->bdev;
bdev             1119 fs/f2fs/segment.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev             1135 fs/f2fs/segment.c 	trace_f2fs_issue_discard(bdev, dc->start, dc->len);
bdev             1165 fs/f2fs/segment.c 		err = __blkdev_issue_discard(bdev,
bdev             1216 fs/f2fs/segment.c 		__update_discard_tree_range(sbi, bdev, lstart, start, len);
bdev             1221 fs/f2fs/segment.c 				struct block_device *bdev, block_t lstart,
bdev             1241 fs/f2fs/segment.c 	dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
bdev             1278 fs/f2fs/segment.c 			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
bdev             1293 fs/f2fs/segment.c 				struct block_device *bdev, block_t lstart,
bdev             1301 fs/f2fs/segment.c 	struct request_queue *q = bdev_get_queue(bdev);
bdev             1344 fs/f2fs/segment.c 			prev_dc->bdev == bdev &&
bdev             1356 fs/f2fs/segment.c 			next_dc->bdev == bdev &&
bdev             1370 fs/f2fs/segment.c 			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
bdev             1384 fs/f2fs/segment.c 		struct block_device *bdev, block_t blkstart, block_t blklen)
bdev             1388 fs/f2fs/segment.c 	if (!f2fs_bdev_support_discard(bdev))
bdev             1391 fs/f2fs/segment.c 	trace_f2fs_queue_discard(bdev, blkstart, blklen);
bdev             1399 fs/f2fs/segment.c 	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
bdev             1754 fs/f2fs/segment.c 		struct block_device *bdev, block_t blkstart, block_t blklen)
bdev             1775 fs/f2fs/segment.c 		if (sector & (bdev_zone_sectors(bdev) - 1) ||
bdev             1776 fs/f2fs/segment.c 				nr_sects != bdev_zone_sectors(bdev)) {
bdev             1782 fs/f2fs/segment.c 		trace_f2fs_issue_reset_zone(bdev, blkstart);
bdev             1783 fs/f2fs/segment.c 		return blkdev_reset_zones(bdev, sector, nr_sects, GFP_NOFS);
bdev             1787 fs/f2fs/segment.c 	return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
bdev             1792 fs/f2fs/segment.c 		struct block_device *bdev, block_t blkstart, block_t blklen)
bdev             1795 fs/f2fs/segment.c 	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
bdev             1796 fs/f2fs/segment.c 		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
bdev             1798 fs/f2fs/segment.c 	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
bdev             1805 fs/f2fs/segment.c 	struct block_device *bdev;
bdev             1811 fs/f2fs/segment.c 	bdev = f2fs_target_device(sbi, blkstart, NULL);
bdev             1818 fs/f2fs/segment.c 			if (bdev2 != bdev) {
bdev             1819 fs/f2fs/segment.c 				err = __issue_discard_async(sbi, bdev,
bdev             1823 fs/f2fs/segment.c 				bdev = bdev2;
bdev             1837 fs/f2fs/segment.c 		err = __issue_discard_async(sbi, bdev, start, len);
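
The segment.c entries above split trims between blkdev_reset_zones() for whole zones on zoned devices and the ordinary discard path. A condensed sketch of that decision, sector-granular, with an assumed helper name:

#include <linux/blkdev.h>

static int demo_trim(struct block_device *bdev, sector_t sector,
		     sector_t nr_sects)
{
	/* A whole, aligned zone on a zoned device is reclaimed by a reset. */
	if (bdev_is_zoned(bdev) &&
	    !(sector & (bdev_zone_sectors(bdev) - 1)) &&
	    nr_sects == bdev_zone_sectors(bdev))
		return blkdev_reset_zones(bdev, sector, nr_sects, GFP_NOFS);

	/* Everything else falls back to a regular discard. */
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS, 0);
}
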
bdev             1053 fs/f2fs/super.c 		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
bdev             2867 fs/f2fs/super.c 	struct block_device *bdev = FDEV(devi).bdev;
bdev             2868 fs/f2fs/super.c 	sector_t nr_sectors = bdev->bd_part->nr_sects;
bdev             2879 fs/f2fs/super.c 				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
bdev             2881 fs/f2fs/super.c 	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
bdev             2888 fs/f2fs/super.c 	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
bdev             2911 fs/f2fs/super.c 		err = blkdev_report_zones(bdev, sector, zones, &nr_zones);
bdev             3063 fs/f2fs/super.c 			FDEV(0).bdev =
bdev             3083 fs/f2fs/super.c 			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
bdev             3086 fs/f2fs/super.c 		if (IS_ERR(FDEV(i).bdev))
bdev             3087 fs/f2fs/super.c 			return PTR_ERR(FDEV(i).bdev);
bdev             3093 fs/f2fs/super.c 		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
bdev             3098 fs/f2fs/super.c 		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
bdev             3109 fs/f2fs/super.c 				  bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
bdev              931 fs/gfs2/bmap.c 	iomap->bdev = inode->i_sb->s_bdev;
bdev               25 fs/internal.h  extern int __sync_blockdev(struct block_device *bdev, int wait);
bdev               32 fs/internal.h  static inline int __sync_blockdev(struct block_device *bdev, int wait)
bdev              273 fs/iomap/buffered-io.c 		bio_set_dev(ctx->bio, iomap->bdev);
bdev              544 fs/iomap/buffered-io.c 	bio_set_dev(&bio, iomap->bdev);
bdev              939 fs/iomap/buffered-io.c 	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
bdev               69 fs/iomap/direct-io.c 	dio->submit.last_queue = bdev_get_queue(iomap->bdev);
bdev              186 fs/iomap/direct-io.c 	bio_set_dev(bio, iomap->bdev);
bdev              201 fs/iomap/direct-io.c 	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
bdev              234 fs/iomap/direct-io.c 		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
bdev              264 fs/iomap/direct-io.c 		bio_set_dev(bio, iomap->bdev);
bdev              109 fs/iomap/swapfile.c 	if (iomap->bdev != isi->sis->bdev) {
bdev              549 fs/isofs/inode.c 	struct block_device *bdev = sb->s_bdev;
bdev              558 fs/isofs/inode.c 		i = ioctl_by_bdev(bdev, CDROMREADTOCENTRY, (unsigned long) &Te);
bdev              569 fs/isofs/inode.c 	i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long) &ms_info);
bdev             1111 fs/jbd2/journal.c static journal_t *journal_init_common(struct block_device *bdev,
bdev             1156 fs/jbd2/journal.c 	journal->j_dev = bdev;
bdev             1208 fs/jbd2/journal.c journal_t *jbd2_journal_init_dev(struct block_device *bdev,
bdev             1214 fs/jbd2/journal.c 	journal = journal_init_common(bdev, fs_dev, start, len, blocksize);
bdev              331 fs/jbd2/revoke.c 	struct block_device *bdev;
bdev              344 fs/jbd2/revoke.c 	bdev = journal->j_fs_dev;
bdev              348 fs/jbd2/revoke.c 		bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
bdev              358 fs/jbd2/revoke.c 		bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
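
The jbd2 lines show that the journal layer only ever sees the bdev it is handed. A sketch of wiring an external journal device into jbd2 (not ext4 code; start/len/blocksize are placeholder geometry):

#include <linux/jbd2.h>

static journal_t *demo_init_ext_journal(struct block_device *journal_bdev,
					 struct block_device *fs_bdev,
					 int len, int blocksize)
{
	journal_t *journal;

	journal = jbd2_journal_init_dev(journal_bdev, fs_bdev,
					/* start */ 1, len, blocksize);
	if (!journal)
		return NULL;

	if (jbd2_journal_load(journal)) {	/* replay + sanity checks */
		jbd2_journal_destroy(journal);
		return NULL;
	}
	return journal;
}
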
bdev             1069 fs/jfs/jfs_logmgr.c 	struct block_device *bdev;
bdev             1081 fs/jfs/jfs_logmgr.c 		if (log->bdev->bd_dev == sbi->logdev) {
bdev             1111 fs/jfs/jfs_logmgr.c 	bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
bdev             1113 fs/jfs/jfs_logmgr.c 	if (IS_ERR(bdev)) {
bdev             1114 fs/jfs/jfs_logmgr.c 		rc = PTR_ERR(bdev);
bdev             1118 fs/jfs/jfs_logmgr.c 	log->bdev = bdev;
bdev             1152 fs/jfs/jfs_logmgr.c 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
bdev             1173 fs/jfs/jfs_logmgr.c 	log->bdev = sb->s_bdev;
bdev             1446 fs/jfs/jfs_logmgr.c 	struct block_device *bdev;
bdev             1492 fs/jfs/jfs_logmgr.c 	bdev = log->bdev;
bdev             1495 fs/jfs/jfs_logmgr.c 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
bdev             1985 fs/jfs/jfs_logmgr.c 	bio_set_dev(bio, log->bdev);
bdev             2129 fs/jfs/jfs_logmgr.c 	bio_set_dev(bio, log->bdev);
bdev              359 fs/jfs/jfs_logmgr.h 	struct block_device *bdev; /* 4: log lv pointer */
bdev              427 fs/jfs/jfs_mount.c 		j_sb->s_logdev = cpu_to_le32(new_encode_dev(sbi->log->bdev->bd_dev));
bdev               71 fs/mpage.c     mpage_alloc(struct block_device *bdev,
bdev               87 fs/mpage.c     		bio_set_dev(bio, bdev);
bdev              170 fs/mpage.c     	struct block_device *bdev = NULL;
bdev              218 fs/mpage.c     		bdev = map_bh->b_bdev;
bdev              273 fs/mpage.c     		bdev = map_bh->b_bdev;
bdev              302 fs/mpage.c     			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
bdev              306 fs/mpage.c     		args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
bdev              511 fs/mpage.c     	struct block_device *bdev = NULL;
bdev              556 fs/mpage.c     			bdev = bh->b_bdev;
bdev              596 fs/mpage.c     		bdev = map_bh.b_bdev;
bdev              632 fs/mpage.c     			if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
bdev              636 fs/mpage.c     		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
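
mpage_alloc() and its callers above show the canonical bio setup against a bdev. A self-contained editor sketch of the same steps, done synchronously for brevity (the helper name is assumed):

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/errno.h>

static int demo_read_page_bio(struct block_device *bdev, sector_t first_sector,
			      struct page *page)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_NOFS, 1);	/* room for one bio_vec */
	if (!bio)
		return -ENOMEM;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = first_sector;
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);	/* synchronous for simplicity */
	bio_put(bio);
	return ret;
}
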
bdev              119 fs/nfs/blocklayout/blocklayout.c bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
bdev              133 fs/nfs/blocklayout/blocklayout.c 		bio_set_dev(bio, bdev);
bdev              179 fs/nfs/blocklayout/blocklayout.c 		bio = bl_alloc_init_bio(npg, map->bdev,
bdev               98 fs/nfs/blocklayout/blocklayout.h 	struct block_device		*bdev;
bdev              111 fs/nfs/blocklayout/blocklayout.h 	struct block_device		*bdev;
bdev               28 fs/nfs/blocklayout/dev.c 				dev->bdev->bd_disk->fops->pr_ops;
bdev               31 fs/nfs/blocklayout/dev.c 			error = ops->pr_register(dev->bdev, dev->pr_key, 0,
bdev               37 fs/nfs/blocklayout/dev.c 		if (dev->bdev)
bdev               38 fs/nfs/blocklayout/dev.c 			blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE);
bdev              172 fs/nfs/blocklayout/dev.c 	map->bdev = dev->bdev;
bdev              239 fs/nfs/blocklayout/dev.c 	struct block_device *bdev;
bdev              246 fs/nfs/blocklayout/dev.c 	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL);
bdev              247 fs/nfs/blocklayout/dev.c 	if (IS_ERR(bdev)) {
bdev              249 fs/nfs/blocklayout/dev.c 			MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
bdev              250 fs/nfs/blocklayout/dev.c 		return PTR_ERR(bdev);
bdev              252 fs/nfs/blocklayout/dev.c 	d->bdev = bdev;
bdev              255 fs/nfs/blocklayout/dev.c 	d->len = i_size_read(d->bdev->bd_inode);
bdev              259 fs/nfs/blocklayout/dev.c 		d->bdev->bd_disk->disk_name);
bdev              311 fs/nfs/blocklayout/dev.c 	struct block_device *bdev;
bdev              319 fs/nfs/blocklayout/dev.c 	bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL);
bdev              320 fs/nfs/blocklayout/dev.c 	if (IS_ERR(bdev)) {
bdev              322 fs/nfs/blocklayout/dev.c 			devname, PTR_ERR(bdev));
bdev              326 fs/nfs/blocklayout/dev.c 	return bdev;
bdev              336 fs/nfs/blocklayout/dev.c 	struct block_device *bdev;
bdev              346 fs/nfs/blocklayout/dev.c 	bdev = blkdev_get_by_path(devname, FMODE_READ | FMODE_WRITE, NULL);
bdev              348 fs/nfs/blocklayout/dev.c 	return bdev;
bdev              356 fs/nfs/blocklayout/dev.c 	struct block_device *bdev;
bdev              363 fs/nfs/blocklayout/dev.c 	bdev = bl_open_dm_mpath_udev_path(v);
bdev              364 fs/nfs/blocklayout/dev.c 	if (IS_ERR(bdev))
bdev              365 fs/nfs/blocklayout/dev.c 		bdev = bl_open_udev_path(v);
bdev              366 fs/nfs/blocklayout/dev.c 	if (IS_ERR(bdev))
bdev              367 fs/nfs/blocklayout/dev.c 		return PTR_ERR(bdev);
bdev              368 fs/nfs/blocklayout/dev.c 	d->bdev = bdev;
bdev              370 fs/nfs/blocklayout/dev.c 	d->len = i_size_read(d->bdev->bd_inode);
bdev              375 fs/nfs/blocklayout/dev.c 		d->bdev->bd_disk->disk_name, d->pr_key);
bdev              377 fs/nfs/blocklayout/dev.c 	ops = d->bdev->bd_disk->fops->pr_ops;
bdev              380 fs/nfs/blocklayout/dev.c 				d->bdev->bd_disk->disk_name);
bdev              385 fs/nfs/blocklayout/dev.c 	error = ops->pr_register(d->bdev, 0, d->pr_key, true);
bdev              388 fs/nfs/blocklayout/dev.c 				d->bdev->bd_disk->disk_name);
bdev              396 fs/nfs/blocklayout/dev.c 	blkdev_put(d->bdev, FMODE_READ | FMODE_WRITE);
bdev              214 fs/nfsd/blocklayout.c static int nfsd4_scsi_identify_device(struct block_device *bdev,
bdev              217 fs/nfsd/blocklayout.c 	struct request_queue *q = bdev->bd_disk->queue;
bdev              408 fs/nfsd/blocklayout.c 	struct block_device *bdev = ls->ls_file->nf_file->f_path.mnt->mnt_sb->s_bdev;
bdev              410 fs/nfsd/blocklayout.c 	bdev->bd_disk->fops->pr_ops->pr_preempt(bdev, NFSD_MDS_PR_KEY,
bdev             1186 fs/nilfs2/super.c 	struct block_device *bdev;
bdev             1276 fs/nilfs2/super.c 	sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
bdev             1277 fs/nilfs2/super.c 	if (IS_ERR(sd.bdev))
bdev             1278 fs/nilfs2/super.c 		return ERR_CAST(sd.bdev);
bdev             1292 fs/nilfs2/super.c 	mutex_lock(&sd.bdev->bd_fsfreeze_mutex);
bdev             1293 fs/nilfs2/super.c 	if (sd.bdev->bd_fsfreeze_count > 0) {
bdev             1294 fs/nilfs2/super.c 		mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
bdev             1299 fs/nilfs2/super.c 		 sd.bdev);
bdev             1300 fs/nilfs2/super.c 	mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
bdev             1311 fs/nilfs2/super.c 		snprintf(s->s_id, sizeof(s->s_id), "%pg", sd.bdev);
bdev             1312 fs/nilfs2/super.c 		sb_set_blocksize(s, block_size(sd.bdev));
bdev             1348 fs/nilfs2/super.c 		blkdev_put(sd.bdev, mode);
bdev             1357 fs/nilfs2/super.c 		blkdev_put(sd.bdev, mode);
bdev              791 fs/quota/quota.c 	struct block_device *bdev;
bdev              797 fs/quota/quota.c 	bdev = lookup_bdev(tmp->name);
bdev              799 fs/quota/quota.c 	if (IS_ERR(bdev))
bdev              800 fs/quota/quota.c 		return ERR_CAST(bdev);
bdev              802 fs/quota/quota.c 		sb = get_super_exclusive_thawed(bdev);
bdev              804 fs/quota/quota.c 		sb = get_super_thawed(bdev);
bdev              806 fs/quota/quota.c 		sb = get_super(bdev);
bdev              807 fs/quota/quota.c 	bdput(bdev);
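
The quota code above resolves a user-supplied device name to a mounted superblock. A sketch of that walk, assuming the lookup_bdev() of this era (which returns a struct block_device rather than a dev_t):

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int demo_sb_from_path(const char *dev_path)
{
	struct block_device *bdev;
	struct super_block *sb;

	bdev = lookup_bdev(dev_path);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	sb = get_super(bdev);	/* NULL if nothing is mounted on it */
	bdput(bdev);
	if (!sb)
		return -ENODEV;

	pr_info("%s is mounted as %s\n", dev_path, sb->s_type->name);
	drop_super(sb);		/* releases s_umount and the reference */
	return 0;
}
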
bdev              743 fs/super.c     static struct super_block *__get_super(struct block_device *bdev, bool excl)
bdev              747 fs/super.c     	if (!bdev)
bdev              755 fs/super.c     		if (sb->s_bdev == bdev) {
bdev              786 fs/super.c     struct super_block *get_super(struct block_device *bdev)
bdev              788 fs/super.c     	return __get_super(bdev, false);
bdev              792 fs/super.c     static struct super_block *__get_super_thawed(struct block_device *bdev,
bdev              796 fs/super.c     		struct super_block *s = __get_super(bdev, excl);
bdev              818 fs/super.c     struct super_block *get_super_thawed(struct block_device *bdev)
bdev              820 fs/super.c     	return __get_super_thawed(bdev, false);
bdev              833 fs/super.c     struct super_block *get_super_exclusive_thawed(struct block_device *bdev)
bdev              835 fs/super.c     	return __get_super_thawed(bdev, true);
bdev              847 fs/super.c     struct super_block *get_active_super(struct block_device *bdev)
bdev              851 fs/super.c     	if (!bdev)
bdev              859 fs/super.c     		if (sb->s_bdev == bdev) {
bdev             1281 fs/super.c     	struct block_device *bdev;
bdev             1292 fs/super.c     	bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type);
bdev             1293 fs/super.c     	if (IS_ERR(bdev)) {
bdev             1295 fs/super.c     		return PTR_ERR(bdev);
bdev             1302 fs/super.c     	mutex_lock(&bdev->bd_fsfreeze_mutex);
bdev             1303 fs/super.c     	if (bdev->bd_fsfreeze_count > 0) {
bdev             1304 fs/super.c     		mutex_unlock(&bdev->bd_fsfreeze_mutex);
bdev             1305 fs/super.c     		warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
bdev             1306 fs/super.c     		blkdev_put(bdev, mode);
bdev             1311 fs/super.c     	fc->sget_key = bdev;
bdev             1313 fs/super.c     	mutex_unlock(&bdev->bd_fsfreeze_mutex);
bdev             1315 fs/super.c     		blkdev_put(bdev, mode);
bdev             1322 fs/super.c     			warnf(fc, "%pg: Can't mount, would change RO state", bdev);
bdev             1324 fs/super.c     			blkdev_put(bdev, mode);
bdev             1336 fs/super.c     		blkdev_put(bdev, mode);
bdev             1340 fs/super.c     		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
bdev             1341 fs/super.c     		sb_set_blocksize(s, block_size(bdev));
bdev             1349 fs/super.c     		bdev->bd_super = s;
bdev             1367 fs/super.c     	struct block_device *bdev;
bdev             1375 fs/super.c     	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
bdev             1376 fs/super.c     	if (IS_ERR(bdev))
bdev             1377 fs/super.c     		return ERR_CAST(bdev);
bdev             1384 fs/super.c     	mutex_lock(&bdev->bd_fsfreeze_mutex);
bdev             1385 fs/super.c     	if (bdev->bd_fsfreeze_count > 0) {
bdev             1386 fs/super.c     		mutex_unlock(&bdev->bd_fsfreeze_mutex);
bdev             1391 fs/super.c     		 bdev);
bdev             1392 fs/super.c     	mutex_unlock(&bdev->bd_fsfreeze_mutex);
bdev             1411 fs/super.c     		blkdev_put(bdev, mode);
bdev             1415 fs/super.c     		snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
bdev             1416 fs/super.c     		sb_set_blocksize(s, block_size(bdev));
bdev             1424 fs/super.c     		bdev->bd_super = s;
bdev             1432 fs/super.c     	blkdev_put(bdev, mode);
bdev             1440 fs/super.c     	struct block_device *bdev = sb->s_bdev;
bdev             1443 fs/super.c     	bdev->bd_super = NULL;
bdev             1445 fs/super.c     	sync_blockdev(bdev);
bdev             1447 fs/super.c     	blkdev_put(bdev, mode | FMODE_EXCL);
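
fs/super.c's mount_bdev()/kill_block_super() pair, indexed above, is what block-backed filesystems plug into their file_system_type. A skeleton of such a consumer; "demofs" and its fill_super are hypothetical:

#include <linux/fs.h>
#include <linux/module.h>

static int demofs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* read the on-disk super via sb->s_bdev, set s_op, build the root... */
	return -EINVAL;		/* placeholder so the sketch stays honest */
}

static struct dentry *demofs_mount(struct file_system_type *fs_type, int flags,
				   const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, demofs_fill_super);
}

static struct file_system_type demofs_type = {
	.owner		= THIS_MODULE,
	.name		= "demofs",
	.mount		= demofs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};

A real filesystem would additionally register_filesystem(&demofs_type) from its module init and unregister it on exit.
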
bdev               83 fs/sync.c      static void fdatawrite_one_bdev(struct block_device *bdev, void *arg)
bdev               85 fs/sync.c      	filemap_fdatawrite(bdev->bd_inode->i_mapping);
bdev               88 fs/sync.c      static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
bdev               95 fs/sync.c      	filemap_fdatawait_keep_errors(bdev->bd_inode->i_mapping);
bdev               32 fs/udf/lowlevel.c 	struct block_device *bdev = sb->s_bdev;
bdev               37 fs/udf/lowlevel.c 	i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long)&ms_info);
bdev               52 fs/udf/lowlevel.c 	struct block_device *bdev = sb->s_bdev;
bdev               59 fs/udf/lowlevel.c 	if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock) ||
bdev               61 fs/udf/lowlevel.c 		lblock = i_size_read(bdev->bd_inode) >> sb->s_blocksize_bits;
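
udf and isofs above query optical media through ioctl_by_bdev(). A sketch of the multisession query they both perform (helper name assumed; non-optical devices simply fail the ioctl and the caller treats that as "no earlier session"):

#include <linux/blkdev.h>
#include <linux/cdrom.h>

static sector_t demo_session_start(struct block_device *bdev)
{
	struct cdrom_multisession ms_info = { .addr_format = CDROM_LBA };

	if (ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long)&ms_info))
		return 0;

	return ms_info.xa_flag ? ms_info.addr.lba : 0;
}
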
bdev              699 fs/xfs/xfs_aops.c 	struct block_device	*bdev,
bdev              707 fs/xfs/xfs_aops.c 	bio_set_dev(bio, bdev);
bdev              766 fs/xfs/xfs_aops.c 	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
bdev              783 fs/xfs/xfs_aops.c 				wpc->imap.br_state, offset, bdev, sector, wbc);
bdev             1184 fs/xfs/xfs_aops.c 	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
bdev               14 fs/xfs/xfs_bio_io.c 	struct block_device	*bdev,
bdev               30 fs/xfs/xfs_bio_io.c 	bio_set_dev(bio, bdev);
bdev             1742 fs/xfs/xfs_buf.c 	struct block_device	*bdev)
bdev             1744 fs/xfs/xfs_buf.c 	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
bdev             1750 fs/xfs/xfs_buf.c 	struct block_device	*bdev,
bdev             1758 fs/xfs/xfs_buf.c 	btp->bt_dev =  bdev->bd_dev;
bdev             1759 fs/xfs/xfs_buf.c 	btp->bt_bdev = bdev;
bdev             1762 fs/xfs/xfs_buf.c 	if (xfs_setsize_buftarg_early(btp, bdev))
bdev               30 fs/xfs/xfs_discard.c 	struct block_device	*bdev = mp->m_ddev_targp->bt_bdev;
bdev              112 fs/xfs/xfs_discard.c 		error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS, 0);
bdev               80 fs/xfs/xfs_iomap.c 	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
bdev              102 fs/xfs/xfs_iomap.c 	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
bdev              222 fs/xfs/xfs_linux.h int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
bdev              648 fs/xfs/xfs_super.c 	struct block_device	*bdev)
bdev              650 fs/xfs/xfs_super.c 	if (bdev)
bdev              651 fs/xfs/xfs_super.c 		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
bdev               74 include/drm/drm_gem_vram_helper.h 						struct ttm_bo_device *bdev,
bdev               89 include/drm/drm_gem_vram_helper.h 				  struct ttm_bo_device *bdev,
bdev               44 include/drm/drm_vram_mm_helper.h 	struct ttm_bo_device bdev;
bdev               58 include/drm/drm_vram_mm_helper.h 	struct ttm_bo_device *bdev)
bdev               60 include/drm/drm_vram_mm_helper.h 	return container_of(bdev, struct drm_vram_mm, bdev);
bdev              179 include/drm/ttm/ttm_bo_api.h 	struct ttm_bo_device *bdev;
bdev              424 include/drm/ttm/ttm_bo_api.h int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
bdev              431 include/drm/ttm/ttm_bo_api.h void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
bdev              478 include/drm/ttm/ttm_bo_api.h size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
bdev              481 include/drm/ttm/ttm_bo_api.h size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
bdev              520 include/drm/ttm/ttm_bo_api.h int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bdev              569 include/drm/ttm/ttm_bo_api.h int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
bdev              595 include/drm/ttm/ttm_bo_api.h int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
bdev              615 include/drm/ttm/ttm_bo_api.h int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
bdev              644 include/drm/ttm/ttm_bo_api.h int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
bdev              664 include/drm/ttm/ttm_bo_api.h int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
bdev              735 include/drm/ttm/ttm_bo_api.h 		struct ttm_bo_device *bdev);
bdev              761 include/drm/ttm/ttm_bo_api.h ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
bdev              767 include/drm/ttm/ttm_bo_api.h void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
bdev              171 include/drm/ttm/ttm_bo_driver.h 	struct ttm_bo_device *bdev;
bdev              271 include/drm/ttm/ttm_bo_driver.h 	int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
bdev              272 include/drm/ttm/ttm_bo_driver.h 	int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
bdev              354 include/drm/ttm/ttm_bo_driver.h 	int (*io_mem_reserve)(struct ttm_bo_device *bdev,
bdev              356 include/drm/ttm/ttm_bo_driver.h 	void (*io_mem_free)(struct ttm_bo_device *bdev,
bdev              559 include/drm/ttm/ttm_bo_driver.h bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
bdev              589 include/drm/ttm/ttm_bo_driver.h int ttm_bo_device_release(struct ttm_bo_device *bdev);
bdev              606 include/drm/ttm/ttm_bo_driver.h int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev              780 include/drm/ttm/ttm_bo_driver.h 	spin_lock(&bo->bdev->glob->lru_lock);
bdev              785 include/drm/ttm/ttm_bo_driver.h 	spin_unlock(&bo->bdev->glob->lru_lock);
bdev              793 include/drm/ttm/ttm_bo_driver.h int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
bdev              795 include/drm/ttm/ttm_bo_driver.h void ttm_mem_io_free(struct ttm_bo_device *bdev,
bdev              106 include/drm/ttm/ttm_tt.h 	struct ttm_bo_device *bdev;
bdev              485 include/linux/bio.h #define bio_set_dev(bio, bdev) 			\
bdev              487 include/linux/bio.h 	if ((bio)->bi_disk != (bdev)->bd_disk)	\
bdev              489 include/linux/bio.h 	(bio)->bi_disk = (bdev)->bd_disk;	\
bdev              490 include/linux/bio.h 	(bio)->bi_partno = (bdev)->bd_partno;	\
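bio_set_dev() only records the target disk and partition in the bio; the caller still fills in the sector, the operation, and the payload. A rough sketch of the usual pattern, assuming a synchronous read of one page (example_read_sector is hypothetical):

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_read_sector(struct block_device *bdev, sector_t sector,
			       struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	if (!bio)
		return -ENOMEM;

	bio_set_dev(bio, bdev);			/* copies bd_disk / bd_partno */
	bio->bi_iter.bi_sector = sector;	/* 512-byte units */
	bio->bi_opf = REQ_OP_READ;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);		/* waits for completion */
	bio_put(bio);
	return ret;
}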
bdev              359 include/linux/blkdev.h extern unsigned int blkdev_nr_zones(struct block_device *bdev);
bdev              360 include/linux/blkdev.h extern int blkdev_report_zones(struct block_device *bdev,
bdev              363 include/linux/blkdev.h extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
bdev              367 include/linux/blkdev.h extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
bdev              369 include/linux/blkdev.h extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
bdev              374 include/linux/blkdev.h static inline unsigned int blkdev_nr_zones(struct block_device *bdev)
bdev              384 include/linux/blkdev.h static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
bdev              391 include/linux/blkdev.h static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
bdev              898 include/linux/blkdev.h static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
bdev              900 include/linux/blkdev.h 	return bdev->bd_disk->queue;	/* this is never NULL */
bdev             1096 include/linux/blkdev.h extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
bdev             1098 include/linux/blkdev.h extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
bdev             1211 include/linux/blkdev.h extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
bdev             1216 include/linux/blkdev.h extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bdev             1218 include/linux/blkdev.h extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bdev             1225 include/linux/blkdev.h extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
bdev             1228 include/linux/blkdev.h extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
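The blkdev_issue_*() helpers above take 512-byte sector units regardless of the device's logical block size. A hedged sketch of discarding a byte range, assuming start and len are already aligned to the logical block size (example_discard_range is made up):

#include <linux/blkdev.h>

static int example_discard_range(struct block_device *bdev, u64 start, u64 len)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;	/* device does not support discard */

	return blkdev_issue_discard(bdev, start >> 9, len >> 9,
				    GFP_KERNEL, 0);
}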
bdev             1307 include/linux/blkdev.h static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
bdev             1309 include/linux/blkdev.h 	return queue_logical_block_size(bdev_get_queue(bdev));
bdev             1317 include/linux/blkdev.h static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
bdev             1319 include/linux/blkdev.h 	return queue_physical_block_size(bdev_get_queue(bdev));
bdev             1327 include/linux/blkdev.h static inline int bdev_io_min(struct block_device *bdev)
bdev             1329 include/linux/blkdev.h 	return queue_io_min(bdev_get_queue(bdev));
bdev             1337 include/linux/blkdev.h static inline int bdev_io_opt(struct block_device *bdev)
bdev             1339 include/linux/blkdev.h 	return queue_io_opt(bdev_get_queue(bdev));
bdev             1359 include/linux/blkdev.h static inline int bdev_alignment_offset(struct block_device *bdev)
bdev             1361 include/linux/blkdev.h 	struct request_queue *q = bdev_get_queue(bdev);
bdev             1366 include/linux/blkdev.h 	if (bdev != bdev->bd_contains)
bdev             1367 include/linux/blkdev.h 		return bdev->bd_part->alignment_offset;
bdev             1403 include/linux/blkdev.h static inline int bdev_discard_alignment(struct block_device *bdev)
bdev             1405 include/linux/blkdev.h 	struct request_queue *q = bdev_get_queue(bdev);
bdev             1407 include/linux/blkdev.h 	if (bdev != bdev->bd_contains)
bdev             1408 include/linux/blkdev.h 		return bdev->bd_part->discard_alignment;
bdev             1413 include/linux/blkdev.h static inline unsigned int bdev_write_same(struct block_device *bdev)
bdev             1415 include/linux/blkdev.h 	struct request_queue *q = bdev_get_queue(bdev);
bdev             1423 include/linux/blkdev.h static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
bdev             1425 include/linux/blkdev.h 	struct request_queue *q = bdev_get_queue(bdev);
bdev             1433 include/linux/blkdev.h static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
bdev             1435 include/linux/blkdev.h 	struct request_queue *q = bdev_get_queue(bdev);
bdev             1443 include/linux/blkdev.h static inline bool bdev_is_zoned(struct block_device *bdev)
bdev             1445 include/linux/blkdev.h 	struct request_queue *q = bdev_get_queue(bdev);
bdev             1453 include/linux/blkdev.h static inline sector_t bdev_zone_sectors(struct block_device *bdev)
bdev             1455 include/linux/blkdev.h 	struct request_queue *q = bdev_get_queue(bdev);
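A small sketch of how a caller would typically consume the zoned-device helpers just above; the example_* name is hypothetical:

#include <linux/blkdev.h>

/* Returns true if the device is zoned, logging its zone geometry. */
static bool example_is_zoned(struct block_device *bdev)
{
	if (!bdev_is_zoned(bdev))
		return false;

	pr_info("%pg: zoned model %d, %llu sectors per zone\n",
		bdev, bdev_zoned_model(bdev),
		(unsigned long long)bdev_zone_sectors(bdev));
	return true;
}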
bdev             1485 include/linux/blkdev.h static inline unsigned int block_size(struct block_device *bdev)
bdev             1487 include/linux/blkdev.h 	return bdev->bd_block_size;
bdev             1560 include/linux/blkdev.h struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
bdev             1562 include/linux/blkdev.h 	return blk_get_integrity(bdev->bd_disk);
bdev             1815 include/linux/blkdev.h static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
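A sketch of flushing a device's volatile write cache with blkdev_issue_flush(); this assumes the three-argument form used in this tree, where the last parameter optionally reports the failing sector and is commonly NULL:

#include <linux/blkdev.h>

static int example_flush_cache(struct block_device *bdev)
{
	/* GFP_KERNEL: may sleep; NULL: we do not need the error sector. */
	return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
}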
bdev               81 include/linux/blktrace_api.h 			   struct block_device *bdev,
bdev               91 include/linux/blktrace_api.h # define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
bdev               94 include/linux/blktrace_api.h # define blk_trace_setup(q, name, dev, bdev, arg)	(-ENOTTY)
bdev              175 include/linux/buffer_head.h void clean_bdev_aliases(struct block_device *bdev, sector_t block,
bdev              185 include/linux/buffer_head.h struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
bdev              187 include/linux/buffer_head.h struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
bdev              206 include/linux/buffer_head.h void write_boundary_block(struct block_device *bdev,
bdev              377 include/linux/buffer_head.h static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
bdev              381 include/linux/buffer_head.h 	return __getblk_gfp(bdev, block, size, 0);
bdev              384 include/linux/buffer_head.h static inline struct buffer_head *__getblk(struct block_device *bdev,
bdev              388 include/linux/buffer_head.h 	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
bdev              402 include/linux/buffer_head.h __bread(struct block_device *bdev, sector_t block, unsigned size)
bdev              404 include/linux/buffer_head.h 	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
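__getblk() and __bread() above are the buffer-cache entry points; __bread() also reads the block in. A minimal read through the buffer cache (example_read_block is made up), with the mandatory brelse():

#include <linux/buffer_head.h>

static int example_read_block(struct block_device *bdev, sector_t block,
			      unsigned int size)
{
	struct buffer_head *bh = __bread(bdev, block, size);

	if (!bh)
		return -EIO;

	/* bh->b_data now holds 'size' bytes of on-disk block 'block'. */
	brelse(bh);
	return 0;
}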
bdev               98 include/linux/cdrom.h extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
bdev              101 include/linux/cdrom.h extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
bdev               39 include/linux/cmdline-parser.h 					 const char *bdev);
bdev              115 include/linux/dax.h bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
bdev              116 include/linux/dax.h static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
bdev              118 include/linux/dax.h 	return __bdev_dax_supported(bdev, blocksize);
bdev              122 include/linux/dax.h 		struct block_device *bdev, int blocksize, sector_t start,
bdev              125 include/linux/dax.h 		struct block_device *bdev, int blocksize, sector_t start,
bdev              128 include/linux/dax.h 	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
bdev              142 include/linux/dax.h struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
bdev              144 include/linux/dax.h 		struct block_device *bdev, struct writeback_control *wbc);
bdev              150 include/linux/dax.h static inline bool bdev_dax_supported(struct block_device *bdev,
bdev              157 include/linux/dax.h 		struct block_device *bdev, int blocksize, sector_t start,
bdev              172 include/linux/dax.h static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
bdev              183 include/linux/dax.h 		struct block_device *bdev, struct writeback_control *wbc)
bdev              206 include/linux/dax.h bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
bdev              225 include/linux/dax.h int __dax_zero_page_range(struct block_device *bdev,
bdev              229 include/linux/dax.h static inline int __dax_zero_page_range(struct block_device *bdev,
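A sketch of the mount-time DAX check filesystems make with the dax.h helpers above; example_setup_dax is hypothetical, and the returned dax_device would later be dropped with fs_put_dax():

#include <linux/dax.h>
#include <linux/blkdev.h>

static struct dax_device *example_setup_dax(struct block_device *bdev,
					    int blocksize)
{
	if (!bdev_dax_supported(bdev, blocksize))
		return NULL;	/* fall back to the page-cache I/O path */

	return fs_dax_get_by_bdev(bdev);
}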
bdev               94 include/linux/device-mapper.h typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
bdev              148 include/linux/device-mapper.h 	struct block_device *bdev;
bdev               24 include/linux/dm-bufio.h dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
bdev               18 include/linux/dm-io.h 	struct block_device *bdev;
bdev             2574 include/linux/fs.h extern struct block_device *bdgrab(struct block_device *bdev);
bdev             2580 include/linux/fs.h extern int sync_blockdev(struct block_device *bdev);
bdev             2585 include/linux/fs.h extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
bdev             2596 include/linux/fs.h static inline int sync_blockdev(struct block_device *bdev) { return 0; }
bdev             2597 include/linux/fs.h static inline void kill_bdev(struct block_device *bdev) {}
bdev             2598 include/linux/fs.h static inline void invalidate_bdev(struct block_device *bdev) {}
bdev             2605 include/linux/fs.h static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
bdev             2631 include/linux/fs.h extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
bdev             2636 include/linux/fs.h extern struct block_device *bd_start_claiming(struct block_device *bdev,
bdev             2638 include/linux/fs.h extern void bd_finish_claiming(struct block_device *bdev,
bdev             2640 include/linux/fs.h extern void bd_abort_claiming(struct block_device *bdev,
bdev             2642 include/linux/fs.h extern void blkdev_put(struct block_device *bdev, fmode_t mode);
bdev             2643 include/linux/fs.h extern int __blkdev_reread_part(struct block_device *bdev);
bdev             2644 include/linux/fs.h extern int blkdev_reread_part(struct block_device *bdev);
bdev             2647 include/linux/fs.h extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
bdev             2648 include/linux/fs.h extern void bd_unlink_disk_holder(struct block_device *bdev,
bdev             2651 include/linux/fs.h static inline int bd_link_disk_holder(struct block_device *bdev,
bdev             2656 include/linux/fs.h static inline void bd_unlink_disk_holder(struct block_device *bdev,
bdev             2699 include/linux/fs.h extern const char *bdevname(struct block_device *bdev, char *buffer);
bdev             2715 include/linux/fs.h 		struct block_device *bdev, bool verbose);
bdev             3167 include/linux/fs.h 			     struct block_device *bdev, struct iov_iter *iter,
bdev             3286 include/linux/fs.h extern struct super_block *get_super_exclusive_thawed(struct block_device *bdev);
bdev             3287 include/linux/fs.h extern struct super_block *get_active_super(struct block_device *bdev);
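The fs.h prototypes above include the exclusive open/close pair used throughout the block layer. A hedged sketch of that pairing (example_probe_bdev and my_holder are illustrative): the holder cookie identifies the exclusive owner, and the same mode, including FMODE_EXCL, must be passed back to blkdev_put():

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

static int example_probe_bdev(const char *path, void *my_holder)
{
	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	struct block_device *bdev;

	bdev = blkdev_get_by_path(path, mode, my_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	pr_info("opened %pg, %u-byte logical blocks\n",
		bdev, bdev_logical_block_size(bdev));

	blkdev_put(bdev, mode);
	return 0;
}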
bdev              441 include/linux/genhd.h extern void set_device_ro(struct block_device *bdev, int flag);
bdev              458 include/linux/genhd.h static inline sector_t get_start_sect(struct block_device *bdev)
bdev              460 include/linux/genhd.h 	return bdev->bd_part->start_sect;
bdev              625 include/linux/genhd.h extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
bdev              626 include/linux/genhd.h extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
bdev               66 include/linux/iomap.h 	struct block_device	*bdev;	/* block device for I/O */
bdev             1384 include/linux/jbd2.h extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
bdev             1180 include/linux/libata.h 			      struct block_device *bdev,
bdev              165 include/linux/pktcdvd.h 	struct block_device	*bdev;		/* dev attached */
bdev                8 include/linux/pr.h 	int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key,
bdev               10 include/linux/pr.h 	int (*pr_reserve)(struct block_device *bdev, u64 key,
bdev               12 include/linux/pr.h 	int (*pr_release)(struct block_device *bdev, u64 key,
bdev               14 include/linux/pr.h 	int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key,
bdev               16 include/linux/pr.h 	int (*pr_clear)(struct block_device *bdev, u64 key);
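struct pr_ops is supplied by a block driver through its block_device_operations. A very rough, hypothetical sketch of that wiring (all example_* names invented; a real driver implements every hook by issuing the corresponding persistent-reservation command to the device):

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/pr.h>

static int example_pr_register(struct block_device *bdev, u64 old_key,
			       u64 new_key, u32 flags)
{
	return -EOPNOTSUPP;	/* placeholder */
}

static const struct pr_ops example_pr_ops = {
	.pr_register	= example_pr_register,
};

static const struct block_device_operations example_fops = {
	.owner	= THIS_MODULE,
	.pr_ops	= &example_pr_ops,
};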
bdev              252 include/linux/swap.h 	struct block_device *bdev;	/* swap device or bdev of swap file */
bdev               16 include/scsi/scsicam.h extern int scsicam_bios_param (struct block_device *bdev, sector_t capacity, int *ip);
bdev               19 include/scsi/scsicam.h extern unsigned char *scsi_bios_ptable(struct block_device *bdev);
bdev              438 include/trace/events/bcache.h 		__entry->dev		= ca->bdev->bd_dev;
bdev              458 include/trace/events/bcache.h 		__entry->dev		= ca->bdev->bd_dev;
bdev              478 include/trace/events/bcache.h 		__entry->dev		= ca->bdev->bd_dev;
bdev              456 kernel/trace/blktrace.c 				struct block_device *bdev)
bdev              460 kernel/trace/blktrace.c 	if (bdev)
bdev              461 kernel/trace/blktrace.c 		part = bdev->bd_part;
bdev              476 kernel/trace/blktrace.c 			      struct block_device *bdev,
bdev              536 kernel/trace/blktrace.c 	blk_trace_setup_lba(bt, bdev);
bdev              563 kernel/trace/blktrace.c 			     struct block_device *bdev, char __user *arg)
bdev              572 kernel/trace/blktrace.c 	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
bdev              584 kernel/trace/blktrace.c 		    struct block_device *bdev,
bdev              590 kernel/trace/blktrace.c 	ret = __blk_trace_setup(q, name, dev, bdev, arg);
bdev              599 kernel/trace/blktrace.c 				  dev_t dev, struct block_device *bdev,
bdev              618 kernel/trace/blktrace.c 	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
bdev              698 kernel/trace/blktrace.c int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
bdev              704 kernel/trace/blktrace.c 	q = bdev_get_queue(bdev);
bdev              712 kernel/trace/blktrace.c 		bdevname(bdev, b);
bdev              713 kernel/trace/blktrace.c 		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
bdev              717 kernel/trace/blktrace.c 		bdevname(bdev, b);
bdev              718 kernel/trace/blktrace.c 		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
bdev             1646 kernel/trace/blktrace.c 				 struct block_device *bdev)
bdev             1659 kernel/trace/blktrace.c 	bt->dev = bdev->bd_dev;
bdev             1662 kernel/trace/blktrace.c 	blk_trace_setup_lba(bt, bdev);
bdev             1784 kernel/trace/blktrace.c static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
bdev             1786 kernel/trace/blktrace.c 	if (bdev->bd_disk == NULL)
bdev             1789 kernel/trace/blktrace.c 	return bdev_get_queue(bdev);
bdev             1798 kernel/trace/blktrace.c 	struct block_device *bdev;
bdev             1802 kernel/trace/blktrace.c 	bdev = bdget(part_devt(p));
bdev             1803 kernel/trace/blktrace.c 	if (bdev == NULL)
bdev             1806 kernel/trace/blktrace.c 	q = blk_trace_get_queue(bdev);
bdev             1833 kernel/trace/blktrace.c 	bdput(bdev);
bdev             1842 kernel/trace/blktrace.c 	struct block_device *bdev;
bdev             1866 kernel/trace/blktrace.c 	bdev = bdget(part_devt(p));
bdev             1867 kernel/trace/blktrace.c 	if (bdev == NULL)
bdev             1870 kernel/trace/blktrace.c 	q = blk_trace_get_queue(bdev);
bdev             1884 kernel/trace/blktrace.c 			ret = blk_trace_setup_queue(q, bdev);
bdev             1892 kernel/trace/blktrace.c 		ret = blk_trace_setup_queue(q, bdev);
bdev             1911 kernel/trace/blktrace.c 	bdput(bdev);
bdev              890 lib/vsprintf.c char *bdev_name(char *buf, char *end, struct block_device *bdev,
bdev              895 lib/vsprintf.c 	if (check_pointer(&buf, end, bdev, spec))
bdev              898 lib/vsprintf.c 	hd = bdev->bd_disk;
bdev              900 lib/vsprintf.c 	if (bdev->bd_part->partno) {
bdev              906 lib/vsprintf.c 		buf = number(buf, end, bdev->bd_part->partno, spec);
bdev               36 mm/page_io.c   		struct block_device *bdev;
bdev               38 mm/page_io.c   		bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
bdev               39 mm/page_io.c   		bio_set_dev(bio, bdev);
bdev              107 mm/page_io.c   	disk = sis->bdev->bd_disk;
bdev              115 mm/page_io.c   		disk->fops->swap_slot_free_notify(sis->bdev,
bdev              326 mm/page_io.c   	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
bdev              377 mm/page_io.c   	ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
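The mm/page_io.c swap path above tries the driver's synchronous ->rw_page hook via bdev_read_page() first and falls back to building a bio when that fails (typically -EOPNOTSUPP when the driver has no ->rw_page). A simplified sketch of the same shape (example_swap_readpage is invented; the bio fallback is elided):

#include <linux/fs.h>
#include <linux/blkdev.h>

static int example_swap_readpage(struct block_device *bdev, sector_t sector,
				 struct page *page)
{
	/* Try the synchronous ->rw_page fast path first. */
	if (!bdev_read_page(bdev, sector, page))
		return 0;

	/* ... otherwise allocate a bio, bio_set_dev(bio, bdev), submit ... */
	return -EIO;		/* placeholder for the bio fallback */
}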
bdev              183 mm/swapfile.c  		err = blkdev_issue_discard(si->bdev, start_block,
bdev              194 mm/swapfile.c  		err = blkdev_issue_discard(si->bdev, start_block,
bdev              245 mm/swapfile.c  		if (blkdev_issue_discard(si->bdev, start_block,
bdev              721 mm/swapfile.c  			si->bdev->bd_disk->fops->swap_slot_free_notify;
bdev              727 mm/swapfile.c  			swap_slot_free_notify(si->bdev, offset);
bdev             1762 mm/swapfile.c  	struct block_device *bdev = NULL;
bdev             1766 mm/swapfile.c  		bdev = bdget(device);
bdev             1775 mm/swapfile.c  		if (!bdev) {
bdev             1777 mm/swapfile.c  				*bdev_p = bdgrab(sis->bdev);
bdev             1782 mm/swapfile.c  		if (bdev == sis->bdev) {
bdev             1787 mm/swapfile.c  					*bdev_p = bdgrab(sis->bdev);
bdev             1790 mm/swapfile.c  				bdput(bdev);
bdev             1796 mm/swapfile.c  	if (bdev)
bdev             1797 mm/swapfile.c  		bdput(bdev);
bdev             1808 mm/swapfile.c  	struct block_device *bdev;
bdev             1813 mm/swapfile.c  	return map_swap_entry(swp_entry(type, offset), &bdev);
bdev             2257 mm/swapfile.c  static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
bdev             2264 mm/swapfile.c  	*bdev = sis->bdev;
bdev             2274 mm/swapfile.c  sector_t map_swap_page(struct page *page, struct block_device **bdev)
bdev             2278 mm/swapfile.c  	return map_swap_entry(entry, bdev);
bdev             2411 mm/swapfile.c  	struct block_device *bdev;
bdev             2413 mm/swapfile.c  	if (p->bdev)
bdev             2414 mm/swapfile.c  		bdev = p->bdev;
bdev             2416 mm/swapfile.c  		bdev = p->swap_file->f_inode->i_sb->s_bdev;
bdev             2418 mm/swapfile.c  	return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
bdev             2619 mm/swapfile.c  	if (!p->bdev || !blk_queue_nonrot(bdev_get_queue(p->bdev)))
bdev             2662 mm/swapfile.c  		struct block_device *bdev = I_BDEV(inode);
bdev             2664 mm/swapfile.c  		set_blocksize(bdev, old_block_size);
bdev             2665 mm/swapfile.c  		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
bdev             2879 mm/swapfile.c  		p->bdev = bdgrab(I_BDEV(inode));
bdev             2880 mm/swapfile.c  		error = blkdev_get(p->bdev,
bdev             2883 mm/swapfile.c  			p->bdev = NULL;
bdev             2886 mm/swapfile.c  		p->old_block_size = block_size(p->bdev);
bdev             2887 mm/swapfile.c  		error = set_blocksize(p->bdev, PAGE_SIZE);
bdev             2892 mm/swapfile.c  		p->bdev = inode->i_sb->s_bdev;
bdev             3090 mm/swapfile.c  	struct request_queue *q = bdev_get_queue(si->bdev);
bdev             3192 mm/swapfile.c  	if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
bdev             3245 mm/swapfile.c  	if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
bdev             3317 mm/swapfile.c  	if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
bdev             3318 mm/swapfile.c  		set_blocksize(p->bdev, p->old_block_size);
bdev             3319 mm/swapfile.c  		blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
bdev             3760 mm/swapfile.c  		if (si->bdev) {
bdev             3761 mm/swapfile.c  			blkcg_schedule_throttle(bdev_get_queue(si->bdev),
bdev               79 security/loadpin/loadpin.c 		char bdev[BDEVNAME_SIZE];
bdev               82 security/loadpin/loadpin.c 		bdevname(mnt_sb->s_bdev, bdev);
bdev               83 security/loadpin/loadpin.c 		pr_info("%s (%u:%u): %s\n", bdev,