free_blocks      1642 drivers/block/pktcdvd.c 		if (ti.free_blocks)
free_blocks      1643 drivers/block/pktcdvd.c 			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
free_blocks      2882 drivers/cdrom/cdrom.c 		if (ti.free_blocks)
free_blocks      2883 drivers/cdrom/cdrom.c 			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
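
The pktcdvd.c and cdrom.c hits above are the same fallback path: when the READ TRACK INFORMATION reply reports unused space, the last-written address is pulled back by the wire-order free_blocks count (plus 7); the field is the __be32 declared in include/uapi/linux/cdrom.h at the end of this listing. A minimal userspace sketch of that conversion step, using a raw byte array and an open-coded helper instead of the kernel's be32_to_cpu (the struct and field layout here are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the track-information reply fields. */
struct track_info {
        uint8_t  free_blocks[4];   /* big-endian on the wire */
        uint32_t track_start;      /* already host order in this sketch */
        uint32_t track_size;
};

/* Open-coded equivalent of be32_to_cpu() for a raw 4-byte field. */
static uint32_t be32_field(const uint8_t b[4])
{
        return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
        struct track_info ti = {
                .free_blocks = { 0x00, 0x00, 0x01, 0x00 },  /* 256 free blocks */
                .track_start = 0,
                .track_size  = 4096,
        };
        long last_written = ti.track_start + ti.track_size;

        /* Mirror the driver logic: pull last_written back by free_blocks + 7. */
        if (be32_field(ti.free_blocks))
                last_written -= be32_field(ti.free_blocks) + 7;

        printf("last written sector: %ld\n", last_written);
        return 0;
}
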
free_blocks       698 drivers/lightnvm/pblk-init.c 	atomic_set(&pblk->rl.free_blocks, nr_free_chks);
free_blocks       100 drivers/lightnvm/pblk-rl.c 	return atomic_read(&rl->free_blocks);
free_blocks       109 drivers/lightnvm/pblk-rl.c 				   unsigned long free_blocks)
free_blocks       115 drivers/lightnvm/pblk-rl.c 	if (free_blocks >= rl->high) {
free_blocks       128 drivers/lightnvm/pblk-rl.c 	} else if (free_blocks < rl->high) {
free_blocks       130 drivers/lightnvm/pblk-rl.c 		int user_windows = free_blocks >> shift;
free_blocks       136 drivers/lightnvm/pblk-rl.c 		if (free_blocks <= rl->rsv_blocks) {
free_blocks       162 drivers/lightnvm/pblk-rl.c 	int free_blocks;
free_blocks       164 drivers/lightnvm/pblk-rl.c 	atomic_add(blk_in_line, &rl->free_blocks);
free_blocks       165 drivers/lightnvm/pblk-rl.c 	free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);
free_blocks       167 drivers/lightnvm/pblk-rl.c 	__pblk_rl_update_rates(rl, free_blocks);
free_blocks       174 drivers/lightnvm/pblk-rl.c 	int free_blocks;
free_blocks       176 drivers/lightnvm/pblk-rl.c 	atomic_sub(blk_in_line, &rl->free_blocks);
free_blocks       179 drivers/lightnvm/pblk-rl.c 		free_blocks = atomic_sub_return(blk_in_line,
free_blocks       182 drivers/lightnvm/pblk-rl.c 		free_blocks = atomic_read(&rl->free_user_blocks);
free_blocks       184 drivers/lightnvm/pblk-rl.c 	__pblk_rl_update_rates(rl, free_blocks);
free_blocks        53 drivers/lightnvm/pblk-sysfs.c 	int free_blocks, free_user_blocks, total_blocks;
free_blocks        57 drivers/lightnvm/pblk-sysfs.c 	free_blocks = pblk_rl_nr_free_blks(&pblk->rl);
free_blocks        77 drivers/lightnvm/pblk-sysfs.c 				free_blocks,
free_blocks       296 drivers/lightnvm/pblk.h 	atomic_t free_blocks;		/* Total number of free blocks (+ OP) */
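
The lightnvm/pblk hits are a rate limiter built on an atomic counter: free_blocks is seeded with the number of free chunks at init, raised or lowered as lines are freed or taken, and every change is pushed through __pblk_rl_update_rates() to rescale the write budget against the high and reserved watermarks. A compressed userspace sketch of that shape, using C11 atomics instead of the kernel's atomic_t; the linear budget formula and the names are illustrative, not pblk's window arithmetic:

#include <stdatomic.h>
#include <stdio.h>

struct rate_limiter {
        atomic_int free_blocks;   /* total free blocks (incl. over-provisioning) */
        int high;                 /* above this: full user write budget */
        int rsv_blocks;           /* at or below this: writes reserved for GC */
        int max_credits;          /* budget handed out per window */
        int user_credits;         /* current user write budget */
};

/* Recompute the write budget from the current free-block count. */
static void update_rates(struct rate_limiter *rl, int free_blocks)
{
        if (free_blocks >= rl->high)
                rl->user_credits = rl->max_credits;
        else if (free_blocks <= rl->rsv_blocks)
                rl->user_credits = 0;                 /* keep the reserve for GC */
        else
                rl->user_credits = rl->max_credits *
                                   (free_blocks - rl->rsv_blocks) /
                                   (rl->high - rl->rsv_blocks);
}

static void line_freed(struct rate_limiter *rl, int blocks)
{
        update_rates(rl, atomic_fetch_add(&rl->free_blocks, blocks) + blocks);
}

static void line_taken(struct rate_limiter *rl, int blocks)
{
        update_rates(rl, atomic_fetch_sub(&rl->free_blocks, blocks) - blocks);
}

int main(void)
{
        struct rate_limiter rl = { .high = 100, .rsv_blocks = 10, .max_credits = 64 };

        atomic_store(&rl.free_blocks, 120);
        update_rates(&rl, 120);
        line_taken(&rl, 70);          /* drop below the high watermark */
        printf("user credits: %d\n", rl.user_credits);
        line_freed(&rl, 40);          /* back above it */
        printf("user credits: %d\n", rl.user_credits);
        return 0;
}
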
free_blocks      1513 drivers/md/dm-thin.c static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
free_blocks      1517 drivers/md/dm-thin.c 	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
free_blocks      1530 drivers/md/dm-thin.c 	dm_block_t free_blocks;
free_blocks      1536 drivers/md/dm-thin.c 	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
free_blocks      1542 drivers/md/dm-thin.c 	check_low_water_mark(pool, free_blocks);
free_blocks      1544 drivers/md/dm-thin.c 	if (!free_blocks) {
free_blocks      1553 drivers/md/dm-thin.c 		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
free_blocks      1559 drivers/md/dm-thin.c 		if (!free_blocks) {
free_blocks      1574 drivers/md/dm-thin.c 	r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
free_blocks      1580 drivers/md/dm-thin.c 	if (!free_blocks) {
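
The dm-thin hits are a one-shot threshold: the data-block allocation path reads the free data-block count from the pool metadata, check_low_water_mark() latches a single low-space event when the count first drops to the configured low_water_blocks, and a zero count triggers a metadata commit and a re-read before the pool gives up (the free metadata-block count is then checked the same way). A minimal sketch of the latch, with hypothetical names rather than dm-thin's API:

#include <stdbool.h>
#include <stdio.h>

struct pool {
        unsigned long low_water_blocks;   /* threshold set at pool creation */
        bool low_water_triggered;         /* one-shot latch */
};

/* Raise the low-water event only the first time the count drops to the
 * threshold; in the real driver the latch is cleared elsewhere. */
static void check_low_water_mark(struct pool *pool, unsigned long free_blocks)
{
        if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
                pool->low_water_triggered = true;
                printf("pool is low on space (%lu blocks left)\n", free_blocks);
        }
}

int main(void)
{
        struct pool pool = { .low_water_blocks = 16 };
        unsigned long free_blocks;

        for (free_blocks = 20; free_blocks > 10; free_blocks--)
                check_low_water_mark(&pool, free_blocks);   /* fires exactly once */
        return 0;
}
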
free_blocks        32 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 					 sizeof((wqs)->free_blocks[0]))
free_blocks       250 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	*page_idx = wqs->free_blocks[pos].page_idx;
free_blocks       251 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	*block_idx = wqs->free_blocks[pos].block_idx;
free_blocks       253 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	wqs->free_blocks[pos].page_idx = -1;
free_blocks       254 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	wqs->free_blocks[pos].block_idx = -1;
free_blocks       270 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	wqs->free_blocks[pos].page_idx = page_idx;
free_blocks       271 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	wqs->free_blocks[pos].block_idx = block_idx;
free_blocks       284 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 			wqs->free_blocks[pos].page_idx = page_idx;
free_blocks       285 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 			wqs->free_blocks[pos].block_idx = blk_idx;
free_blocks       334 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs),
free_blocks       336 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	if (!wqs->free_blocks) {
free_blocks       363 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	devm_kfree(&pdev->dev, wqs->free_blocks);
free_blocks        59 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h 	struct hinic_free_block *free_blocks;
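
In the HiNIC driver free_blocks is not a counter but a devm-allocated array of {page_idx, block_idx} pairs acting as a free list for work-queue blocks: it is pre-filled with every page/block combination, allocation consumes an entry and poisons it with -1, and freeing writes a pair back at the return position. A standalone sketch of that free-list shape, with made-up sizes and none of the driver's locking:

#include <stdio.h>

#define WQS_PAGES       2
#define BLOCKS_PER_PAGE 4
#define NUM_BLOCKS      (WQS_PAGES * BLOCKS_PER_PAGE)

struct free_block {
        int page_idx;
        int block_idx;
};

struct wqs {
        struct free_block free_blocks[NUM_BLOCKS];
        int alloc_pos;    /* next slot to hand out */
        int return_pos;   /* next slot to refill */
};

/* Pre-fill the free list with every page/block combination. */
static void wqs_init(struct wqs *wqs)
{
        int page, blk, pos = 0;

        for (page = 0; page < WQS_PAGES; page++)
                for (blk = 0; blk < BLOCKS_PER_PAGE; blk++) {
                        wqs->free_blocks[pos].page_idx = page;
                        wqs->free_blocks[pos].block_idx = blk;
                        pos++;
                }
        wqs->alloc_pos = 0;
        wqs->return_pos = 0;
}

/* Hand out the next free block and poison the consumed slot. */
static void wqs_next_block(struct wqs *wqs, int *page_idx, int *block_idx)
{
        int pos = wqs->alloc_pos++ % NUM_BLOCKS;

        *page_idx = wqs->free_blocks[pos].page_idx;
        *block_idx = wqs->free_blocks[pos].block_idx;

        wqs->free_blocks[pos].page_idx = -1;
        wqs->free_blocks[pos].block_idx = -1;
}

/* Put a block back at the return position. */
static void wqs_return_block(struct wqs *wqs, int page_idx, int block_idx)
{
        int pos = wqs->return_pos++ % NUM_BLOCKS;

        wqs->free_blocks[pos].page_idx = page_idx;
        wqs->free_blocks[pos].block_idx = block_idx;
}

int main(void)
{
        struct wqs wqs;
        int page, blk;

        wqs_init(&wqs);
        wqs_next_block(&wqs, &page, &blk);
        printf("got page %d block %d\n", page, blk);
        wqs_return_block(&wqs, page, blk);
        return 0;
}
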
free_blocks       169 fs/ext2/balloc.c 		unsigned free_blocks;
free_blocks       172 fs/ext2/balloc.c 		free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
free_blocks       173 fs/ext2/balloc.c 		desc->bg_free_blocks_count = cpu_to_le16(free_blocks + count);
free_blocks      1183 fs/ext2/balloc.c 	ext2_fsblk_t free_blocks, root_blocks;
free_blocks      1185 fs/ext2/balloc.c 	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
free_blocks      1187 fs/ext2/balloc.c 	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
free_blocks      1242 fs/ext2/balloc.c 	ext2_grpblk_t free_blocks;	/* number of free blocks in a group */
free_blocks      1303 fs/ext2/balloc.c 	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
free_blocks      1308 fs/ext2/balloc.c 	if (my_rsv && (free_blocks < windowsz)
free_blocks      1309 fs/ext2/balloc.c 		&& (free_blocks > 0)
free_blocks      1313 fs/ext2/balloc.c 	if (free_blocks > 0) {
free_blocks      1341 fs/ext2/balloc.c 		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
free_blocks      1346 fs/ext2/balloc.c 		if (!free_blocks)
free_blocks      1353 fs/ext2/balloc.c 		if (my_rsv && (free_blocks <= (windowsz/2)))
free_blocks       264 fs/ext2/ialloc.c 	int free_blocks;
free_blocks       274 fs/ext2/ialloc.c 	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
free_blocks       275 fs/ext2/ialloc.c 	avefreeb = free_blocks / ngroups;
free_blocks       312 fs/ext2/ialloc.c 	blocks_per_dir = (le32_to_cpu(es->s_blocks_count)-free_blocks) / ndirs;
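
The ext2 hits operate at two levels: the per-group bg_free_blocks_count, a little-endian 16-bit on-disk field adjusted via le16_to_cpu/cpu_to_le16 when blocks are released and consulted when picking a group or sizing a reservation window, and the filesystem-wide per-CPU counter behind the quick has-free-blocks test, which keeps the last root_blocks for privileged or reserved users. A sketch of that reservation test, with a single boolean standing in for the kernel's capability and resuid/resgid checks:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long fsblk_t;

/* Allow an allocation only if it leaves the reserved tail of the filesystem
 * untouched, unless the caller is privileged (in the kernel: CAP_SYS_RESOURCE
 * or a matching reserved uid/gid). */
static bool has_free_blocks(fsblk_t free_blocks, fsblk_t root_blocks,
                            bool privileged)
{
        if (free_blocks < root_blocks + 1 && !privileged)
                return false;
        return true;
}

int main(void)
{
        fsblk_t free_blocks = 50, root_blocks = 100;

        printf("ordinary user may allocate: %d\n",
               has_free_blocks(free_blocks, root_blocks, false));
        printf("privileged user may allocate: %d\n",
               has_free_blocks(free_blocks, root_blocks, true));
        return 0;
}
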
free_blocks      1379 fs/ext4/resize.c 	ext4_fsblk_t free_blocks = 0;
free_blocks      1399 fs/ext4/resize.c 		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
free_blocks      1408 fs/ext4/resize.c 	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
free_blocks      1447 fs/ext4/resize.c 			   EXT4_NUM_B2C(sbi, free_blocks));
free_blocks      1459 fs/ext4/resize.c 		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
free_blocks      1473 fs/ext4/resize.c 		       blocks_count, free_blocks, reserved_blocks);
free_blocks       306 fs/f2fs/recovery.c 	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
free_blocks       370 fs/f2fs/recovery.c 		if (++loop_cnt >= free_blocks ||
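
f2fs recovery uses the figure only as a sanity bound: free_blocks is the largest number of blocks the node chain could legitimately contain, so a walk that iterates more times than that must be stuck in a corrupted (looping) chain and is aborted. A generic sketch of that bounded-walk guard over a linked list, with illustrative types:

#include <stdio.h>

struct node {
        int blkaddr;
        struct node *next;
};

/* Walk a recovery chain that can hold at most 'free_blocks' entries;
 * exceeding that bound means the on-disk chain must be corrupted. */
static int walk_chain(struct node *head, unsigned int free_blocks)
{
        unsigned int loop_cnt = 0;
        struct node *cur;

        for (cur = head; cur; cur = cur->next) {
                if (++loop_cnt > free_blocks) {
                        fprintf(stderr, "detected looped node chain\n");
                        return -1;   /* treat the image as corrupted */
                }
        }
        return 0;
}

int main(void)
{
        struct node a, b;

        a.blkaddr = 1; a.next = &b;
        b.blkaddr = 2; b.next = &a;   /* deliberately cyclic */
        return walk_chain(&a, 8) ? 1 : 0;
}
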
free_blocks       360 fs/gfs2/log.c  	unsigned int free_blocks;
free_blocks       367 fs/gfs2/log.c  	free_blocks = atomic_read(&sdp->sd_log_blks_free);
free_blocks       368 fs/gfs2/log.c  	if (unlikely(free_blocks <= wanted)) {
free_blocks       376 fs/gfs2/log.c  			free_blocks = atomic_read(&sdp->sd_log_blks_free);
free_blocks       377 fs/gfs2/log.c  		} while(free_blocks <= wanted);
free_blocks       381 fs/gfs2/log.c  	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
free_blocks       382 fs/gfs2/log.c  				free_blocks - blks) != free_blocks) {
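
gfs2's journal reservation is lock-free: read sd_log_blks_free, wait while it is at or below the wanted level, then claim blks blocks with a compare-and-swap and start over if another reserver changed the count in between. The same loop in C11 atomics, without gfs2's wait queue (this sketch just re-reads where the kernel sleeps):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint log_blks_free;

/* Reserve 'blks' journal blocks while keeping at least 'reserved' in hand.
 * The compare-exchange retries if the count changed under us. */
static void log_reserve(unsigned int blks, unsigned int reserved)
{
        unsigned int wanted = blks + reserved;
        unsigned int free_blocks;

retry:
        free_blocks = atomic_load(&log_blks_free);
        while (free_blocks <= wanted) {
                /* Real code sleeps on a waitqueue until the log is flushed;
                 * here we simply re-read. */
                free_blocks = atomic_load(&log_blks_free);
        }

        if (!atomic_compare_exchange_strong(&log_blks_free, &free_blocks,
                                            free_blocks - blks))
                goto retry;
}

int main(void)
{
        atomic_store(&log_blks_free, 128);
        log_reserve(8, 32);
        printf("log blocks left: %u\n", atomic_load(&log_blks_free));
        return 0;
}
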
free_blocks      1570 fs/gfs2/rgrp.c 	u32 free_blocks = rgd_free(rgd, rs);
free_blocks      1578 fs/gfs2/rgrp.c 		extlen = clamp(extlen, (u32)RGRP_RSRV_MINBLKS, free_blocks);
free_blocks      1580 fs/gfs2/rgrp.c 	if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
free_blocks      2047 fs/gfs2/rgrp.c 	u32 free_blocks, skip = 0;
free_blocks      2118 fs/gfs2/rgrp.c 		free_blocks = rgd_free(rs->rs_rbm.rgd, rs);
free_blocks      2119 fs/gfs2/rgrp.c 		if (free_blocks >= ap->target ||
free_blocks      2121 fs/gfs2/rgrp.c 		     free_blocks >= ap->min_target)) {
free_blocks      2122 fs/gfs2/rgrp.c 			ap->allowed = free_blocks;
free_blocks       156 fs/hfsplus/bitmap.c 	sbi->free_blocks -= *max;
free_blocks       235 fs/hfsplus/bitmap.c 	sbi->free_blocks += len;
free_blocks       449 fs/hfsplus/extents.c 	    sbi->total_blocks - sbi->free_blocks + 8) {
free_blocks       453 fs/hfsplus/extents.c 		       sbi->total_blocks, sbi->free_blocks);
free_blocks       170 fs/hfsplus/hfsplus_fs.h 	u32 free_blocks;
free_blocks       121 fs/hfsplus/hfsplus_raw.h 	__be32 free_blocks;
free_blocks       212 fs/hfsplus/super.c 	vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
free_blocks       319 fs/hfsplus/super.c 	buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
free_blocks       429 fs/hfsplus/super.c 	sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
free_blocks       188 fs/hfsplus/xattr.c 	if (sbi->free_blocks <= (hip->clump_blocks << 1)) {
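
HFS+ keeps the count twice: a host-order u32 in hfsplus_sb_info that the bitmap allocator adjusts directly, and the big-endian volume-header field it is loaded from at mount and written back to via cpu_to_be32. The cached value feeds statfs (scaled into the statfs block unit by fs_shift) and a cheap headroom test before growing the attributes tree. A small sketch of those two read-side uses, with assumed sizes:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sb_info {
        uint32_t free_blocks;    /* host-order copy of the volume-header field */
        int fs_shift;            /* converts allocation blocks to statfs blocks */
};

/* statfs: report free space in statfs-sized blocks, as super.c does. */
static uint64_t bfree(const struct sb_info *sbi)
{
        return (uint64_t)sbi->free_blocks << sbi->fs_shift;
}

/* xattr: only grow the attributes tree if more than twice the clump size
 * is left, mirroring the headroom check in xattr.c. */
static bool can_grow_attr_tree(const struct sb_info *sbi, uint32_t clump_blocks)
{
        return sbi->free_blocks > (clump_blocks << 1);
}

int main(void)
{
        struct sb_info sbi = { .free_blocks = 1200, .fs_shift = 3 };

        printf("f_bfree = %llu\n", (unsigned long long)bfree(&sbi));
        printf("attr tree growth %s\n",
               can_grow_attr_tree(&sbi, 256) ? "allowed" : "deferred");
        return 0;
}
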
free_blocks       879 fs/nilfs2/sysfs.c 	sector_t free_blocks = 0;
free_blocks       881 fs/nilfs2/sysfs.c 	nilfs_count_free_blocks(nilfs, &free_blocks);
free_blocks       883 fs/nilfs2/sysfs.c 			(unsigned long long)free_blocks);
free_blocks       927 fs/nilfs2/sysfs.c NILFS_DEV_RO_ATTR(free_blocks);
free_blocks       936 fs/nilfs2/sysfs.c 	NILFS_DEV_ATTR_LIST(free_blocks),
free_blocks        34 fs/reiserfs/resize.c 	unsigned long int block_count, free_blocks;
free_blocks       219 fs/reiserfs/resize.c 	free_blocks = SB_FREE_BLOCKS(s);
free_blocks       221 fs/reiserfs/resize.c 			   free_blocks + (block_count_new - block_count -
free_blocks      1553 fs/xfs/xfs_log.c 	int		free_blocks;
free_blocks      1562 fs/xfs/xfs_log.c 	free_blocks = BTOBBT(free_bytes);
free_blocks      1572 fs/xfs/xfs_log.c 	if (free_blocks >= free_threshold)
free_blocks       890 include/uapi/linux/cdrom.h 	__be32 free_blocks;