sdp               220 arch/mips/include/asm/asm.h #define LONG_SP		sdp
sdp                49 arch/sh/kernel/cpu/shmobile/pm.c 	struct sh_sleep_data *sdp = onchip_mem;
sdp                53 arch/sh/kernel/cpu/shmobile/pm.c 	standby_onchip_mem = (void *)(sdp + 1);
sdp                83 arch/sh/kernel/cpu/shmobile/pm.c 	struct sh_sleep_data *sdp;
sdp                87 arch/sh/kernel/cpu/shmobile/pm.c 	sdp = onchip_mem;
sdp                88 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->addr.stbcr = 0xa4150020; /* STBCR */
sdp                89 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->addr.bar = 0xa4150040; /* BAR */
sdp                90 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->addr.pteh = 0xff000000; /* PTEH */
sdp                91 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->addr.ptel = 0xff000004; /* PTEL */
sdp                92 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->addr.ttb = 0xff000008; /* TTB */
sdp                93 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->addr.tea = 0xff00000c; /* TEA */
sdp                94 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->addr.mmucr = 0xff000010; /* MMUCR */
sdp                95 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->addr.ptea = 0xff000034; /* PTEA */
sdp                96 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->addr.pascr = 0xff000070; /* PASCR */
sdp                97 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->addr.irmcr = 0xff000078; /* IRMCR */
sdp                98 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->addr.ccr = 0xff00001c; /* CCR */
sdp                99 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->addr.ramcr = 0xff000074; /* RAMCR */
sdp               100 arch/sh/kernel/cpu/shmobile/pm.c 	vp = sdp + 1;
sdp               110 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->sf_pre = (unsigned long)vp;
sdp               116 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->sf_post = (unsigned long)vp;
sdp               124 arch/sh/kernel/cpu/shmobile/pm.c 	sdp->resume = (unsigned long)vp;
sdp              1950 arch/x86/platform/uv/tlb_uv.c 	struct socket_desc *sdp;
sdp              1980 arch/x86/platform/uv/tlb_uv.c 		sdp = &bdp->socket[socket];
sdp              1981 arch/x86/platform/uv/tlb_uv.c 		sdp->cpu_number[sdp->num_cpus] = cpu;
sdp              1982 arch/x86/platform/uv/tlb_uv.c 		sdp->num_cpus++;
sdp              1983 arch/x86/platform/uv/tlb_uv.c 		if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
sdp              1985 arch/x86/platform/uv/tlb_uv.c 				sdp->num_cpus);
sdp              2022 arch/x86/platform/uv/tlb_uv.c static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
sdp              2029 arch/x86/platform/uv/tlb_uv.c 	for (i = 0; i < sdp->num_cpus; i++) {
sdp              2030 arch/x86/platform/uv/tlb_uv.c 		cpu = sdp->cpu_number[i];
sdp              2039 arch/x86/platform/uv/tlb_uv.c 		bcp->cpus_in_socket = sdp->num_cpus;
sdp              2103 arch/x86/platform/uv/tlb_uv.c 			struct socket_desc *sdp;
sdp              2105 arch/x86/platform/uv/tlb_uv.c 				sdp = &bdp->socket[socket];
sdp              2106 arch/x86/platform/uv/tlb_uv.c 				if (scan_sock(sdp, bdp, &smaster, &hmaster))
sdp              3408 drivers/ata/libata-scsi.c 	struct scsi_device *sdp = cmd->device;
sdp              3409 drivers/ata/libata-scsi.c 	size_t len = sdp->sector_size;
sdp              3453 drivers/ata/libata-scsi.c 	struct scsi_device *sdp = scmd->device;
sdp              3454 drivers/ata/libata-scsi.c 	size_t len = sdp->sector_size;
sdp              3338 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->sdp[phys_id] = true;
sdp               320 drivers/net/ethernet/intel/i40e/i40e_type.h 	bool sdp[I40E_HW_CAP_MAX_GPIO];
sdp                52 drivers/s390/scsi/zfcp_scsi.c static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
sdp                54 drivers/s390/scsi/zfcp_scsi.c 	if (sdp->tagged_supported)
sdp                55 drivers/s390/scsi/zfcp_scsi.c 		scsi_change_queue_depth(sdp, default_depth);
sdp               278 drivers/scsi/isci/request.c static u8 scu_bg_blk_size(struct scsi_device *sdp)
sdp               280 drivers/scsi/isci/request.c 	switch (sdp->sector_size) {
sdp              3356 drivers/scsi/scsi_debug.c 	struct scsi_device *sdp = scp->device;
sdp              3375 drivers/scsi/scsi_debug.c 			if (dp->target == sdp->id) {
sdp              3387 drivers/scsi/scsi_debug.c 			if (dp->target == sdp->id)
sdp              3864 drivers/scsi/scsi_debug.c static int scsi_debug_slave_alloc(struct scsi_device *sdp)
sdp              3868 drivers/scsi/scsi_debug.c 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
sdp              3872 drivers/scsi/scsi_debug.c static int scsi_debug_slave_configure(struct scsi_device *sdp)
sdp              3875 drivers/scsi/scsi_debug.c 			(struct sdebug_dev_info *)sdp->hostdata;
sdp              3879 drivers/scsi/scsi_debug.c 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
sdp              3880 drivers/scsi/scsi_debug.c 	if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
sdp              3881 drivers/scsi/scsi_debug.c 		sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
sdp              3883 drivers/scsi/scsi_debug.c 		devip = find_build_dev_info(sdp);
sdp              3887 drivers/scsi/scsi_debug.c 	sdp->hostdata = devip;
sdp              3889 drivers/scsi/scsi_debug.c 		sdp->no_uld_attach = 1;
sdp              3890 drivers/scsi/scsi_debug.c 	config_cdb_len(sdp);
sdp              3894 drivers/scsi/scsi_debug.c static void scsi_debug_slave_destroy(struct scsi_device *sdp)
sdp              3897 drivers/scsi/scsi_debug.c 		(struct sdebug_dev_info *)sdp->hostdata;
sdp              3901 drivers/scsi/scsi_debug.c 		       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
sdp              3905 drivers/scsi/scsi_debug.c 		sdp->hostdata = NULL;
sdp              4040 drivers/scsi/scsi_debug.c 		struct scsi_device *sdp = SCpnt->device;
sdp              4042 drivers/scsi/scsi_debug.c 				(struct sdebug_dev_info *)sdp->hostdata;
sdp              4045 drivers/scsi/scsi_debug.c 			sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
sdp              4056 drivers/scsi/scsi_debug.c 	struct scsi_device *sdp;
sdp              4063 drivers/scsi/scsi_debug.c 	sdp = SCpnt->device;
sdp              4064 drivers/scsi/scsi_debug.c 	if (!sdp)
sdp              4067 drivers/scsi/scsi_debug.c 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
sdp              4068 drivers/scsi/scsi_debug.c 	hp = sdp->host;
sdp              4076 drivers/scsi/scsi_debug.c 			if (devip->target == sdp->id) {
sdp              4082 drivers/scsi/scsi_debug.c 		sdev_printk(KERN_INFO, sdp,
sdp              4092 drivers/scsi/scsi_debug.c 	struct scsi_device *sdp;
sdp              4099 drivers/scsi/scsi_debug.c 	sdp = SCpnt->device;
sdp              4101 drivers/scsi/scsi_debug.c 		sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
sdp              4102 drivers/scsi/scsi_debug.c 	hp = sdp->host;
sdp              4115 drivers/scsi/scsi_debug.c 		sdev_printk(KERN_INFO, sdp,
sdp              4264 drivers/scsi/scsi_debug.c 	struct scsi_device *sdp;
sdp              4272 drivers/scsi/scsi_debug.c 	sdp = cmnd->device;
sdp              4313 drivers/scsi/scsi_debug.c 			sdev_printk(KERN_INFO, sdp,
sdp              4350 drivers/scsi/scsi_debug.c 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
sdp              4388 drivers/scsi/scsi_debug.c 			sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
sdp              4395 drivers/scsi/scsi_debug.c 		sdev_printk(KERN_INFO, sdp,
sdp              5593 drivers/scsi/scsi_debug.c 	struct scsi_device *sdp = scp->device;
sdp              5624 drivers/scsi/scsi_debug.c 		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
sdp              5629 drivers/scsi/scsi_debug.c 	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
sdp              5630 drivers/scsi/scsi_debug.c 	if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
sdp              5635 drivers/scsi/scsi_debug.c 	devip = (struct sdebug_dev_info *)sdp->hostdata;
sdp              5637 drivers/scsi/scsi_debug.c 		devip = find_build_dev_info(sdp);
sdp              5677 drivers/scsi/scsi_debug.c 			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
sdp              5708 drivers/scsi/scsi_debug.c 			sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
sdp               488 drivers/scsi/scsi_sysfs.c 	struct scsi_device *sdp = to_scsi_device(dev);
sdp               490 drivers/scsi/scsi_sysfs.c 				   &sdp->ew);
sdp               501 drivers/scsi/scsi_sysfs.c 	struct scsi_device *sdp;
sdp               506 drivers/scsi/scsi_sysfs.c 	sdp = to_scsi_device(dev);
sdp               507 drivers/scsi/scsi_sysfs.c 	if (sdp->no_uld_attach)
sdp               509 drivers/scsi/scsi_sysfs.c 	return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
sdp               163 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp               171 drivers/scsi/sd.c 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
sdp               198 drivers/scsi/sd.c 	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
sdp               216 drivers/scsi/sd.c 	if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
sdp               231 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp               233 drivers/scsi/sd.c 	return sprintf(buf, "%u\n", sdp->manage_start_stop);
sdp               241 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp               250 drivers/scsi/sd.c 	sdp->manage_start_stop = v;
sdp               270 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp               275 drivers/scsi/sd.c 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
sdp               281 drivers/scsi/sd.c 	sdp->allow_restart = v;
sdp               343 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp               346 drivers/scsi/sd.c 	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
sdp               347 drivers/scsi/sd.c 	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
sdp               349 drivers/scsi/sd.c 	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
sdp               404 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp               415 drivers/scsi/sd.c 	if (sdp->type != TYPE_DISK)
sdp               505 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp               512 drivers/scsi/sd.c 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
sdp               521 drivers/scsi/sd.c 		sdp->no_write_same = 1;
sdp               523 drivers/scsi/sd.c 		sdp->no_write_same = 0;
sdp               826 drivers/scsi/sd.c 	struct scsi_device *sdp = cmd->device;
sdp               828 drivers/scsi/sd.c 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sdp               829 drivers/scsi/sd.c 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
sdp               861 drivers/scsi/sd.c 	struct scsi_device *sdp = cmd->device;
sdp               863 drivers/scsi/sd.c 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sdp               864 drivers/scsi/sd.c 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
sdp               865 drivers/scsi/sd.c 	u32 data_len = sdp->sector_size;
sdp               892 drivers/scsi/sd.c 	struct scsi_device *sdp = cmd->device;
sdp               894 drivers/scsi/sd.c 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sdp               895 drivers/scsi/sd.c 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
sdp               896 drivers/scsi/sd.c 	u32 data_len = sdp->sector_size;
sdp               923 drivers/scsi/sd.c 	struct scsi_device *sdp = cmd->device;
sdp               925 drivers/scsi/sd.c 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sdp               926 drivers/scsi/sd.c 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
sdp               937 drivers/scsi/sd.c 	if (sdp->no_write_same)
sdp              1018 drivers/scsi/sd.c 	struct scsi_device *sdp = cmd->device;
sdp              1021 drivers/scsi/sd.c 	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sdp              1022 drivers/scsi/sd.c 	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
sdp              1028 drivers/scsi/sd.c 	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
sdp              1044 drivers/scsi/sd.c 	cmd->transfersize = sdp->sector_size;
sdp              1057 drivers/scsi/sd.c 	rq->__data_len = sdp->sector_size;
sdp              1164 drivers/scsi/sd.c 	struct scsi_device *sdp = cmd->device;
sdp              1166 drivers/scsi/sd.c 	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sdp              1168 drivers/scsi/sd.c 	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
sdp              1169 drivers/scsi/sd.c 	unsigned int mask = logical_to_sectors(sdp, 1) - 1;
sdp              1180 drivers/scsi/sd.c 	if (!scsi_device_online(sdp) || sdp->changed) {
sdp              1201 drivers/scsi/sd.c 	if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
sdp              1223 drivers/scsi/sd.c 	} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
sdp              1227 drivers/scsi/sd.c 		   sdp->use_10_for_rw || protect) {
sdp              1243 drivers/scsi/sd.c 	cmd->transfersize = sdp->sector_size;
sdp              1246 drivers/scsi/sd.c 	cmd->sdb.length = nr_blocks * sdp->sector_size;
sdp              1426 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp              1427 drivers/scsi/sd.c 	struct Scsi_Host *host = sdp->host;
sdp              1428 drivers/scsi/sd.c 	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
sdp              1438 drivers/scsi/sd.c 		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
sdp              1467 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp              1484 drivers/scsi/sd.c 	error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
sdp              1500 drivers/scsi/sd.c 			error = scsi_ioctl(sdp, cmd, p);
sdp              1506 drivers/scsi/sd.c 			error = scsi_ioctl(sdp, cmd, p);
sdp              1555 drivers/scsi/sd.c 	struct scsi_device *sdp;
sdp              1561 drivers/scsi/sd.c 	sdp = sdkp->device;
sdp              1570 drivers/scsi/sd.c 	if (!scsi_device_online(sdp)) {
sdp              1584 drivers/scsi/sd.c 	if (scsi_block_when_processing_errors(sdp)) {
sdp              1587 drivers/scsi/sd.c 		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
sdp              1605 drivers/scsi/sd.c 		sdp->changed = 1;
sdp              1614 drivers/scsi/sd.c 	retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
sdp              1615 drivers/scsi/sd.c 	sdp->changed = 0;
sdp              1623 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp              1624 drivers/scsi/sd.c 	const int timeout = sdp->request_queue->rq_timeout
sdp              1628 drivers/scsi/sd.c 	if (!scsi_device_online(sdp))
sdp              1643 drivers/scsi/sd.c 		res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
sdp              2201 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp              2205 drivers/scsi/sd.c 	if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
sdp              2214 drivers/scsi/sd.c 	else if (scsi_host_dif_capable(sdp->host, type))
sdp              2239 drivers/scsi/sd.c static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
sdp              2252 drivers/scsi/sd.c 	if (sdp->removable &&
sdp              2271 drivers/scsi/sd.c static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
sdp              2283 drivers/scsi/sd.c 	if (sdp->no_read_capacity_16)
sdp              2293 drivers/scsi/sd.c 		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
sdp              2324 drivers/scsi/sd.c 		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
sdp              2344 drivers/scsi/sd.c 	blk_queue_alignment_offset(sdp->request_queue, alignment);
sdp              2362 drivers/scsi/sd.c static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
sdp              2378 drivers/scsi/sd.c 		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
sdp              2401 drivers/scsi/sd.c 		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
sdp              2408 drivers/scsi/sd.c 	if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
sdp              2422 drivers/scsi/sd.c static int sd_try_rc16_first(struct scsi_device *sdp)
sdp              2424 drivers/scsi/sd.c 	if (sdp->host->max_cmd_len < 16)
sdp              2426 drivers/scsi/sd.c 	if (sdp->try_rc_10_first)
sdp              2428 drivers/scsi/sd.c 	if (sdp->scsi_level > SCSI_SPC_2)
sdp              2430 drivers/scsi/sd.c 	if (scsi_device_protection(sdp))
sdp              2442 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp              2444 drivers/scsi/sd.c 	if (sd_try_rc16_first(sdp)) {
sdp              2445 drivers/scsi/sd.c 		sector_size = read_capacity_16(sdkp, sdp, buffer);
sdp              2451 drivers/scsi/sd.c 			sector_size = read_capacity_10(sdkp, sdp, buffer);
sdp              2455 drivers/scsi/sd.c 		sector_size = read_capacity_10(sdkp, sdp, buffer);
sdp              2465 drivers/scsi/sd.c 			sector_size = read_capacity_16(sdkp, sdp, buffer);
sdp              2474 drivers/scsi/sd.c 			sdp->try_rc_10_first = 0;
sdp              2488 drivers/scsi/sd.c 	if (sdp->fix_capacity ||
sdp              2489 drivers/scsi/sd.c 	    (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
sdp              2524 drivers/scsi/sd.c 	blk_queue_logical_block_size(sdp->request_queue, sector_size);
sdp              2525 drivers/scsi/sd.c 	blk_queue_physical_block_size(sdp->request_queue,
sdp              2530 drivers/scsi/sd.c 		sdp->use_16_for_rw = 1;
sdp              2567 drivers/scsi/sd.c sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
sdp              2571 drivers/scsi/sd.c 	return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
sdp              2584 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp              2589 drivers/scsi/sd.c 	if (sdp->skip_ms_page_3f) {
sdp              2594 drivers/scsi/sd.c 	if (sdp->use_192_bytes_for_3f) {
sdp              2595 drivers/scsi/sd.c 		res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL);
sdp              2602 drivers/scsi/sd.c 		res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);
sdp              2611 drivers/scsi/sd.c 			res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);
sdp              2617 drivers/scsi/sd.c 			res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
sdp              2643 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp              2659 drivers/scsi/sd.c 	if (sdp->skip_ms_page_8) {
sdp              2660 drivers/scsi/sd.c 		if (sdp->type == TYPE_RBC)
sdp              2663 drivers/scsi/sd.c 			if (sdp->skip_ms_page_3f)
sdp              2666 drivers/scsi/sd.c 			if (sdp->use_192_bytes_for_3f)
sdp              2670 drivers/scsi/sd.c 	} else if (sdp->type == TYPE_RBC) {
sdp              2679 drivers/scsi/sd.c 	res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
sdp              2706 drivers/scsi/sd.c 	if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
sdp              2711 drivers/scsi/sd.c 		res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
sdp              2762 drivers/scsi/sd.c 		if (sdp->broken_fua) {
sdp              2799 drivers/scsi/sd.c 	if (sdp->wce_default_on) {
sdp              2819 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp              2823 drivers/scsi/sd.c 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
sdp              2829 drivers/scsi/sd.c 	res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
sdp              3049 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp              3051 drivers/scsi/sd.c 		logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
sdp              3101 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp              3114 drivers/scsi/sd.c 	if (!scsi_device_online(sdp))
sdp              3142 drivers/scsi/sd.c 		if (scsi_device_supports_vpd(sdp)) {
sdp              3165 drivers/scsi/sd.c 	dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
sdp              3169 drivers/scsi/sd.c 	q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
sdp              3172 drivers/scsi/sd.c 		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
sdp              3173 drivers/scsi/sd.c 		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
sdp              3176 drivers/scsi/sd.c 		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
sdp              3194 drivers/scsi/sd.c 	set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sdp              3288 drivers/scsi/sd.c 	struct scsi_device *sdp = to_scsi_device(dev);
sdp              3294 drivers/scsi/sd.c 	scsi_autopm_get_device(sdp);
sdp              3296 drivers/scsi/sd.c 	if (sdp->type != TYPE_DISK &&
sdp              3297 drivers/scsi/sd.c 	    sdp->type != TYPE_ZBC &&
sdp              3298 drivers/scsi/sd.c 	    sdp->type != TYPE_MOD &&
sdp              3299 drivers/scsi/sd.c 	    sdp->type != TYPE_RBC)
sdp              3303 drivers/scsi/sd.c 	if (sdp->type == TYPE_ZBC)
sdp              3306 drivers/scsi/sd.c 	SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
sdp              3320 drivers/scsi/sd.c 		sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
sdp              3326 drivers/scsi/sd.c 		sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
sdp              3330 drivers/scsi/sd.c 	sdkp->device = sdp;
sdp              3337 drivers/scsi/sd.c 	if (!sdp->request_queue->rq_timeout) {
sdp              3338 drivers/scsi/sd.c 		if (sdp->type != TYPE_MOD)
sdp              3339 drivers/scsi/sd.c 			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
sdp              3341 drivers/scsi/sd.c 			blk_queue_rq_timeout(sdp->request_queue,
sdp              3365 drivers/scsi/sd.c 	sdp->sector_size = 512;
sdp              3379 drivers/scsi/sd.c 	if (sdp->removable) {
sdp              3385 drivers/scsi/sd.c 	blk_pm_runtime_init(sdp->request_queue, dev);
sdp              3393 drivers/scsi/sd.c 		sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit);
sdp              3399 drivers/scsi/sd.c 		  sdp->removable ? "removable " : "");
sdp              3400 drivers/scsi/sd.c 	scsi_autopm_put_device(sdp);
sdp              3411 drivers/scsi/sd.c 	scsi_autopm_put_device(sdp);
sdp              3492 drivers/scsi/sd.c 	struct scsi_device *sdp = sdkp->device;
sdp              3498 drivers/scsi/sd.c 	if (sdp->start_stop_pwr_cond)
sdp              3501 drivers/scsi/sd.c 	if (!scsi_device_online(sdp))
sdp              3504 drivers/scsi/sd.c 	res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
sdp                29 drivers/scsi/sd_dif.c 	struct scsi_device *sdp = sdkp->device;
sdp                35 drivers/scsi/sd_dif.c 	dif = scsi_host_dif_capable(sdp->host, type);
sdp                36 drivers/scsi/sd_dif.c 	dix = scsi_host_dix_capable(sdp->host, type);
sdp                38 drivers/scsi/sd_dif.c 	if (!dix && scsi_host_dix_capable(sdp->host, 0)) {
sdp                33 drivers/scsi/sd_zbc.c 	struct scsi_device *sdp = sdkp->device;
sdp                44 drivers/scsi/sd_zbc.c 	zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
sdp                45 drivers/scsi/sd_zbc.c 	zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
sdp                46 drivers/scsi/sd_zbc.c 	zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
sdp                70 drivers/scsi/sd_zbc.c 	struct scsi_device *sdp = sdkp->device;
sdp                71 drivers/scsi/sd_zbc.c 	const int timeout = sdp->request_queue->rq_timeout;
sdp                85 drivers/scsi/sd_zbc.c 	result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
sdp               191 drivers/scsi/sg.c static Sg_fd *sg_add_sfp(Sg_device * sdp);
sdp               204 drivers/scsi/sg.c #define sg_printk(prefix, sdp, fmt, a...) \
sdp               205 drivers/scsi/sg.c 	sdev_prefix_printk(prefix, (sdp)->device,		\
sdp               206 drivers/scsi/sg.c 			   (sdp)->disk->disk_name, fmt, ##a)
sdp               246 drivers/scsi/sg.c open_wait(Sg_device *sdp, int flags)
sdp               251 drivers/scsi/sg.c 		while (sdp->open_cnt > 0) {
sdp               252 drivers/scsi/sg.c 			mutex_unlock(&sdp->open_rel_lock);
sdp               253 drivers/scsi/sg.c 			retval = wait_event_interruptible(sdp->open_wait,
sdp               254 drivers/scsi/sg.c 					(atomic_read(&sdp->detaching) ||
sdp               255 drivers/scsi/sg.c 					 !sdp->open_cnt));
sdp               256 drivers/scsi/sg.c 			mutex_lock(&sdp->open_rel_lock);
sdp               260 drivers/scsi/sg.c 			if (atomic_read(&sdp->detaching))
sdp               264 drivers/scsi/sg.c 		while (sdp->exclude) {
sdp               265 drivers/scsi/sg.c 			mutex_unlock(&sdp->open_rel_lock);
sdp               266 drivers/scsi/sg.c 			retval = wait_event_interruptible(sdp->open_wait,
sdp               267 drivers/scsi/sg.c 					(atomic_read(&sdp->detaching) ||
sdp               268 drivers/scsi/sg.c 					 !sdp->exclude));
sdp               269 drivers/scsi/sg.c 			mutex_lock(&sdp->open_rel_lock);
sdp               273 drivers/scsi/sg.c 			if (atomic_read(&sdp->detaching))
sdp               288 drivers/scsi/sg.c 	Sg_device *sdp;
sdp               295 drivers/scsi/sg.c 	sdp = sg_get_dev(dev);
sdp               296 drivers/scsi/sg.c 	if (IS_ERR(sdp))
sdp               297 drivers/scsi/sg.c 		return PTR_ERR(sdp);
sdp               299 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
sdp               304 drivers/scsi/sg.c 	retval = scsi_device_get(sdp->device);
sdp               308 drivers/scsi/sg.c 	retval = scsi_autopm_get_device(sdp->device);
sdp               316 drivers/scsi/sg.c 	      scsi_block_when_processing_errors(sdp->device))) {
sdp               322 drivers/scsi/sg.c 	mutex_lock(&sdp->open_rel_lock);
sdp               325 drivers/scsi/sg.c 			if (sdp->open_cnt > 0) {
sdp               330 drivers/scsi/sg.c 			if (sdp->exclude) {
sdp               336 drivers/scsi/sg.c 		retval = open_wait(sdp, flags);
sdp               343 drivers/scsi/sg.c 		sdp->exclude = true;
sdp               345 drivers/scsi/sg.c 	if (sdp->open_cnt < 1) {  /* no existing opens */
sdp               346 drivers/scsi/sg.c 		sdp->sgdebug = 0;
sdp               347 drivers/scsi/sg.c 		q = sdp->device->request_queue;
sdp               348 drivers/scsi/sg.c 		sdp->sg_tablesize = queue_max_segments(q);
sdp               350 drivers/scsi/sg.c 	sfp = sg_add_sfp(sdp);
sdp               357 drivers/scsi/sg.c 	sdp->open_cnt++;
sdp               358 drivers/scsi/sg.c 	mutex_unlock(&sdp->open_rel_lock);
sdp               362 drivers/scsi/sg.c 	kref_put(&sdp->d_ref, sg_device_destroy);
sdp               367 drivers/scsi/sg.c 		sdp->exclude = false;   /* undo if error */
sdp               368 drivers/scsi/sg.c 		wake_up_interruptible(&sdp->open_wait);
sdp               371 drivers/scsi/sg.c 	mutex_unlock(&sdp->open_rel_lock);
sdp               373 drivers/scsi/sg.c 	scsi_autopm_put_device(sdp->device);
sdp               375 drivers/scsi/sg.c 	scsi_device_put(sdp->device);
sdp               384 drivers/scsi/sg.c 	Sg_device *sdp;
sdp               387 drivers/scsi/sg.c 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
sdp               389 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
sdp               391 drivers/scsi/sg.c 	mutex_lock(&sdp->open_rel_lock);
sdp               392 drivers/scsi/sg.c 	scsi_autopm_put_device(sdp->device);
sdp               394 drivers/scsi/sg.c 	sdp->open_cnt--;
sdp               398 drivers/scsi/sg.c 	if (sdp->exclude) {
sdp               399 drivers/scsi/sg.c 		sdp->exclude = false;
sdp               400 drivers/scsi/sg.c 		wake_up_interruptible_all(&sdp->open_wait);
sdp               401 drivers/scsi/sg.c 	} else if (0 == sdp->open_cnt) {
sdp               402 drivers/scsi/sg.c 		wake_up_interruptible(&sdp->open_wait);
sdp               404 drivers/scsi/sg.c 	mutex_unlock(&sdp->open_rel_lock);
sdp               411 drivers/scsi/sg.c 	Sg_device *sdp;
sdp               427 drivers/scsi/sg.c 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
sdp               429 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
sdp               464 drivers/scsi/sg.c 		if (atomic_read(&sdp->detaching)) {
sdp               473 drivers/scsi/sg.c 			(atomic_read(&sdp->detaching) ||
sdp               475 drivers/scsi/sg.c 		if (atomic_read(&sdp->detaching)) {
sdp               608 drivers/scsi/sg.c 	Sg_device *sdp;
sdp               620 drivers/scsi/sg.c 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
sdp               622 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
sdp               624 drivers/scsi/sg.c 	if (atomic_read(&sdp->detaching))
sdp               627 drivers/scsi/sg.c 	      scsi_block_when_processing_errors(sdp->device)))
sdp               644 drivers/scsi/sg.c 		SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
sdp               660 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
sdp               793 drivers/scsi/sg.c 	Sg_device *sdp = sfp->parentdp;
sdp               821 drivers/scsi/sg.c 	if (atomic_read(&sdp->detaching)) {
sdp               842 drivers/scsi/sg.c 	blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
sdp               906 drivers/scsi/sg.c 	Sg_device *sdp;
sdp               911 drivers/scsi/sg.c 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
sdp               914 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
sdp               920 drivers/scsi/sg.c 		if (atomic_read(&sdp->detaching))
sdp               922 drivers/scsi/sg.c 		if (!scsi_block_when_processing_errors(sdp->device))
sdp               931 drivers/scsi/sg.c 			(srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
sdp               932 drivers/scsi/sg.c 		if (atomic_read(&sdp->detaching))
sdp               968 drivers/scsi/sg.c 		return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
sdp               975 drivers/scsi/sg.c 			if (atomic_read(&sdp->detaching))
sdp               977 drivers/scsi/sg.c 			__put_user((int) sdp->device->host->host_no,
sdp               979 drivers/scsi/sg.c 			__put_user((int) sdp->device->channel,
sdp               981 drivers/scsi/sg.c 			__put_user((int) sdp->device->id, &sg_idp->scsi_id);
sdp               982 drivers/scsi/sg.c 			__put_user((int) sdp->device->lun, &sg_idp->lun);
sdp               983 drivers/scsi/sg.c 			__put_user((int) sdp->device->type, &sg_idp->scsi_type);
sdp               984 drivers/scsi/sg.c 			__put_user((short) sdp->device->host->cmd_per_lun,
sdp               986 drivers/scsi/sg.c 			__put_user((short) sdp->device->queue_depth,
sdp              1023 drivers/scsi/sg.c 		return put_user(sdp->sg_tablesize, ip);
sdp              1031 drivers/scsi/sg.c 			    max_sectors_bytes(sdp->device->request_queue));
sdp              1047 drivers/scsi/sg.c 			    max_sectors_bytes(sdp->device->request_queue));
sdp              1077 drivers/scsi/sg.c 		val = (sdp->device ? 1 : 0);
sdp              1099 drivers/scsi/sg.c 		if (atomic_read(&sdp->detaching))
sdp              1101 drivers/scsi/sg.c 		return put_user(sdp->device->host->hostt->emulated, ip);
sdp              1103 drivers/scsi/sg.c 		if (atomic_read(&sdp->detaching))
sdp              1105 drivers/scsi/sg.c 		return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
sdp              1110 drivers/scsi/sg.c 		sdp->sgdebug = (char) val;
sdp              1113 drivers/scsi/sg.c 		return put_user(max_sectors_bytes(sdp->device->request_queue),
sdp              1116 drivers/scsi/sg.c 		return blk_trace_setup(sdp->device->request_queue,
sdp              1117 drivers/scsi/sg.c 				       sdp->disk->disk_name,
sdp              1118 drivers/scsi/sg.c 				       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
sdp              1121 drivers/scsi/sg.c 		return blk_trace_startstop(sdp->device->request_queue, 1);
sdp              1123 drivers/scsi/sg.c 		return blk_trace_startstop(sdp->device->request_queue, 0);
sdp              1125 drivers/scsi/sg.c 		return blk_trace_remove(sdp->device->request_queue);
sdp              1131 drivers/scsi/sg.c 		if (atomic_read(&sdp->detaching))
sdp              1140 drivers/scsi/sg.c 	result = scsi_ioctl_block_when_processing_errors(sdp->device,
sdp              1144 drivers/scsi/sg.c 	return scsi_ioctl(sdp->device, cmd_in, p);
sdp              1150 drivers/scsi/sg.c 	Sg_device *sdp;
sdp              1154 drivers/scsi/sg.c 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
sdp              1157 drivers/scsi/sg.c 	sdev = sdp->device;
sdp              1174 drivers/scsi/sg.c 	Sg_device *sdp;
sdp              1183 drivers/scsi/sg.c 	sdp = sfp->parentdp;
sdp              1184 drivers/scsi/sg.c 	if (!sdp)
sdp              1196 drivers/scsi/sg.c 	if (atomic_read(&sdp->detaching))
sdp              1203 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
sdp              1211 drivers/scsi/sg.c 	Sg_device *sdp;
sdp              1214 drivers/scsi/sg.c 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
sdp              1216 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
sdp              1324 drivers/scsi/sg.c 	Sg_device *sdp;
sdp              1338 drivers/scsi/sg.c 	sdp = sfp->parentdp;
sdp              1339 drivers/scsi/sg.c 	if (unlikely(atomic_read(&sdp->detaching)))
sdp              1346 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
sdp              1361 drivers/scsi/sg.c 		if ((sdp->sgdebug > 0) &&
sdp              1364 drivers/scsi/sg.c 			__scsi_print_sense(sdp->device, __func__, sense,
sdp              1372 drivers/scsi/sg.c 		    && sdp->device->removable) {
sdp              1375 drivers/scsi/sg.c 			sdp->device->changed = 1;
sdp              1441 drivers/scsi/sg.c 	Sg_device *sdp;
sdp              1446 drivers/scsi/sg.c 	sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
sdp              1447 drivers/scsi/sg.c 	if (!sdp) {
sdp              1456 drivers/scsi/sg.c 	error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT);
sdp              1476 drivers/scsi/sg.c 	sdp->disk = disk;
sdp              1477 drivers/scsi/sg.c 	sdp->device = scsidp;
sdp              1478 drivers/scsi/sg.c 	mutex_init(&sdp->open_rel_lock);
sdp              1479 drivers/scsi/sg.c 	INIT_LIST_HEAD(&sdp->sfds);
sdp              1480 drivers/scsi/sg.c 	init_waitqueue_head(&sdp->open_wait);
sdp              1481 drivers/scsi/sg.c 	atomic_set(&sdp->detaching, 0);
sdp              1482 drivers/scsi/sg.c 	rwlock_init(&sdp->sfd_lock);
sdp              1483 drivers/scsi/sg.c 	sdp->sg_tablesize = queue_max_segments(q);
sdp              1484 drivers/scsi/sg.c 	sdp->index = k;
sdp              1485 drivers/scsi/sg.c 	kref_init(&sdp->d_ref);
sdp              1493 drivers/scsi/sg.c 		kfree(sdp);
sdp              1496 drivers/scsi/sg.c 	return sdp;
sdp              1504 drivers/scsi/sg.c 	Sg_device *sdp = NULL;
sdp              1525 drivers/scsi/sg.c 	sdp = sg_alloc(disk, scsidp);
sdp              1526 drivers/scsi/sg.c 	if (IS_ERR(sdp)) {
sdp              1528 drivers/scsi/sg.c 		error = PTR_ERR(sdp);
sdp              1532 drivers/scsi/sg.c 	error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
sdp              1536 drivers/scsi/sg.c 	sdp->cdev = cdev;
sdp              1542 drivers/scsi/sg.c 						      sdp->index),
sdp              1543 drivers/scsi/sg.c 						sdp, "%s", disk->disk_name);
sdp              1553 drivers/scsi/sg.c 			       "to sg%d\n", __func__, sdp->index);
sdp              1558 drivers/scsi/sg.c 		    "type %d\n", sdp->index, scsidp->type);
sdp              1560 drivers/scsi/sg.c 	dev_set_drvdata(cl_dev, sdp);
sdp              1566 drivers/scsi/sg.c 	idr_remove(&sg_index_idr, sdp->index);
sdp              1568 drivers/scsi/sg.c 	kfree(sdp);
sdp              1580 drivers/scsi/sg.c 	struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
sdp              1589 drivers/scsi/sg.c 	idr_remove(&sg_index_idr, sdp->index);
sdp              1593 drivers/scsi/sg.c 		sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
sdp              1595 drivers/scsi/sg.c 	put_disk(sdp->disk);
sdp              1596 drivers/scsi/sg.c 	kfree(sdp);
sdp              1603 drivers/scsi/sg.c 	Sg_device *sdp = dev_get_drvdata(cl_dev);
sdp              1608 drivers/scsi/sg.c 	if (!sdp)
sdp              1611 drivers/scsi/sg.c 	val = atomic_inc_return(&sdp->detaching);
sdp              1615 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
sdp              1618 drivers/scsi/sg.c 	read_lock_irqsave(&sdp->sfd_lock, iflags);
sdp              1619 drivers/scsi/sg.c 	list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
sdp              1623 drivers/scsi/sg.c 	wake_up_interruptible_all(&sdp->open_wait);
sdp              1624 drivers/scsi/sg.c 	read_unlock_irqrestore(&sdp->sfd_lock, iflags);
sdp              1627 drivers/scsi/sg.c 	device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
sdp              1628 drivers/scsi/sg.c 	cdev_del(sdp->cdev);
sdp              1629 drivers/scsi/sg.c 	sdp->cdev = NULL;
sdp              1631 drivers/scsi/sg.c 	kref_put(&sdp->d_ref, sg_device_destroy);
sdp              1878 drivers/scsi/sg.c 	struct sg_device *sdp = sfp->parentdp;
sdp              1904 drivers/scsi/sg.c 	if (sdp->device->host->unchecked_isa_dma)
sdp              2147 drivers/scsi/sg.c sg_add_sfp(Sg_device * sdp)
sdp              2167 drivers/scsi/sg.c 	sfp->parentdp = sdp;
sdp              2168 drivers/scsi/sg.c 	write_lock_irqsave(&sdp->sfd_lock, iflags);
sdp              2169 drivers/scsi/sg.c 	if (atomic_read(&sdp->detaching)) {
sdp              2170 drivers/scsi/sg.c 		write_unlock_irqrestore(&sdp->sfd_lock, iflags);
sdp              2174 drivers/scsi/sg.c 	list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
sdp              2175 drivers/scsi/sg.c 	write_unlock_irqrestore(&sdp->sfd_lock, iflags);
sdp              2176 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
sdp              2182 drivers/scsi/sg.c 			max_sectors_bytes(sdp->device->request_queue));
sdp              2184 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
sdp              2189 drivers/scsi/sg.c 	kref_get(&sdp->d_ref);
sdp              2198 drivers/scsi/sg.c 	struct sg_device *sdp = sfp->parentdp;
sdp              2213 drivers/scsi/sg.c 		SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
sdp              2220 drivers/scsi/sg.c 	SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
sdp              2224 drivers/scsi/sg.c 	scsi_device_put(sdp->device);
sdp              2225 drivers/scsi/sg.c 	kref_put(&sdp->d_ref, sg_device_destroy);
sdp              2233 drivers/scsi/sg.c 	struct sg_device *sdp = sfp->parentdp;
sdp              2236 drivers/scsi/sg.c 	write_lock_irqsave(&sdp->sfd_lock, iflags);
sdp              2238 drivers/scsi/sg.c 	write_unlock_irqrestore(&sdp->sfd_lock, iflags);
sdp              2278 drivers/scsi/sg.c 	struct sg_device *sdp;
sdp              2282 drivers/scsi/sg.c 	sdp = sg_lookup_dev(dev);
sdp              2283 drivers/scsi/sg.c 	if (!sdp)
sdp              2284 drivers/scsi/sg.c 		sdp = ERR_PTR(-ENXIO);
sdp              2285 drivers/scsi/sg.c 	else if (atomic_read(&sdp->detaching)) {
sdp              2289 drivers/scsi/sg.c 		sdp = ERR_PTR(-ENODEV);
sdp              2291 drivers/scsi/sg.c 		kref_get(&sdp->d_ref);
sdp              2294 drivers/scsi/sg.c 	return sdp;
sdp              2474 drivers/scsi/sg.c 	Sg_device *sdp;
sdp              2479 drivers/scsi/sg.c 	sdp = it ? sg_lookup_dev(it->index) : NULL;
sdp              2480 drivers/scsi/sg.c 	if ((NULL == sdp) || (NULL == sdp->device) ||
sdp              2481 drivers/scsi/sg.c 	    (atomic_read(&sdp->detaching)))
sdp              2484 drivers/scsi/sg.c 		scsidp = sdp->device;
sdp              2500 drivers/scsi/sg.c 	Sg_device *sdp;
sdp              2505 drivers/scsi/sg.c 	sdp = it ? sg_lookup_dev(it->index) : NULL;
sdp              2506 drivers/scsi/sg.c 	scsidp = sdp ? sdp->device : NULL;
sdp              2507 drivers/scsi/sg.c 	if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
sdp              2517 drivers/scsi/sg.c static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
sdp              2527 drivers/scsi/sg.c 	list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
sdp              2535 drivers/scsi/sg.c 			   (int) sdp->device->host->unchecked_isa_dma);
sdp              2583 drivers/scsi/sg.c 	Sg_device *sdp;
sdp              2591 drivers/scsi/sg.c 	sdp = it ? sg_lookup_dev(it->index) : NULL;
sdp              2592 drivers/scsi/sg.c 	if (NULL == sdp)
sdp              2594 drivers/scsi/sg.c 	read_lock(&sdp->sfd_lock);
sdp              2595 drivers/scsi/sg.c 	if (!list_empty(&sdp->sfds)) {
sdp              2596 drivers/scsi/sg.c 		seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
sdp              2597 drivers/scsi/sg.c 		if (atomic_read(&sdp->detaching))
sdp              2599 drivers/scsi/sg.c 		else if (sdp->device) {
sdp              2600 drivers/scsi/sg.c 			struct scsi_device *scsidp = sdp->device;
sdp              2609 drivers/scsi/sg.c 			   sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
sdp              2610 drivers/scsi/sg.c 		sg_proc_debug_helper(s, sdp);
sdp              2612 drivers/scsi/sg.c 	read_unlock(&sdp->sfd_lock);
sdp              7510 drivers/scsi/ufs/ufshcd.c ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
sdp              7527 drivers/scsi/ufs/ufshcd.c 	ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
sdp              7552 drivers/scsi/ufs/ufshcd.c 	struct scsi_device *sdp;
sdp              7557 drivers/scsi/ufs/ufshcd.c 	sdp = hba->sdev_ufs_device;
sdp              7558 drivers/scsi/ufs/ufshcd.c 	if (sdp) {
sdp              7559 drivers/scsi/ufs/ufshcd.c 		ret = scsi_device_get(sdp);
sdp              7560 drivers/scsi/ufs/ufshcd.c 		if (!ret && !scsi_device_online(sdp)) {
sdp              7562 drivers/scsi/ufs/ufshcd.c 			scsi_device_put(sdp);
sdp              7580 drivers/scsi/ufs/ufshcd.c 		ret = ufshcd_send_request_sense(hba, sdp);
sdp              7594 drivers/scsi/ufs/ufshcd.c 	ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
sdp              7597 drivers/scsi/ufs/ufshcd.c 		sdev_printk(KERN_WARNING, sdp,
sdp              7601 drivers/scsi/ufs/ufshcd.c 			scsi_print_sense_hdr(sdp, NULL, &sshdr);
sdp              7607 drivers/scsi/ufs/ufshcd.c 	scsi_device_put(sdp);
sdp                12 fs/gfs2/acl.h  #define GFS2_ACL_MAX_ENTRIES(sdp) ((300 << (sdp)->sd_sb.sb_bsize_shift) >> 12)
sdp                93 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp                98 fs/gfs2/aops.c 	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
sdp               159 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               167 fs/gfs2/aops.c 		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
sdp               185 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               188 fs/gfs2/aops.c 	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
sdp               212 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
sdp               222 fs/gfs2/aops.c 		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
sdp               245 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               250 fs/gfs2/aops.c 	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
sdp               319 fs/gfs2/aops.c 	gfs2_trans_end(sdp);
sdp               418 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
sdp               423 fs/gfs2/aops.c 		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
sdp               486 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
sdp               500 fs/gfs2/aops.c 	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
sdp               604 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               617 fs/gfs2/aops.c 	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
sdp               628 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               629 fs/gfs2/aops.c 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
sdp               630 fs/gfs2/aops.c 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
sdp               631 fs/gfs2/aops.c 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
sdp               632 fs/gfs2/aops.c 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
sdp               636 fs/gfs2/aops.c 	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
sdp               640 fs/gfs2/aops.c 	fs_total = gfs2_ri_total(sdp);
sdp               644 fs/gfs2/aops.c 	spin_lock(&sdp->sd_statfs_spin);
sdp               651 fs/gfs2/aops.c 	spin_unlock(&sdp->sd_statfs_spin);
sdp               652 fs/gfs2/aops.c 	fs_warn(sdp, "File system extended by %llu blocks.\n",
sdp               654 fs/gfs2/aops.c 	gfs2_statfs_change(sdp, new_free, new_free, 0);
sdp               658 fs/gfs2/aops.c 	update_statfs(sdp, m_bh, l_bh);
sdp               663 fs/gfs2/aops.c 	sdp->sd_rindex_uptodate = 0;
sdp               664 fs/gfs2/aops.c 	gfs2_trans_end(sdp);
sdp               707 fs/gfs2/aops.c static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
sdp               712 fs/gfs2/aops.c 	gfs2_log_lock(sdp);
sdp               725 fs/gfs2/aops.c 	gfs2_log_unlock(sdp);
sdp               732 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
sdp               750 fs/gfs2/aops.c 			gfs2_discard(sdp, bh);
sdp               773 fs/gfs2/aops.c 	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
sdp               790 fs/gfs2/aops.c 	gfs2_log_lock(sdp);
sdp               791 fs/gfs2/aops.c 	spin_lock(&sdp->sd_ail_lock);
sdp               803 fs/gfs2/aops.c 	spin_unlock(&sdp->sd_ail_lock);
sdp               809 fs/gfs2/aops.c 			gfs2_assert_warn(sdp, bd->bd_bh == bh);
sdp               819 fs/gfs2/aops.c 	gfs2_log_unlock(sdp);
sdp               824 fs/gfs2/aops.c 	spin_unlock(&sdp->sd_ail_lock);
sdp               825 fs/gfs2/aops.c 	gfs2_log_unlock(sdp);
sdp               238 fs/gfs2/bmap.c static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
sdp               245 fs/gfs2/bmap.c 		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
sdp               393 fs/gfs2/bmap.c static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
sdp               401 fs/gfs2/bmap.c 		factor *= sdp->sd_inptrs;
sdp               474 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               487 fs/gfs2/bmap.c 		factor *= sdp->sd_inptrs;
sdp               497 fs/gfs2/bmap.c 		ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
sdp               527 fs/gfs2/bmap.c 		factor *= sdp->sd_inptrs;
sdp               532 fs/gfs2/bmap.c 			if (mp->mp_list[hgt] >= sdp->sd_inptrs)
sdp               535 fs/gfs2/bmap.c 			if (mp->mp_list[hgt] >= sdp->sd_diptrs)
sdp               546 fs/gfs2/bmap.c 			do_div(factor, sdp->sd_inptrs);
sdp               667 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               715 fs/gfs2/bmap.c 			gfs2_trans_remove_revoke(sdp, bn, n);
sdp               800 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               813 fs/gfs2/bmap.c 			sdp->sd_inptrs : sdp->sd_diptrs;
sdp               847 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               903 fs/gfs2/bmap.c 	while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
sdp               905 fs/gfs2/bmap.c 	find_metapath(sdp, lblock, mp, height);
sdp               993 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1000 fs/gfs2/bmap.c 	if (&ip->i_inode == sdp->sd_rindex) {
sdp              1001 fs/gfs2/bmap.c 		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
sdp              1020 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1022 fs/gfs2/bmap.c 	if (&ip->i_inode == sdp->sd_rindex) {
sdp              1023 fs/gfs2/bmap.c 		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
sdp              1034 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1038 fs/gfs2/bmap.c 	return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
sdp              1047 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1055 fs/gfs2/bmap.c 	gfs2_trans_end(sdp);
sdp              1069 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1098 fs/gfs2/bmap.c 		if (inode == sdp->sd_rindex)
sdp              1102 fs/gfs2/bmap.c 		ret = gfs2_trans_begin(sdp, rblocks,
sdp              1121 fs/gfs2/bmap.c 				gfs2_trans_end(sdp);
sdp              1132 fs/gfs2/bmap.c 		gfs2_trans_end(sdp);
sdp              1140 fs/gfs2/bmap.c 	gfs2_trans_end(sdp);
sdp              1208 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1226 fs/gfs2/bmap.c 	if (inode == sdp->sd_rindex)
sdp              1374 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1375 fs/gfs2/bmap.c 	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
sdp              1398 fs/gfs2/bmap.c 		gfs2_trans_end(sdp);
sdp              1399 fs/gfs2/bmap.c 		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
sdp              1410 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1417 fs/gfs2/bmap.c 		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
sdp              1419 fs/gfs2/bmap.c 		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
sdp              1455 fs/gfs2/bmap.c 		gfs2_trans_end(sdp);
sdp              1498 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              1512 fs/gfs2/bmap.c 		gfs2_assert_withdraw(sdp,
sdp              1530 fs/gfs2/bmap.c 			rgd = gfs2_blk2rgrpd(sdp, bn, true);
sdp              1557 fs/gfs2/bmap.c 			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
sdp              1559 fs/gfs2/bmap.c 					atomic_read(&sdp->sd_log_thresh2);
sdp              1566 fs/gfs2/bmap.c 				revokes += sdp->sd_inptrs;
sdp              1567 fs/gfs2/bmap.c 			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
sdp              1575 fs/gfs2/bmap.c 		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
sdp              1636 fs/gfs2/bmap.c 			gfs2_trans_end(sdp);
sdp              1663 fs/gfs2/bmap.c static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
sdp              1720 fs/gfs2/bmap.c static inline bool walk_done(struct gfs2_sbd *sdp,
sdp              1732 fs/gfs2/bmap.c 		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
sdp              1754 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              1755 fs/gfs2/bmap.c 	u64 maxsize = sdp->sd_heightsize[ip->i_height];
sdp              1759 fs/gfs2/bmap.c 	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
sdp              1807 fs/gfs2/bmap.c 		find_metapath(sdp, lend, &mp, ip->i_height);
sdp              1818 fs/gfs2/bmap.c 	find_metapath(sdp, lblock, &mp, ip->i_height);
sdp              1848 fs/gfs2/bmap.c 	ret = gfs2_rindex_update(sdp);
sdp              1865 fs/gfs2/bmap.c 			gfs2_assert_withdraw(sdp, bh);
sdp              1866 fs/gfs2/bmap.c 			if (gfs2_assert_withdraw(sdp,
sdp              1868 fs/gfs2/bmap.c 				fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u,"
sdp              1875 fs/gfs2/bmap.c 			if (gfs2_metatype_check(sdp, bh,
sdp              1927 fs/gfs2/bmap.c 			if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
sdp              1932 fs/gfs2/bmap.c 			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
sdp              1975 fs/gfs2/bmap.c 			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
sdp              1985 fs/gfs2/bmap.c 			ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
sdp              1991 fs/gfs2/bmap.c 		gfs2_statfs_change(sdp, 0, +btotal, 0);
sdp              1998 fs/gfs2/bmap.c 		gfs2_trans_end(sdp);
sdp              2006 fs/gfs2/bmap.c 		gfs2_trans_end(sdp);
sdp              2017 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              2021 fs/gfs2/bmap.c 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
sdp              2046 fs/gfs2/bmap.c 	gfs2_trans_end(sdp);
sdp              2110 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              2127 fs/gfs2/bmap.c 	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
sdp              2130 fs/gfs2/bmap.c 				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
sdp              2152 fs/gfs2/bmap.c 	gfs2_trans_end(sdp);
sdp              2286 fs/gfs2/bmap.c int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
sdp              2292 fs/gfs2/bmap.c 	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
sdp              2318 fs/gfs2/bmap.c 	fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
sdp              2323 fs/gfs2/bmap.c 	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
sdp              2327 fs/gfs2/bmap.c 	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
sdp              2346 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              2361 fs/gfs2/bmap.c 	shift = sdp->sd_sb.sb_bsize_shift;
sdp              2363 fs/gfs2/bmap.c 	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
sdp              2365 fs/gfs2/bmap.c 	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
sdp              2366 fs/gfs2/bmap.c 	if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
sdp              2407 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              2408 fs/gfs2/bmap.c 	loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
sdp              2432 fs/gfs2/bmap.c 		gfs2_trans_end(sdp);
sdp              2433 fs/gfs2/bmap.c 		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
sdp              2444 fs/gfs2/bmap.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              2448 fs/gfs2/bmap.c 		error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
sdp              2451 fs/gfs2/bmap.c 		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
sdp              2493 fs/gfs2/bmap.c 		gfs2_trans_end(sdp);
sdp              2500 fs/gfs2/bmap.c 		gfs2_trans_end(sdp);
sdp                33 fs/gfs2/bmap.h 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp                37 fs/gfs2/bmap.h 	*data_blocks = (len >> sdp->sd_sb.sb_bsize_shift) + 3;
sdp                38 fs/gfs2/bmap.h 	*ind_blocks = 3 * (sdp->sd_max_height - 1);
sdp                40 fs/gfs2/bmap.h 	for (tmp = *data_blocks; tmp > sdp->sd_diptrs;) {
sdp                41 fs/gfs2/bmap.h 		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
sdp                61 fs/gfs2/bmap.h extern int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
sdp                36 fs/gfs2/dentry.c 	struct gfs2_sbd *sdp;
sdp                48 fs/gfs2/dentry.c 	sdp = GFS2_SB(d_inode(parent));
sdp                58 fs/gfs2/dentry.c 	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) {
sdp               158 fs/gfs2/dir.c  	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               174 fs/gfs2/dir.c  	if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
sdp               184 fs/gfs2/dir.c  	o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);
sdp               191 fs/gfs2/dir.c  		if (amount > sdp->sd_sb.sb_bsize - o)
sdp               192 fs/gfs2/dir.c  			amount = sdp->sd_sb.sb_bsize - o;
sdp               201 fs/gfs2/dir.c  			if (gfs2_assert_withdraw(sdp, dblock))
sdp               205 fs/gfs2/dir.c  		if (amount == sdp->sd_jbsize || new)
sdp               273 fs/gfs2/dir.c  	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               283 fs/gfs2/dir.c  	if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
sdp               287 fs/gfs2/dir.c  	o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);
sdp               295 fs/gfs2/dir.c  		if (amount > sdp->sd_sb.sb_bsize - o)
sdp               296 fs/gfs2/dir.c  			amount = sdp->sd_sb.sb_bsize - o;
sdp               311 fs/gfs2/dir.c  		error = gfs2_metatype_check(sdp, bh, GFS2_METATYPE_JD);
sdp               506 fs/gfs2/dir.c  static int gfs2_check_dirent(struct gfs2_sbd *sdp,
sdp               529 fs/gfs2/dir.c  	fs_warn(sdp, "%s: %s (%s)\n",
sdp               534 fs/gfs2/dir.c  static int gfs2_dirent_offset(struct gfs2_sbd *sdp, const void *buf)
sdp               553 fs/gfs2/dir.c  	fs_warn(sdp, "%s: wrong block type %u\n", __func__,
sdp               912 fs/gfs2/dir.c  	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               934 fs/gfs2/dir.c  	gfs2_assert(sdp, dip->i_entries < BIT(16));
sdp               978 fs/gfs2/dir.c  	for (x = sdp->sd_hash_ptrs; x--; lp++)
sdp               981 fs/gfs2/dir.c  	i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
sdp               985 fs/gfs2/dir.c  	for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
sdp              1338 fs/gfs2/dir.c  static int gfs2_set_cookies(struct gfs2_sbd *sdp, struct buffer_head *bh,
sdp              1351 fs/gfs2/dir.c  		if (!sdp->sd_args.ar_loccookie)
sdp              1354 fs/gfs2/dir.c  			(bh->b_data + gfs2_dirent_offset(sdp, bh->b_data));
sdp              1356 fs/gfs2/dir.c  		offset += leaf_nr * sdp->sd_max_dents_per_leaf;
sdp              1376 fs/gfs2/dir.c  	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1400 fs/gfs2/dir.c  	if (*depth < GFS2_DIR_MAX_DEPTH || !sdp->sd_args.ar_loccookie) {
sdp              1438 fs/gfs2/dir.c  				fs_warn(sdp, "Number of entries corrupt in dir "
sdp              1448 fs/gfs2/dir.c  			sort_id = gfs2_set_cookies(sdp, bh, leaf, &darr[offset],
sdp              1572 fs/gfs2/dir.c  	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1607 fs/gfs2/dir.c  			fs_warn(sdp, "Number of entries corrupt in dir %llu, "
sdp              1616 fs/gfs2/dir.c  		gfs2_set_cookies(sdp, dibh, 0, darr, dip->i_entries);
sdp              1975 fs/gfs2/dir.c  	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
sdp              1985 fs/gfs2/dir.c  	error = gfs2_rindex_update(sdp);
sdp              2032 fs/gfs2/dir.c  	error = gfs2_trans_begin(sdp,
sdp              2033 fs/gfs2/dir.c  			rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) +
sdp              2053 fs/gfs2/dir.c  		rgd = gfs2_blk2rgrpd(sdp, blk, true);
sdp              2078 fs/gfs2/dir.c  	gfs2_trans_end(sdp);
sdp              2159 fs/gfs2/dir.c  	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              2170 fs/gfs2/dir.c  		da->nr_blocks = sdp->sd_max_dirres;
sdp               134 fs/gfs2/export.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp               137 fs/gfs2/export.c 	inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
sdp               221 fs/gfs2/file.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               265 fs/gfs2/file.c 			gfs2_log_flush(sdp, ip->i_gl,
sdp               277 fs/gfs2/file.c 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
sdp               291 fs/gfs2/file.c 	gfs2_trans_end(sdp);
sdp               334 fs/gfs2/file.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               336 fs/gfs2/file.c 	if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
sdp               398 fs/gfs2/file.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               400 fs/gfs2/file.c 	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
sdp               450 fs/gfs2/file.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               487 fs/gfs2/file.c 	ret = gfs2_rindex_update(sdp);
sdp               507 fs/gfs2/file.c 	ret = gfs2_trans_begin(sdp, rblocks, 0);
sdp               536 fs/gfs2/file.c 	gfs2_trans_end(sdp);
sdp               969 fs/gfs2/file.c 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               970 fs/gfs2/file.c 	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
sdp               972 fs/gfs2/file.c 	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
sdp               973 fs/gfs2/file.c 		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
sdp               979 fs/gfs2/file.c 	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
sdp               989 fs/gfs2/file.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               997 fs/gfs2/file.c 	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
sdp               998 fs/gfs2/file.c 	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
sdp              1001 fs/gfs2/file.c 	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
sdp              1006 fs/gfs2/file.c 	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
sdp              1011 fs/gfs2/file.c 		bytes = sdp->sd_sb.sb_bsize;
sdp              1068 fs/gfs2/file.c 		error = gfs2_trans_begin(sdp, rblocks,
sdp              1074 fs/gfs2/file.c 		gfs2_trans_end(sdp);
sdp              1105 fs/gfs2/file.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1113 fs/gfs2/file.c 	if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
sdp              1187 fs/gfs2/file.c 	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
sdp              1188 fs/gfs2/file.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp              1200 fs/gfs2/file.c 	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags))) {
sdp                52 fs/gfs2/glock.c 	struct gfs2_sbd *sdp;		/* incore superblock           */
sdp               138 fs/gfs2/glock.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               145 fs/gfs2/glock.c 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
sdp               146 fs/gfs2/glock.c 		wake_up(&sdp->sd_glock_wait);
sdp               239 fs/gfs2/glock.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               249 fs/gfs2/glock.c 	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
sdp               309 fs/gfs2/glock.c 		struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;
sdp               311 fs/gfs2/glock.c 		wake_up(&sdp->sd_async_glock_wait);
sdp               548 fs/gfs2/glock.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               552 fs/gfs2/glock.c 	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) &&
sdp               578 fs/gfs2/glock.c 	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
sdp               580 fs/gfs2/glock.c 		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
sdp               583 fs/gfs2/glock.c 		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
sdp               588 fs/gfs2/glock.c 			fs_err(sdp, "lm_lock ret %d\n", ret);
sdp               590 fs/gfs2/glock.c 						   &sdp->sd_flags));
sdp               678 fs/gfs2/glock.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               688 fs/gfs2/glock.c 	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
sdp               791 fs/gfs2/glock.c int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
sdp               795 fs/gfs2/glock.c 	struct super_block *s = sdp->sd_vfs;
sdp               798 fs/gfs2/glock.c 				    .ln_sbd = sdp };
sdp               830 fs/gfs2/glock.c 	atomic_inc(&sdp->sd_glock_disposal);
sdp               842 fs/gfs2/glock.c 	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
sdp               876 fs/gfs2/glock.c 	atomic_dec(&sdp->sd_glock_disposal);
sdp               989 fs/gfs2/glock.c 	struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
sdp              1003 fs/gfs2/glock.c 	if (!wait_event_timeout(sdp->sd_async_glock_wait,
sdp              1113 fs/gfs2/glock.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp              1160 fs/gfs2/glock.c 		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
sdp              1161 fs/gfs2/glock.c 			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
sdp              1167 fs/gfs2/glock.c 	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
sdp              1168 fs/gfs2/glock.c 	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
sdp              1169 fs/gfs2/glock.c 	fs_err(sdp, "lock type: %d req lock state : %d\n",
sdp              1171 fs/gfs2/glock.c 	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
sdp              1172 fs/gfs2/glock.c 	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
sdp              1173 fs/gfs2/glock.c 	fs_err(sdp, "lock type: %d req lock state : %d\n",
sdp              1191 fs/gfs2/glock.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp              1194 fs/gfs2/glock.c 	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
sdp              1308 fs/gfs2/glock.c int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
sdp              1315 fs/gfs2/glock.c 	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
sdp              1645 fs/gfs2/glock.c static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
sdp              1656 fs/gfs2/glock.c 			if (gl->gl_name.ln_sbd == sdp &&
sdp              1705 fs/gfs2/glock.c void gfs2_glock_thaw(struct gfs2_sbd *sdp)
sdp              1707 fs/gfs2/glock.c 	glock_hash_walk(thaw_glock, sdp);
sdp              1730 fs/gfs2/glock.c void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
sdp              1732 fs/gfs2/glock.c 	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
sdp              1734 fs/gfs2/glock.c 	glock_hash_walk(clear_glock, sdp);
sdp              1736 fs/gfs2/glock.c 	wait_event_timeout(sdp->sd_glock_wait,
sdp              1737 fs/gfs2/glock.c 			   atomic_read(&sdp->sd_glock_disposal) == 0,
sdp              1739 fs/gfs2/glock.c 	glock_hash_walk(dump_glock_func, sdp);
sdp              1888 fs/gfs2/glock.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp              1889 fs/gfs2/glock.c 	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
sdp              1892 fs/gfs2/glock.c 	if (fsid && sdp) /* safety precaution */
sdp              1893 fs/gfs2/glock.c 		sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
sdp              1963 fs/gfs2/glock.c 	struct gfs2_sbd *sdp = seq->private;
sdp              1976 fs/gfs2/glock.c                 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
sdp              2053 fs/gfs2/glock.c 		if (gl->gl_name.ln_sbd != gi->sdp)
sdp              2171 fs/gfs2/glock.c 		gi->sdp = inode->i_private;
sdp              2241 fs/gfs2/glock.c void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
sdp              2243 fs/gfs2/glock.c 	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
sdp              2245 fs/gfs2/glock.c 	debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
sdp              2248 fs/gfs2/glock.c 	debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
sdp              2251 fs/gfs2/glock.c 	debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
sdp              2255 fs/gfs2/glock.c void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
sdp              2257 fs/gfs2/glock.c 	debugfs_remove_recursive(sdp->debugfs_dir);
sdp              2258 fs/gfs2/glock.c 	sdp->debugfs_dir = NULL;
sdp               122 fs/gfs2/glock.h 	int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
sdp               123 fs/gfs2/glock.h 	void (*lm_first_done) (struct gfs2_sbd *sdp);
sdp               124 fs/gfs2/glock.h 	void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
sdp               126 fs/gfs2/glock.h 	void (*lm_unmount) (struct gfs2_sbd *sdp);
sdp               127 fs/gfs2/glock.h 	void (*lm_withdraw) (struct gfs2_sbd *sdp);
sdp               179 fs/gfs2/glock.h extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
sdp               197 fs/gfs2/glock.h extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
sdp               238 fs/gfs2/glock.h extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
sdp               240 fs/gfs2/glock.h extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
sdp               247 fs/gfs2/glock.h extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
sdp               248 fs/gfs2/glock.h extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
sdp                56 fs/gfs2/glops.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp                62 fs/gfs2/glops.c 	gfs2_log_lock(sdp);
sdp                63 fs/gfs2/glops.c 	spin_lock(&sdp->sd_ail_lock);
sdp                73 fs/gfs2/glops.c 		gfs2_trans_add_revoke(sdp, bd);
sdp                77 fs/gfs2/glops.c 	spin_unlock(&sdp->sd_ail_lock);
sdp                78 fs/gfs2/glops.c 	gfs2_log_unlock(sdp);
sdp                84 fs/gfs2/glops.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               108 fs/gfs2/glops.c 		gfs2_log_lock(sdp);
sdp               109 fs/gfs2/glops.c 		have_revokes = !list_empty(&sdp->sd_log_revokes);
sdp               110 fs/gfs2/glops.c 		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
sdp               111 fs/gfs2/glops.c 		gfs2_log_unlock(sdp);
sdp               115 fs/gfs2/glops.c 			log_flush_wait(sdp);
sdp               122 fs/gfs2/glops.c 	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
sdp               124 fs/gfs2/glops.c 	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
sdp               131 fs/gfs2/glops.c 	gfs2_trans_end(sdp);
sdp               133 fs/gfs2/glops.c 	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
sdp               139 fs/gfs2/glops.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               141 fs/gfs2/glops.c 	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
sdp               148 fs/gfs2/glops.c 		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
sdp               150 fs/gfs2/glops.c 	ret = gfs2_trans_begin(sdp, 0, max_revokes);
sdp               154 fs/gfs2/glops.c 	gfs2_trans_end(sdp);
sdp               155 fs/gfs2/glops.c 	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
sdp               170 fs/gfs2/glops.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               171 fs/gfs2/glops.c 	struct address_space *mapping = &sdp->sd_aspace;
sdp               185 fs/gfs2/glops.c 	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
sdp               211 fs/gfs2/glops.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               212 fs/gfs2/glops.c 	struct address_space *mapping = &sdp->sd_aspace;
sdp               219 fs/gfs2/glops.c 	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
sdp               353 fs/gfs2/glops.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               355 fs/gfs2/glops.c 	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
sdp               455 fs/gfs2/glops.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               474 fs/gfs2/glops.c 		spin_lock(&sdp->sd_trunc_lock);
sdp               476 fs/gfs2/glops.c 			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
sdp               477 fs/gfs2/glops.c 		spin_unlock(&sdp->sd_trunc_lock);
sdp               478 fs/gfs2/glops.c 		wake_up(&sdp->sd_quota_wait);
sdp               527 fs/gfs2/glops.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               530 fs/gfs2/glops.c 	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
sdp               531 fs/gfs2/glops.c 		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
sdp               532 fs/gfs2/glops.c 		error = freeze_super(sdp->sd_vfs);
sdp               534 fs/gfs2/glops.c 			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
sdp               536 fs/gfs2/glops.c 			gfs2_assert_withdraw(sdp, 0);
sdp               538 fs/gfs2/glops.c 		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
sdp               539 fs/gfs2/glops.c 		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
sdp               552 fs/gfs2/glops.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               553 fs/gfs2/glops.c 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
sdp               558 fs/gfs2/glops.c 	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
sdp               561 fs/gfs2/glops.c 		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
sdp               563 fs/gfs2/glops.c 			gfs2_consist(sdp);
sdp               565 fs/gfs2/glops.c 			gfs2_consist(sdp);
sdp               568 fs/gfs2/glops.c 		if (!test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
sdp               569 fs/gfs2/glops.c 			sdp->sd_log_sequence = head.lh_sequence + 1;
sdp               570 fs/gfs2/glops.c 			gfs2_log_pointers_init(sdp, head.lh_blkno);
sdp               597 fs/gfs2/glops.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               599 fs/gfs2/glops.c 	if (!remote || sb_rdonly(sdp->sd_vfs))
sdp                52 fs/gfs2/incore.h 	void (*lo_before_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
sdp                53 fs/gfs2/incore.h 	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
sdp               864 fs/gfs2/incore.h 	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               866 fs/gfs2/incore.h 	this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
sdp                79 fs/gfs2/inode.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp                84 fs/gfs2/inode.c 		if (gfs2_localflocks(sdp))
sdp                90 fs/gfs2/inode.c 		if (gfs2_localflocks(sdp))
sdp               138 fs/gfs2/inode.c 		struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               141 fs/gfs2/inode.c 		error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
sdp               146 fs/gfs2/inode.c 		error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
sdp               162 fs/gfs2/inode.c 				error = gfs2_check_blk_type(sdp, no_addr,
sdp               215 fs/gfs2/inode.c struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
sdp               218 fs/gfs2/inode.c 	struct super_block *sb = sdp->sd_vfs;
sdp               374 fs/gfs2/inode.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               386 fs/gfs2/inode.c 	error = gfs2_trans_begin(sdp, (*dblocks * RES_RG_BIT) + RES_STATFS + RES_QUOTA, 0);
sdp               395 fs/gfs2/inode.c 	gfs2_trans_end(sdp);
sdp               432 fs/gfs2/inode.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               442 fs/gfs2/inode.c 	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
sdp               516 fs/gfs2/inode.c 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
sdp               529 fs/gfs2/inode.c 		error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, da, 2), 0);
sdp               533 fs/gfs2/inode.c 		error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
sdp               540 fs/gfs2/inode.c 	gfs2_trans_end(sdp);
sdp               587 fs/gfs2/inode.c 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
sdp               601 fs/gfs2/inode.c 	error = gfs2_rindex_update(sdp);
sdp               640 fs/gfs2/inode.c 	inode = new_inode(sdp->sd_vfs);
sdp               673 fs/gfs2/inode.c 		    gfs2_tune_get(sdp, gt_new_files_jdata))
sdp               690 fs/gfs2/inode.c 	if ((GFS2_I(d_inode(sdp->sd_root_dir)) == dip) ||
sdp               703 fs/gfs2/inode.c 	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
sdp               713 fs/gfs2/inode.c 	error = gfs2_trans_begin(sdp, blocks, 0);
sdp               722 fs/gfs2/inode.c 	gfs2_trans_end(sdp);
sdp               724 fs/gfs2/inode.c 	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
sdp               897 fs/gfs2/inode.c 	struct gfs2_sbd *sdp = GFS2_SB(dir);
sdp               971 fs/gfs2/inode.c 		error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, &da, 2), 0);
sdp               975 fs/gfs2/inode.c 		error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF, 0);
sdp               998 fs/gfs2/inode.c 	gfs2_trans_end(sdp);
sdp              1100 fs/gfs2/inode.c 	struct gfs2_sbd *sdp = GFS2_SB(dir);
sdp              1107 fs/gfs2/inode.c 	error = gfs2_rindex_update(sdp);
sdp              1116 fs/gfs2/inode.c 	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
sdp              1149 fs/gfs2/inode.c 	error = gfs2_trans_begin(sdp, 2*RES_DINODE + 3*RES_LEAF + RES_RG_BIT, 0);
sdp              1154 fs/gfs2/inode.c 	gfs2_trans_end(sdp);
sdp              1350 fs/gfs2/inode.c 	struct gfs2_sbd *sdp = GFS2_SB(odir);
sdp              1367 fs/gfs2/inode.c 	error = gfs2_rindex_update(sdp);
sdp              1376 fs/gfs2/inode.c 		error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
sdp              1420 fs/gfs2/inode.c 		nrgd = gfs2_blk2rgrpd(sdp, nip->i_no_addr, 1);
sdp              1521 fs/gfs2/inode.c 		error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(ndip, &da, 4) +
sdp              1526 fs/gfs2/inode.c 		error = gfs2_trans_begin(sdp, 4 * RES_DINODE +
sdp              1550 fs/gfs2/inode.c 	gfs2_trans_end(sdp);
sdp              1593 fs/gfs2/inode.c 	struct gfs2_sbd *sdp = GFS2_SB(odir);
sdp              1602 fs/gfs2/inode.c 	error = gfs2_rindex_update(sdp);
sdp              1607 fs/gfs2/inode.c 		error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
sdp              1671 fs/gfs2/inode.c 	error = gfs2_trans_begin(sdp, 4 * RES_DINODE + 4 * RES_LEAF, 0);
sdp              1707 fs/gfs2/inode.c 	gfs2_trans_end(sdp);
sdp              1867 fs/gfs2/inode.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1887 fs/gfs2/inode.c 	error = gfs2_rindex_update(sdp);
sdp              1904 fs/gfs2/inode.c 	error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_QUOTA, 0);
sdp              1919 fs/gfs2/inode.c 	gfs2_trans_end(sdp);
sdp                30 fs/gfs2/inode.h static inline bool gfs2_is_ordered(const struct gfs2_sbd *sdp)
sdp                32 fs/gfs2/inode.h 	return sdp->sd_args.ar_data == GFS2_DATA_ORDERED;
sdp                35 fs/gfs2/inode.h static inline bool gfs2_is_writeback(const struct gfs2_sbd *sdp)
sdp                37 fs/gfs2/inode.h 	return sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK;
sdp                94 fs/gfs2/inode.h extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
sdp               122 fs/gfs2/inode.h static inline int gfs2_localflocks(const struct gfs2_sbd *sdp)
sdp               124 fs/gfs2/inode.h 	return sdp->sd_args.ar_localflocks;
sdp               130 fs/gfs2/inode.h static inline int gfs2_localflocks(const struct gfs2_sbd *sdp)
sdp               185 fs/gfs2/lock_dlm.c static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
sdp               197 fs/gfs2/lock_dlm.c 	fs_err(sdp, "unknown LM state %d\n", lmstate);
sdp               281 fs/gfs2/lock_dlm.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               282 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               301 fs/gfs2/lock_dlm.c 	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
sdp               310 fs/gfs2/lock_dlm.c 		fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
sdp               495 fs/gfs2/lock_dlm.c static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
sdp               497 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               502 fs/gfs2/lock_dlm.c 		fs_err(sdp, "%s lkid %x error %d\n",
sdp               510 fs/gfs2/lock_dlm.c 		fs_err(sdp, "%s lkid %x status %d\n",
sdp               517 fs/gfs2/lock_dlm.c static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
sdp               520 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               531 fs/gfs2/lock_dlm.c 		fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
sdp               541 fs/gfs2/lock_dlm.c 		fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n",
sdp               548 fs/gfs2/lock_dlm.c static int mounted_unlock(struct gfs2_sbd *sdp)
sdp               550 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               551 fs/gfs2/lock_dlm.c 	return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock");
sdp               554 fs/gfs2/lock_dlm.c static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
sdp               556 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               557 fs/gfs2/lock_dlm.c 	return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK,
sdp               561 fs/gfs2/lock_dlm.c static int control_unlock(struct gfs2_sbd *sdp)
sdp               563 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               564 fs/gfs2/lock_dlm.c 	return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock");
sdp               567 fs/gfs2/lock_dlm.c static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
sdp               569 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               570 fs/gfs2/lock_dlm.c 	return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK,
sdp               576 fs/gfs2/lock_dlm.c 	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
sdp               577 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               625 fs/gfs2/lock_dlm.c 	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
sdp               627 fs/gfs2/lock_dlm.c 		fs_err(sdp, "control lock EX error %d\n", error);
sdp               636 fs/gfs2/lock_dlm.c 		fs_info(sdp, "recover generation %u block1 %u %u\n",
sdp               639 fs/gfs2/lock_dlm.c 		control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
sdp               708 fs/gfs2/lock_dlm.c 	error = control_lock(sdp, DLM_LOCK_NL, flags);
sdp               710 fs/gfs2/lock_dlm.c 		fs_err(sdp, "control lock NL error %d\n", error);
sdp               723 fs/gfs2/lock_dlm.c 			fs_info(sdp, "recover generation %u jid %d\n",
sdp               725 fs/gfs2/lock_dlm.c 			gfs2_recover_set(sdp, i);
sdp               743 fs/gfs2/lock_dlm.c 		fs_info(sdp, "recover generation %u done\n", start_gen);
sdp               744 fs/gfs2/lock_dlm.c 		gfs2_glock_thaw(sdp);
sdp               746 fs/gfs2/lock_dlm.c 		fs_info(sdp, "recover generation %u block2 %u %u\n",
sdp               752 fs/gfs2/lock_dlm.c static int control_mount(struct gfs2_sbd *sdp)
sdp               754 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               768 fs/gfs2/lock_dlm.c 	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_VALBLK);
sdp               770 fs/gfs2/lock_dlm.c 		fs_err(sdp, "control_mount control_lock NL error %d\n", error);
sdp               774 fs/gfs2/lock_dlm.c 	error = mounted_lock(sdp, DLM_LOCK_NL, 0);
sdp               776 fs/gfs2/lock_dlm.c 		fs_err(sdp, "control_mount mounted_lock NL error %d\n", error);
sdp               777 fs/gfs2/lock_dlm.c 		control_unlock(sdp);
sdp               794 fs/gfs2/lock_dlm.c 		error = mounted_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
sdp               814 fs/gfs2/lock_dlm.c 	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE|DLM_LKF_VALBLK);
sdp               818 fs/gfs2/lock_dlm.c 		fs_err(sdp, "control_mount control_lock EX error %d\n", error);
sdp               826 fs/gfs2/lock_dlm.c 	if (sdp->sd_args.ar_spectator)
sdp               829 fs/gfs2/lock_dlm.c 	error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
sdp               834 fs/gfs2/lock_dlm.c 		fs_err(sdp, "control_mount mounted_lock EX error %d\n", error);
sdp               838 fs/gfs2/lock_dlm.c 	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
sdp               844 fs/gfs2/lock_dlm.c 		fs_err(sdp, "control_mount mounted_lock PR error %d\n", error);
sdp               864 fs/gfs2/lock_dlm.c 		fs_err(sdp, "control_mount control_lock disabled\n");
sdp               876 fs/gfs2/lock_dlm.c 		fs_info(sdp, "first mounter control generation %u\n", lvb_gen);
sdp               880 fs/gfs2/lock_dlm.c 	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
sdp               892 fs/gfs2/lock_dlm.c 		fs_info(sdp, "control_mount wait for journal recovery\n");
sdp               904 fs/gfs2/lock_dlm.c 		if (sdp->sd_args.ar_spectator) {
sdp               905 fs/gfs2/lock_dlm.c 			fs_info(sdp, "Recovery is required. Waiting for a "
sdp               909 fs/gfs2/lock_dlm.c 			fs_info(sdp, "control_mount wait1 block %u start %u "
sdp               921 fs/gfs2/lock_dlm.c 		fs_info(sdp, "control_mount wait2 block %u start %u mount %u "
sdp               930 fs/gfs2/lock_dlm.c 		fs_info(sdp, "control_mount wait3 block %u start %u mount %u "
sdp               945 fs/gfs2/lock_dlm.c 	mounted_unlock(sdp);
sdp               946 fs/gfs2/lock_dlm.c 	control_unlock(sdp);
sdp               950 fs/gfs2/lock_dlm.c static int control_first_done(struct gfs2_sbd *sdp)
sdp               952 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               965 fs/gfs2/lock_dlm.c 		fs_err(sdp, "control_first_done start %u block %u flags %lx\n",
sdp               968 fs/gfs2/lock_dlm.c 		control_unlock(sdp);
sdp               981 fs/gfs2/lock_dlm.c 		fs_info(sdp, "control_first_done wait gen %u\n", start_gen);
sdp               997 fs/gfs2/lock_dlm.c 	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
sdp               999 fs/gfs2/lock_dlm.c 		fs_err(sdp, "control_first_done mounted PR error %d\n", error);
sdp              1001 fs/gfs2/lock_dlm.c 	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
sdp              1003 fs/gfs2/lock_dlm.c 		fs_err(sdp, "control_first_done control NL error %d\n", error);
sdp              1016 fs/gfs2/lock_dlm.c static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
sdp              1019 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp              1079 fs/gfs2/lock_dlm.c 	struct gfs2_sbd *sdp = arg;
sdp              1080 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp              1100 fs/gfs2/lock_dlm.c 	struct gfs2_sbd *sdp = arg;
sdp              1101 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp              1106 fs/gfs2/lock_dlm.c 		fs_err(sdp, "recover_slot jid %d gen %u short size %d\n",
sdp              1113 fs/gfs2/lock_dlm.c 		fs_info(sdp, "recover_slot jid %d gen %u prev %u\n",
sdp              1125 fs/gfs2/lock_dlm.c 	struct gfs2_sbd *sdp = arg;
sdp              1126 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp              1129 fs/gfs2/lock_dlm.c 	set_recover_size(sdp, slots, num_slots);
sdp              1140 fs/gfs2/lock_dlm.c 		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
sdp              1150 fs/gfs2/lock_dlm.c static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
sdp              1153 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp              1168 fs/gfs2/lock_dlm.c 		fs_err(sdp, "recovery_result jid %d short size %d\n",
sdp              1174 fs/gfs2/lock_dlm.c 	fs_info(sdp, "recover jid %d result %s\n", jid,
sdp              1184 fs/gfs2/lock_dlm.c 		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work,
sdp              1195 fs/gfs2/lock_dlm.c static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
sdp              1197 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp              1207 fs/gfs2/lock_dlm.c 	INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
sdp              1218 fs/gfs2/lock_dlm.c 	error = set_recover_size(sdp, NULL, 0);
sdp              1228 fs/gfs2/lock_dlm.c 		fs_info(sdp, "no fsname found\n");
sdp              1243 fs/gfs2/lock_dlm.c 				  &gdlm_lockspace_ops, sdp, &ops_result,
sdp              1246 fs/gfs2/lock_dlm.c 		fs_err(sdp, "dlm_new_lockspace error %d\n", error);
sdp              1255 fs/gfs2/lock_dlm.c 		fs_info(sdp, "dlm lockspace ops not used\n");
sdp              1261 fs/gfs2/lock_dlm.c 	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) {
sdp              1262 fs/gfs2/lock_dlm.c 		fs_err(sdp, "dlm lockspace ops disallow jid preset\n");
sdp              1272 fs/gfs2/lock_dlm.c 	error = control_mount(sdp);
sdp              1274 fs/gfs2/lock_dlm.c 		fs_err(sdp, "mount control error %d\n", error);
sdp              1279 fs/gfs2/lock_dlm.c 	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
sdp              1281 fs/gfs2/lock_dlm.c 	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
sdp              1292 fs/gfs2/lock_dlm.c static void gdlm_first_done(struct gfs2_sbd *sdp)
sdp              1294 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp              1300 fs/gfs2/lock_dlm.c 	error = control_first_done(sdp);
sdp              1302 fs/gfs2/lock_dlm.c 		fs_err(sdp, "mount first_done error %d\n", error);
sdp              1305 fs/gfs2/lock_dlm.c static void gdlm_unmount(struct gfs2_sbd *sdp)
sdp              1307 fs/gfs2/lock_dlm.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp              1317 fs/gfs2/lock_dlm.c 	flush_delayed_work(&sdp->sd_control_work);
sdp                46 fs/gfs2/log.c  unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
sdp                53 fs/gfs2/log.c  	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
sdp                56 fs/gfs2/log.c  		second = (sdp->sd_sb.sb_bsize -
sdp                90 fs/gfs2/log.c  static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
sdp                94 fs/gfs2/log.c  __releases(&sdp->sd_ail_lock)
sdp                95 fs/gfs2/log.c  __acquires(&sdp->sd_ail_lock)
sdp               105 fs/gfs2/log.c  		gfs2_assert(sdp, bd->bd_tr == tr);
sdp               110 fs/gfs2/log.c  					      &sdp->sd_flags)) {
sdp               111 fs/gfs2/log.c  				gfs2_io_error_bh(sdp, bh);
sdp               127 fs/gfs2/log.c  		spin_unlock(&sdp->sd_ail_lock);
sdp               129 fs/gfs2/log.c  		spin_lock(&sdp->sd_ail_lock);
sdp               148 fs/gfs2/log.c  void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
sdp               150 fs/gfs2/log.c  	struct list_head *head = &sdp->sd_ail1_list;
sdp               155 fs/gfs2/log.c  	trace_gfs2_ail_flush(sdp, wbc, 1);
sdp               157 fs/gfs2/log.c  	spin_lock(&sdp->sd_ail_lock);
sdp               162 fs/gfs2/log.c  		if (gfs2_ail1_start_one(sdp, wbc, tr, &withdraw))
sdp               165 fs/gfs2/log.c  	spin_unlock(&sdp->sd_ail_lock);
sdp               168 fs/gfs2/log.c  		gfs2_lm_withdraw(sdp, NULL);
sdp               169 fs/gfs2/log.c  	trace_gfs2_ail_flush(sdp, wbc, 0);
sdp               177 fs/gfs2/log.c  static void gfs2_ail1_start(struct gfs2_sbd *sdp)
sdp               186 fs/gfs2/log.c  	return gfs2_ail1_flush(sdp, &wbc);
sdp               196 fs/gfs2/log.c  static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
sdp               205 fs/gfs2/log.c  		gfs2_assert(sdp, bd->bd_tr == tr);
sdp               209 fs/gfs2/log.c  		    !test_and_set_bit(SDF_AIL1_IO_ERROR, &sdp->sd_flags)) {
sdp               210 fs/gfs2/log.c  			gfs2_io_error_bh(sdp, bh);
sdp               224 fs/gfs2/log.c  static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
sdp               231 fs/gfs2/log.c  	spin_lock(&sdp->sd_ail_lock);
sdp               232 fs/gfs2/log.c  	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
sdp               233 fs/gfs2/log.c  		gfs2_ail1_empty_one(sdp, tr, &withdraw);
sdp               235 fs/gfs2/log.c  			list_move(&tr->tr_list, &sdp->sd_ail2_list);
sdp               239 fs/gfs2/log.c  	ret = list_empty(&sdp->sd_ail1_list);
sdp               240 fs/gfs2/log.c  	spin_unlock(&sdp->sd_ail_lock);
sdp               243 fs/gfs2/log.c  		gfs2_lm_withdraw(sdp, "fatal: I/O error(s)\n");
sdp               248 fs/gfs2/log.c  static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
sdp               254 fs/gfs2/log.c  	spin_lock(&sdp->sd_ail_lock);
sdp               255 fs/gfs2/log.c  	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
sdp               261 fs/gfs2/log.c  			spin_unlock(&sdp->sd_ail_lock);
sdp               267 fs/gfs2/log.c  	spin_unlock(&sdp->sd_ail_lock);
sdp               277 fs/gfs2/log.c  static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
sdp               285 fs/gfs2/log.c  		gfs2_assert(sdp, bd->bd_tr == tr);
sdp               290 fs/gfs2/log.c  static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
sdp               293 fs/gfs2/log.c  	unsigned int old_tail = sdp->sd_log_tail;
sdp               297 fs/gfs2/log.c  	spin_lock(&sdp->sd_ail_lock);
sdp               299 fs/gfs2/log.c  	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
sdp               306 fs/gfs2/log.c  		gfs2_ail2_empty_one(sdp, tr);
sdp               308 fs/gfs2/log.c  		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
sdp               309 fs/gfs2/log.c  		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
sdp               313 fs/gfs2/log.c  	spin_unlock(&sdp->sd_ail_lock);
sdp               323 fs/gfs2/log.c  void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
sdp               326 fs/gfs2/log.c  	atomic_add(blks, &sdp->sd_log_blks_free);
sdp               327 fs/gfs2/log.c  	trace_gfs2_log_blocks(sdp, blks);
sdp               328 fs/gfs2/log.c  	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
sdp               329 fs/gfs2/log.c  				  sdp->sd_jdesc->jd_blocks);
sdp               330 fs/gfs2/log.c  	up_read(&sdp->sd_log_flush_lock);
sdp               353 fs/gfs2/log.c  int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
sdp               356 fs/gfs2/log.c  	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
sdp               362 fs/gfs2/log.c  	if (gfs2_assert_warn(sdp, blks) ||
sdp               363 fs/gfs2/log.c  	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
sdp               365 fs/gfs2/log.c  	atomic_add(blks, &sdp->sd_log_blks_needed);
sdp               367 fs/gfs2/log.c  	free_blocks = atomic_read(&sdp->sd_log_blks_free);
sdp               370 fs/gfs2/log.c  			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
sdp               372 fs/gfs2/log.c  			wake_up(&sdp->sd_logd_waitq);
sdp               374 fs/gfs2/log.c  			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
sdp               376 fs/gfs2/log.c  			free_blocks = atomic_read(&sdp->sd_log_blks_free);
sdp               378 fs/gfs2/log.c  		finish_wait(&sdp->sd_log_waitq, &wait);
sdp               380 fs/gfs2/log.c  	atomic_inc(&sdp->sd_reserving_log);
sdp               381 fs/gfs2/log.c  	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
sdp               383 fs/gfs2/log.c  		if (atomic_dec_and_test(&sdp->sd_reserving_log))
sdp               384 fs/gfs2/log.c  			wake_up(&sdp->sd_reserving_log_wait);
sdp               387 fs/gfs2/log.c  	atomic_sub(blks, &sdp->sd_log_blks_needed);
sdp               388 fs/gfs2/log.c  	trace_gfs2_log_blocks(sdp, -blks);
sdp               395 fs/gfs2/log.c  		wake_up(&sdp->sd_log_waitq);
sdp               397 fs/gfs2/log.c  	down_read(&sdp->sd_log_flush_lock);
sdp               398 fs/gfs2/log.c  	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
sdp               399 fs/gfs2/log.c  		gfs2_log_release(sdp, blks);
sdp               402 fs/gfs2/log.c  	if (atomic_dec_and_test(&sdp->sd_reserving_log))
sdp               403 fs/gfs2/log.c  		wake_up(&sdp->sd_reserving_log_wait);
sdp               419 fs/gfs2/log.c  static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
sdp               426 fs/gfs2/log.c  		dist += sdp->sd_jdesc->jd_blocks;
sdp               456 fs/gfs2/log.c  static unsigned int calc_reserved(struct gfs2_sbd *sdp)
sdp               461 fs/gfs2/log.c  	struct gfs2_trans *tr = sdp->sd_log_tr;
sdp               468 fs/gfs2/log.c  		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
sdp               469 fs/gfs2/log.c  		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
sdp               472 fs/gfs2/log.c  	if (sdp->sd_log_commited_revoke > 0)
sdp               473 fs/gfs2/log.c  		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
sdp               481 fs/gfs2/log.c  static unsigned int current_tail(struct gfs2_sbd *sdp)
sdp               486 fs/gfs2/log.c  	spin_lock(&sdp->sd_ail_lock);
sdp               488 fs/gfs2/log.c  	if (list_empty(&sdp->sd_ail1_list)) {
sdp               489 fs/gfs2/log.c  		tail = sdp->sd_log_head;
sdp               491 fs/gfs2/log.c  		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
sdp               496 fs/gfs2/log.c  	spin_unlock(&sdp->sd_ail_lock);
sdp               501 fs/gfs2/log.c  static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
sdp               503 fs/gfs2/log.c  	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
sdp               505 fs/gfs2/log.c  	ail2_empty(sdp, new_tail);
sdp               507 fs/gfs2/log.c  	atomic_add(dist, &sdp->sd_log_blks_free);
sdp               508 fs/gfs2/log.c  	trace_gfs2_log_blocks(sdp, dist);
sdp               509 fs/gfs2/log.c  	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
sdp               510 fs/gfs2/log.c  			     sdp->sd_jdesc->jd_blocks);
sdp               512 fs/gfs2/log.c  	sdp->sd_log_tail = new_tail;
sdp               516 fs/gfs2/log.c  void log_flush_wait(struct gfs2_sbd *sdp)
sdp               520 fs/gfs2/log.c  	if (atomic_read(&sdp->sd_log_in_flight)) {
sdp               522 fs/gfs2/log.c  			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
sdp               524 fs/gfs2/log.c  			if (atomic_read(&sdp->sd_log_in_flight))
sdp               526 fs/gfs2/log.c  		} while(atomic_read(&sdp->sd_log_in_flight));
sdp               527 fs/gfs2/log.c  		finish_wait(&sdp->sd_log_flush_wait, &wait);
sdp               545 fs/gfs2/log.c  static void gfs2_ordered_write(struct gfs2_sbd *sdp)
sdp               550 fs/gfs2/log.c  	spin_lock(&sdp->sd_ordered_lock);
sdp               551 fs/gfs2/log.c  	list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
sdp               552 fs/gfs2/log.c  	while (!list_empty(&sdp->sd_log_ordered)) {
sdp               553 fs/gfs2/log.c  		ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
sdp               560 fs/gfs2/log.c  		spin_unlock(&sdp->sd_ordered_lock);
sdp               562 fs/gfs2/log.c  		spin_lock(&sdp->sd_ordered_lock);
sdp               564 fs/gfs2/log.c  	list_splice(&written, &sdp->sd_log_ordered);
sdp               565 fs/gfs2/log.c  	spin_unlock(&sdp->sd_ordered_lock);
sdp               568 fs/gfs2/log.c  static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
sdp               572 fs/gfs2/log.c  	spin_lock(&sdp->sd_ordered_lock);
sdp               573 fs/gfs2/log.c  	while (!list_empty(&sdp->sd_log_ordered)) {
sdp               574 fs/gfs2/log.c  		ip = list_entry(sdp->sd_log_ordered.next, struct gfs2_inode, i_ordered);
sdp               579 fs/gfs2/log.c  		spin_unlock(&sdp->sd_ordered_lock);
sdp               581 fs/gfs2/log.c  		spin_lock(&sdp->sd_ordered_lock);
sdp               583 fs/gfs2/log.c  	spin_unlock(&sdp->sd_ordered_lock);
sdp               588 fs/gfs2/log.c  	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               590 fs/gfs2/log.c  	spin_lock(&sdp->sd_ordered_lock);
sdp               593 fs/gfs2/log.c  	spin_unlock(&sdp->sd_ordered_lock);
sdp               596 fs/gfs2/log.c  void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
sdp               601 fs/gfs2/log.c  	sdp->sd_log_num_revoke++;
sdp               609 fs/gfs2/log.c  	list_add(&bd->bd_list, &sdp->sd_log_revokes);
sdp               620 fs/gfs2/log.c  void gfs2_write_revokes(struct gfs2_sbd *sdp)
sdp               625 fs/gfs2/log.c  	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
sdp               627 fs/gfs2/log.c  	gfs2_ail1_empty(sdp);
sdp               628 fs/gfs2/log.c  	spin_lock(&sdp->sd_ail_lock);
sdp               629 fs/gfs2/log.c  	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
sdp               638 fs/gfs2/log.c  	spin_unlock(&sdp->sd_ail_lock);
sdp               641 fs/gfs2/log.c  	while (sdp->sd_log_num_revoke > max_revokes)
sdp               642 fs/gfs2/log.c  		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
sdp               643 fs/gfs2/log.c  	max_revokes -= sdp->sd_log_num_revoke;
sdp               644 fs/gfs2/log.c  	if (!sdp->sd_log_num_revoke) {
sdp               645 fs/gfs2/log.c  		atomic_dec(&sdp->sd_log_blks_free);
sdp               648 fs/gfs2/log.c  		if (!sdp->sd_log_blks_reserved)
sdp               649 fs/gfs2/log.c  			atomic_dec(&sdp->sd_log_blks_free);
sdp               651 fs/gfs2/log.c  	gfs2_log_lock(sdp);
sdp               652 fs/gfs2/log.c  	spin_lock(&sdp->sd_ail_lock);
sdp               653 fs/gfs2/log.c  	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
sdp               659 fs/gfs2/log.c  			gfs2_add_revoke(sdp, bd);
sdp               664 fs/gfs2/log.c  	spin_unlock(&sdp->sd_ail_lock);
sdp               665 fs/gfs2/log.c  	gfs2_log_unlock(sdp);
sdp               667 fs/gfs2/log.c  	if (!sdp->sd_log_num_revoke) {
sdp               668 fs/gfs2/log.c  		atomic_inc(&sdp->sd_log_blks_free);
sdp               669 fs/gfs2/log.c  		if (!sdp->sd_log_blks_reserved)
sdp               670 fs/gfs2/log.c  			atomic_inc(&sdp->sd_log_blks_free);
sdp               687 fs/gfs2/log.c  void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
sdp               694 fs/gfs2/log.c  	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
sdp               696 fs/gfs2/log.c  	struct super_block *sb = sdp->sd_vfs;
sdp               706 fs/gfs2/log.c  	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
sdp               718 fs/gfs2/log.c  		dblock = gfs2_log_bmap(sdp);
sdp               721 fs/gfs2/log.c  		if (gfs2_assert_withdraw(sdp, ret == 0))
sdp               732 fs/gfs2/log.c  			cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
sdp               734 fs/gfs2/log.c  			cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);
sdp               736 fs/gfs2/log.c  		spin_lock(&sdp->sd_statfs_spin);
sdp               740 fs/gfs2/log.c  		spin_unlock(&sdp->sd_statfs_spin);
sdp               749 fs/gfs2/log.c  	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
sdp               750 fs/gfs2/log.c  	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
sdp               751 fs/gfs2/log.c  	log_flush_wait(sdp);
sdp               762 fs/gfs2/log.c  static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
sdp               766 fs/gfs2/log.c  	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
sdp               768 fs/gfs2/log.c  	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));
sdp               769 fs/gfs2/log.c  	tail = current_tail(sdp);
sdp               771 fs/gfs2/log.c  	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
sdp               772 fs/gfs2/log.c  		gfs2_ordered_wait(sdp);
sdp               773 fs/gfs2/log.c  		log_flush_wait(sdp);
sdp               776 fs/gfs2/log.c  	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
sdp               777 fs/gfs2/log.c  	gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++, tail,
sdp               778 fs/gfs2/log.c  			      sdp->sd_log_flush_head, flags, op_flags);
sdp               780 fs/gfs2/log.c  	if (sdp->sd_log_tail != tail)
sdp               781 fs/gfs2/log.c  		log_pull_tail(sdp, tail);
sdp               792 fs/gfs2/log.c  void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
sdp               795 fs/gfs2/log.c  	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
sdp               797 fs/gfs2/log.c  	down_write(&sdp->sd_log_flush_lock);
sdp               801 fs/gfs2/log.c  		up_write(&sdp->sd_log_flush_lock);
sdp               804 fs/gfs2/log.c  	trace_gfs2_log_flush(sdp, 1, flags);
sdp               807 fs/gfs2/log.c  		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
sdp               809 fs/gfs2/log.c  	sdp->sd_log_flush_head = sdp->sd_log_head;
sdp               810 fs/gfs2/log.c  	tr = sdp->sd_log_tr;
sdp               812 fs/gfs2/log.c  		sdp->sd_log_tr = NULL;
sdp               815 fs/gfs2/log.c  		tr->tr_first = sdp->sd_log_flush_head;
sdp               817 fs/gfs2/log.c  			gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
sdp               821 fs/gfs2/log.c  		gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
sdp               822 fs/gfs2/log.c  	gfs2_assert_withdraw(sdp,
sdp               823 fs/gfs2/log.c  			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
sdp               825 fs/gfs2/log.c  	gfs2_ordered_write(sdp);
sdp               826 fs/gfs2/log.c  	lops_before_commit(sdp, tr);
sdp               827 fs/gfs2/log.c  	gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
sdp               829 fs/gfs2/log.c  	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
sdp               830 fs/gfs2/log.c  		log_flush_wait(sdp);
sdp               831 fs/gfs2/log.c  		log_write_header(sdp, flags);
sdp               832 fs/gfs2/log.c  	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
sdp               833 fs/gfs2/log.c  		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
sdp               834 fs/gfs2/log.c  		trace_gfs2_log_blocks(sdp, -1);
sdp               835 fs/gfs2/log.c  		log_write_header(sdp, flags);
sdp               837 fs/gfs2/log.c  	lops_after_commit(sdp, tr);
sdp               839 fs/gfs2/log.c  	gfs2_log_lock(sdp);
sdp               840 fs/gfs2/log.c  	sdp->sd_log_head = sdp->sd_log_flush_head;
sdp               841 fs/gfs2/log.c  	sdp->sd_log_blks_reserved = 0;
sdp               842 fs/gfs2/log.c  	sdp->sd_log_commited_revoke = 0;
sdp               844 fs/gfs2/log.c  	spin_lock(&sdp->sd_ail_lock);
sdp               846 fs/gfs2/log.c  		list_add(&tr->tr_list, &sdp->sd_ail1_list);
sdp               849 fs/gfs2/log.c  	spin_unlock(&sdp->sd_ail_lock);
sdp               850 fs/gfs2/log.c  	gfs2_log_unlock(sdp);
sdp               853 fs/gfs2/log.c  		if (!sdp->sd_log_idle) {
sdp               855 fs/gfs2/log.c  				gfs2_ail1_start(sdp);
sdp               856 fs/gfs2/log.c  				gfs2_ail1_wait(sdp);
sdp               857 fs/gfs2/log.c  				if (gfs2_ail1_empty(sdp))
sdp               860 fs/gfs2/log.c  			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
sdp               861 fs/gfs2/log.c  			trace_gfs2_log_blocks(sdp, -1);
sdp               862 fs/gfs2/log.c  			log_write_header(sdp, flags);
sdp               863 fs/gfs2/log.c  			sdp->sd_log_head = sdp->sd_log_flush_head;
sdp               867 fs/gfs2/log.c  			gfs2_log_shutdown(sdp);
sdp               869 fs/gfs2/log.c  			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
sdp               872 fs/gfs2/log.c  	trace_gfs2_log_flush(sdp, 0, flags);
sdp               873 fs/gfs2/log.c  	up_write(&sdp->sd_log_flush_lock);
sdp               898 fs/gfs2/log.c  static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
sdp               904 fs/gfs2/log.c  	gfs2_log_lock(sdp);
sdp               906 fs/gfs2/log.c  	if (sdp->sd_log_tr) {
sdp               907 fs/gfs2/log.c  		gfs2_merge_trans(sdp->sd_log_tr, tr);
sdp               909 fs/gfs2/log.c  		gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
sdp               910 fs/gfs2/log.c  		sdp->sd_log_tr = tr;
sdp               914 fs/gfs2/log.c  	sdp->sd_log_commited_revoke += tr->tr_num_revoke;
sdp               915 fs/gfs2/log.c  	reserved = calc_reserved(sdp);
sdp               916 fs/gfs2/log.c  	maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
sdp               917 fs/gfs2/log.c  	gfs2_assert_withdraw(sdp, maxres >= reserved);
sdp               919 fs/gfs2/log.c  	atomic_add(unused, &sdp->sd_log_blks_free);
sdp               920 fs/gfs2/log.c  	trace_gfs2_log_blocks(sdp, unused);
sdp               921 fs/gfs2/log.c  	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
sdp               922 fs/gfs2/log.c  			     sdp->sd_jdesc->jd_blocks);
sdp               923 fs/gfs2/log.c  	sdp->sd_log_blks_reserved = reserved;
sdp               925 fs/gfs2/log.c  	gfs2_log_unlock(sdp);
sdp               943 fs/gfs2/log.c  void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
sdp               945 fs/gfs2/log.c  	log_refund(sdp, tr);
sdp               947 fs/gfs2/log.c  	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
sdp               948 fs/gfs2/log.c  	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
sdp               949 fs/gfs2/log.c  	    atomic_read(&sdp->sd_log_thresh2)))
sdp               950 fs/gfs2/log.c  		wake_up(&sdp->sd_logd_waitq);
sdp               959 fs/gfs2/log.c  void gfs2_log_shutdown(struct gfs2_sbd *sdp)
sdp               961 fs/gfs2/log.c  	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
sdp               962 fs/gfs2/log.c  	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
sdp               963 fs/gfs2/log.c  	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
sdp               965 fs/gfs2/log.c  	sdp->sd_log_flush_head = sdp->sd_log_head;
sdp               967 fs/gfs2/log.c  	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
sdp               969 fs/gfs2/log.c  	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
sdp               970 fs/gfs2/log.c  	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
sdp               972 fs/gfs2/log.c  	sdp->sd_log_head = sdp->sd_log_flush_head;
sdp               973 fs/gfs2/log.c  	sdp->sd_log_tail = sdp->sd_log_head;
sdp               976 fs/gfs2/log.c  static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
sdp               978 fs/gfs2/log.c  	return (atomic_read(&sdp->sd_log_pinned) +
sdp               979 fs/gfs2/log.c  		atomic_read(&sdp->sd_log_blks_needed) >=
sdp               980 fs/gfs2/log.c  		atomic_read(&sdp->sd_log_thresh1));
sdp               983 fs/gfs2/log.c  static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
sdp               985 fs/gfs2/log.c  	unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
sdp               987 fs/gfs2/log.c  	if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
sdp               990 fs/gfs2/log.c  	return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
sdp               991 fs/gfs2/log.c  		atomic_read(&sdp->sd_log_thresh2);
sdp              1004 fs/gfs2/log.c  	struct gfs2_sbd *sdp = data;
sdp              1012 fs/gfs2/log.c  		if (sdp->sd_log_error) {
sdp              1013 fs/gfs2/log.c  			gfs2_lm_withdraw(sdp,
sdp              1017 fs/gfs2/log.c  					 sdp->sd_fsname, sdp->sd_log_error);
sdp              1021 fs/gfs2/log.c  		if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
sdp              1022 fs/gfs2/log.c  			gfs2_ail1_empty(sdp);
sdp              1023 fs/gfs2/log.c  			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
sdp              1028 fs/gfs2/log.c  		if (gfs2_ail_flush_reqd(sdp)) {
sdp              1029 fs/gfs2/log.c  			gfs2_ail1_start(sdp);
sdp              1030 fs/gfs2/log.c  			gfs2_ail1_wait(sdp);
sdp              1031 fs/gfs2/log.c  			gfs2_ail1_empty(sdp);
sdp              1032 fs/gfs2/log.c  			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
sdp              1037 fs/gfs2/log.c  		if (!gfs2_ail_flush_reqd(sdp) || did_flush)
sdp              1038 fs/gfs2/log.c  			wake_up(&sdp->sd_log_waitq);
sdp              1040 fs/gfs2/log.c  		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
sdp              1045 fs/gfs2/log.c  			prepare_to_wait(&sdp->sd_logd_waitq, &wait,
sdp              1047 fs/gfs2/log.c  			if (!gfs2_ail_flush_reqd(sdp) &&
sdp              1048 fs/gfs2/log.c  			    !gfs2_jrnl_flush_reqd(sdp) &&
sdp              1051 fs/gfs2/log.c  		} while(t && !gfs2_ail_flush_reqd(sdp) &&
sdp              1052 fs/gfs2/log.c  			!gfs2_jrnl_flush_reqd(sdp) &&
sdp              1054 fs/gfs2/log.c  		finish_wait(&sdp->sd_logd_waitq, &wait);
sdp                22 fs/gfs2/log.h  static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
sdp                23 fs/gfs2/log.h  __acquires(&sdp->sd_log_lock)
sdp                25 fs/gfs2/log.h  	spin_lock(&sdp->sd_log_lock);
sdp                34 fs/gfs2/log.h  static inline void gfs2_log_unlock(struct gfs2_sbd *sdp)
sdp                35 fs/gfs2/log.h  __releases(&sdp->sd_log_lock)
sdp                37 fs/gfs2/log.h  	spin_unlock(&sdp->sd_log_lock);
sdp                40 fs/gfs2/log.h  static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
sdp                43 fs/gfs2/log.h  	if (++value == sdp->sd_jdesc->jd_blocks) {
sdp                46 fs/gfs2/log.h  	sdp->sd_log_head = sdp->sd_log_tail = value;
sdp                51 fs/gfs2/log.h  	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp                53 fs/gfs2/log.h  	if (gfs2_is_jdata(ip) || !gfs2_is_ordered(sdp))
sdp                57 fs/gfs2/log.h  		spin_lock(&sdp->sd_ordered_lock);
sdp                59 fs/gfs2/log.h  			list_add(&ip->i_ordered, &sdp->sd_log_ordered);
sdp                60 fs/gfs2/log.h  		spin_unlock(&sdp->sd_ordered_lock);
sdp                64 fs/gfs2/log.h  extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
sdp                67 fs/gfs2/log.h  extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
sdp                68 fs/gfs2/log.h  extern int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
sdp                69 fs/gfs2/log.h  extern void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
sdp                72 fs/gfs2/log.h  extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
sdp                74 fs/gfs2/log.h  extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
sdp                75 fs/gfs2/log.h  extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
sdp                76 fs/gfs2/log.h  extern void log_flush_wait(struct gfs2_sbd *sdp);
sdp                78 fs/gfs2/log.h  extern void gfs2_log_shutdown(struct gfs2_sbd *sdp);
sdp                80 fs/gfs2/log.h  extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
sdp                82 fs/gfs2/log.h  extern void gfs2_write_revokes(struct gfs2_sbd *sdp);
sdp                41 fs/gfs2/lops.c void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
sdp                49 fs/gfs2/lops.c 		gfs2_assert_withdraw(sdp, 0);
sdp                51 fs/gfs2/lops.c 		gfs2_io_error_bh_wd(sdp, bh);
sdp                56 fs/gfs2/lops.c 	spin_lock(&sdp->sd_ail_lock);
sdp                59 fs/gfs2/lops.c 	spin_unlock(&sdp->sd_ail_lock);
sdp                61 fs/gfs2/lops.c 	atomic_inc(&sdp->sd_log_pinned);
sdp                73 fs/gfs2/lops.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp                80 fs/gfs2/lops.c 	if (sdp->sd_args.ar_discard)
sdp                81 fs/gfs2/lops.c 		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
sdp                98 fs/gfs2/lops.c static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
sdp               113 fs/gfs2/lops.c 	spin_lock(&sdp->sd_ail_lock);
sdp               124 fs/gfs2/lops.c 	spin_unlock(&sdp->sd_ail_lock);
sdp               129 fs/gfs2/lops.c 	atomic_dec(&sdp->sd_log_pinned);
sdp               132 fs/gfs2/lops.c static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
sdp               134 fs/gfs2/lops.c 	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
sdp               135 fs/gfs2/lops.c 	       (sdp->sd_log_flush_head != sdp->sd_log_head));
sdp               137 fs/gfs2/lops.c 	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
sdp               138 fs/gfs2/lops.c 		sdp->sd_log_flush_head = 0;
sdp               141 fs/gfs2/lops.c u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
sdp               143 fs/gfs2/lops.c 	unsigned int lbn = sdp->sd_log_flush_head;
sdp               147 fs/gfs2/lops.c 	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, list) {
sdp               150 fs/gfs2/lops.c 			gfs2_log_incr_head(sdp);
sdp               170 fs/gfs2/lops.c static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
sdp               205 fs/gfs2/lops.c 	struct gfs2_sbd *sdp = bio->bi_private;
sdp               211 fs/gfs2/lops.c 		fs_err(sdp, "Error %d writing to journal, jid=%u\n",
sdp               212 fs/gfs2/lops.c 		       bio->bi_status, sdp->sd_jdesc->jd_jid);
sdp               213 fs/gfs2/lops.c 		wake_up(&sdp->sd_logd_waitq);
sdp               219 fs/gfs2/lops.c 			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
sdp               225 fs/gfs2/lops.c 	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
sdp               226 fs/gfs2/lops.c 		wake_up(&sdp->sd_log_flush_wait);
sdp               242 fs/gfs2/lops.c 		struct gfs2_sbd *sdp = bio->bi_private;
sdp               243 fs/gfs2/lops.c 		atomic_inc(&sdp->sd_log_in_flight);
sdp               261 fs/gfs2/lops.c static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
sdp               264 fs/gfs2/lops.c 	struct super_block *sb = sdp->sd_vfs;
sdp               267 fs/gfs2/lops.c 	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
sdp               270 fs/gfs2/lops.c 	bio->bi_private = sdp;
sdp               292 fs/gfs2/lops.c static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
sdp               302 fs/gfs2/lops.c 		nblk >>= sdp->sd_fsb2bb_shift;
sdp               308 fs/gfs2/lops.c 	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
sdp               325 fs/gfs2/lops.c void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
sdp               331 fs/gfs2/lops.c 	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
sdp               335 fs/gfs2/lops.c 		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
sdp               352 fs/gfs2/lops.c static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
sdp               354 fs/gfs2/lops.c 	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh),
sdp               355 fs/gfs2/lops.c 		       gfs2_log_bmap(sdp));
sdp               369 fs/gfs2/lops.c void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
sdp               371 fs/gfs2/lops.c 	struct super_block *sb = sdp->sd_vfs;
sdp               372 fs/gfs2/lops.c 	gfs2_log_write(sdp, page, sb->s_blocksize, 0,
sdp               373 fs/gfs2/lops.c 		       gfs2_log_bmap(sdp));
sdp               416 fs/gfs2/lops.c 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
sdp               422 fs/gfs2/lops.c 	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
sdp               423 fs/gfs2/lops.c 		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
sdp               501 fs/gfs2/lops.c 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
sdp               504 fs/gfs2/lops.c 	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
sdp               505 fs/gfs2/lops.c 	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
sdp               517 fs/gfs2/lops.c 		gfs2_map_journal_extents(sdp, jd);
sdp               536 fs/gfs2/lops.c 				sector_t sector = dblock << sdp->sd_fsb2bb_shift;
sdp               557 fs/gfs2/lops.c 			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
sdp               595 fs/gfs2/lops.c static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
sdp               638 fs/gfs2/lops.c static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
sdp               649 fs/gfs2/lops.c 	gfs2_log_lock(sdp);
sdp               656 fs/gfs2/lops.c 		gfs2_log_unlock(sdp);
sdp               657 fs/gfs2/lops.c 		page = gfs2_get_log_desc(sdp,
sdp               661 fs/gfs2/lops.c 		gfs2_log_lock(sdp);
sdp               675 fs/gfs2/lops.c 		gfs2_log_unlock(sdp);
sdp               676 fs/gfs2/lops.c 		gfs2_log_write_page(sdp, page);
sdp               677 fs/gfs2/lops.c 		gfs2_log_lock(sdp);
sdp               682 fs/gfs2/lops.c 			gfs2_log_unlock(sdp);
sdp               697 fs/gfs2/lops.c 				gfs2_log_write_page(sdp, page);
sdp               699 fs/gfs2/lops.c 				gfs2_log_write_bh(sdp, bd2->bd_bh);
sdp               701 fs/gfs2/lops.c 			gfs2_log_lock(sdp);
sdp               709 fs/gfs2/lops.c 	gfs2_log_unlock(sdp);
sdp               712 fs/gfs2/lops.c static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
sdp               714 fs/gfs2/lops.c 	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
sdp               719 fs/gfs2/lops.c 	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
sdp               722 fs/gfs2/lops.c static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
sdp               734 fs/gfs2/lops.c 		gfs2_unpin(sdp, bd->bd_bh, tr);
sdp               753 fs/gfs2/lops.c 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
sdp               780 fs/gfs2/lops.c 		if (gfs2_meta_check(sdp, bh_ip))
sdp               789 fs/gfs2/lops.c 				rgd = gfs2_blk2rgrpd(sdp, blkno, false);
sdp               792 fs/gfs2/lops.c 					fs_info(sdp, "Replaying 0x%llx but we "
sdp               795 fs/gfs2/lops.c 					fs_info(sdp, "busy:%d, pinned:%d\n",
sdp               824 fs/gfs2/lops.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               828 fs/gfs2/lops.c 		mapping = &sdp->sd_aspace;
sdp               840 fs/gfs2/lops.c 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
sdp               851 fs/gfs2/lops.c 	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
sdp               855 fs/gfs2/lops.c static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
sdp               859 fs/gfs2/lops.c 	struct list_head *head = &sdp->sd_log_revokes;
sdp               864 fs/gfs2/lops.c 	gfs2_write_revokes(sdp);
sdp               865 fs/gfs2/lops.c 	if (!sdp->sd_log_num_revoke)
sdp               868 fs/gfs2/lops.c 	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
sdp               869 fs/gfs2/lops.c 	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
sdp               873 fs/gfs2/lops.c 		sdp->sd_log_num_revoke--;
sdp               875 fs/gfs2/lops.c 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
sdp               877 fs/gfs2/lops.c 			gfs2_log_write_page(sdp, page);
sdp               890 fs/gfs2/lops.c 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
sdp               892 fs/gfs2/lops.c 	gfs2_log_write_page(sdp, page);
sdp               895 fs/gfs2/lops.c static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
sdp               897 fs/gfs2/lops.c 	struct list_head *head = &sdp->sd_log_revokes;
sdp               924 fs/gfs2/lops.c 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
sdp               944 fs/gfs2/lops.c 			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
sdp               946 fs/gfs2/lops.c 		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
sdp               972 fs/gfs2/lops.c 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
sdp               981 fs/gfs2/lops.c 	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
sdp               992 fs/gfs2/lops.c static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
sdp               994 fs/gfs2/lops.c 	unsigned int limit = databuf_limit(sdp);
sdp               999 fs/gfs2/lops.c 	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
sdp              1055 fs/gfs2/lops.c 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
sdp              1067 fs/gfs2/lops.c 	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
sdp              1071 fs/gfs2/lops.c static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
sdp              1083 fs/gfs2/lops.c 		gfs2_unpin(sdp, bd->bd_bh, tr);
sdp                21 fs/gfs2/lops.h extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp);
sdp                22 fs/gfs2/lops.h extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
sdp                24 fs/gfs2/lops.h extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
sdp                26 fs/gfs2/lops.h extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
sdp                30 fs/gfs2/lops.h static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
sdp                34 fs/gfs2/lops.h 	limit = (sdp->sd_sb.sb_bsize - BUF_OFFSET) / sizeof(__be64);
sdp                38 fs/gfs2/lops.h static inline unsigned int databuf_limit(struct gfs2_sbd *sdp)
sdp                42 fs/gfs2/lops.h 	limit = (sdp->sd_sb.sb_bsize - DATABUF_OFFSET) / (2 * sizeof(__be64));
sdp                46 fs/gfs2/lops.h static inline void lops_before_commit(struct gfs2_sbd *sdp,
sdp                52 fs/gfs2/lops.h 			gfs2_log_ops[x]->lo_before_commit(sdp, tr);
sdp                55 fs/gfs2/lops.h static inline void lops_after_commit(struct gfs2_sbd *sdp,
sdp                61 fs/gfs2/lops.h 			gfs2_log_ops[x]->lo_after_commit(sdp, tr);
sdp               113 fs/gfs2/meta_io.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               121 fs/gfs2/meta_io.c 		mapping = &sdp->sd_aspace;
sdp               123 fs/gfs2/meta_io.c 	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
sdp               142 fs/gfs2/meta_io.c 		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
sdp               150 fs/gfs2/meta_io.c 		map_bh(bh, sdp->sd_vfs, blkno);
sdp               250 fs/gfs2/meta_io.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               254 fs/gfs2/meta_io.c 	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags))) {
sdp               293 fs/gfs2/meta_io.c 			gfs2_io_error_bh_wd(sdp, bh);
sdp               310 fs/gfs2/meta_io.c int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
sdp               312 fs/gfs2/meta_io.c 	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
sdp               320 fs/gfs2/meta_io.c 			gfs2_io_error_bh_wd(sdp, bh);
sdp               323 fs/gfs2/meta_io.c 	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
sdp               332 fs/gfs2/meta_io.c 	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
sdp               339 fs/gfs2/meta_io.c 		atomic_dec(&sdp->sd_log_pinned);
sdp               350 fs/gfs2/meta_io.c 		spin_lock(&sdp->sd_ail_lock);
sdp               352 fs/gfs2/meta_io.c 			gfs2_trans_add_revoke(sdp, bd);
sdp               357 fs/gfs2/meta_io.c 		spin_unlock(&sdp->sd_ail_lock);
sdp               373 fs/gfs2/meta_io.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               380 fs/gfs2/meta_io.c 			gfs2_log_lock(sdp);
sdp               382 fs/gfs2/meta_io.c 			gfs2_log_unlock(sdp);
sdp               405 fs/gfs2/meta_io.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               416 fs/gfs2/meta_io.c 	if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
sdp               436 fs/gfs2/meta_io.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               438 fs/gfs2/meta_io.c 	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
sdp               439 fs/gfs2/meta_io.c 			  sdp->sd_sb.sb_bsize_shift;
sdp                54 fs/gfs2/meta_io.h extern int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
sdp                65 fs/gfs2/ops_fstype.c void free_sbd(struct gfs2_sbd *sdp)
sdp                67 fs/gfs2/ops_fstype.c 	if (sdp->sd_lkstats)
sdp                68 fs/gfs2/ops_fstype.c 		free_percpu(sdp->sd_lkstats);
sdp                69 fs/gfs2/ops_fstype.c 	kfree(sdp);
sdp                74 fs/gfs2/ops_fstype.c 	struct gfs2_sbd *sdp;
sdp                77 fs/gfs2/ops_fstype.c 	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
sdp                78 fs/gfs2/ops_fstype.c 	if (!sdp)
sdp                81 fs/gfs2/ops_fstype.c 	sdp->sd_vfs = sb;
sdp                82 fs/gfs2/ops_fstype.c 	sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
sdp                83 fs/gfs2/ops_fstype.c 	if (!sdp->sd_lkstats)
sdp                85 fs/gfs2/ops_fstype.c 	sb->s_fs_info = sdp;
sdp                87 fs/gfs2/ops_fstype.c 	set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
sdp                88 fs/gfs2/ops_fstype.c 	gfs2_tune_init(&sdp->sd_tune);
sdp                90 fs/gfs2/ops_fstype.c 	init_waitqueue_head(&sdp->sd_glock_wait);
sdp                91 fs/gfs2/ops_fstype.c 	init_waitqueue_head(&sdp->sd_async_glock_wait);
sdp                92 fs/gfs2/ops_fstype.c 	atomic_set(&sdp->sd_glock_disposal, 0);
sdp                93 fs/gfs2/ops_fstype.c 	init_completion(&sdp->sd_locking_init);
sdp                94 fs/gfs2/ops_fstype.c 	init_completion(&sdp->sd_wdack);
sdp                95 fs/gfs2/ops_fstype.c 	spin_lock_init(&sdp->sd_statfs_spin);
sdp                97 fs/gfs2/ops_fstype.c 	spin_lock_init(&sdp->sd_rindex_spin);
sdp                98 fs/gfs2/ops_fstype.c 	sdp->sd_rindex_tree.rb_node = NULL;
sdp               100 fs/gfs2/ops_fstype.c 	INIT_LIST_HEAD(&sdp->sd_jindex_list);
sdp               101 fs/gfs2/ops_fstype.c 	spin_lock_init(&sdp->sd_jindex_spin);
sdp               102 fs/gfs2/ops_fstype.c 	mutex_init(&sdp->sd_jindex_mutex);
sdp               103 fs/gfs2/ops_fstype.c 	init_completion(&sdp->sd_journal_ready);
sdp               105 fs/gfs2/ops_fstype.c 	INIT_LIST_HEAD(&sdp->sd_quota_list);
sdp               106 fs/gfs2/ops_fstype.c 	mutex_init(&sdp->sd_quota_mutex);
sdp               107 fs/gfs2/ops_fstype.c 	mutex_init(&sdp->sd_quota_sync_mutex);
sdp               108 fs/gfs2/ops_fstype.c 	init_waitqueue_head(&sdp->sd_quota_wait);
sdp               109 fs/gfs2/ops_fstype.c 	INIT_LIST_HEAD(&sdp->sd_trunc_list);
sdp               110 fs/gfs2/ops_fstype.c 	spin_lock_init(&sdp->sd_trunc_lock);
sdp               111 fs/gfs2/ops_fstype.c 	spin_lock_init(&sdp->sd_bitmap_lock);
sdp               113 fs/gfs2/ops_fstype.c 	mapping = &sdp->sd_aspace;
sdp               123 fs/gfs2/ops_fstype.c 	spin_lock_init(&sdp->sd_log_lock);
sdp               124 fs/gfs2/ops_fstype.c 	atomic_set(&sdp->sd_log_pinned, 0);
sdp               125 fs/gfs2/ops_fstype.c 	INIT_LIST_HEAD(&sdp->sd_log_revokes);
sdp               126 fs/gfs2/ops_fstype.c 	INIT_LIST_HEAD(&sdp->sd_log_ordered);
sdp               127 fs/gfs2/ops_fstype.c 	spin_lock_init(&sdp->sd_ordered_lock);
sdp               129 fs/gfs2/ops_fstype.c 	init_waitqueue_head(&sdp->sd_log_waitq);
sdp               130 fs/gfs2/ops_fstype.c 	init_waitqueue_head(&sdp->sd_logd_waitq);
sdp               131 fs/gfs2/ops_fstype.c 	spin_lock_init(&sdp->sd_ail_lock);
sdp               132 fs/gfs2/ops_fstype.c 	INIT_LIST_HEAD(&sdp->sd_ail1_list);
sdp               133 fs/gfs2/ops_fstype.c 	INIT_LIST_HEAD(&sdp->sd_ail2_list);
sdp               135 fs/gfs2/ops_fstype.c 	init_rwsem(&sdp->sd_log_flush_lock);
sdp               136 fs/gfs2/ops_fstype.c 	atomic_set(&sdp->sd_log_in_flight, 0);
sdp               137 fs/gfs2/ops_fstype.c 	atomic_set(&sdp->sd_reserving_log, 0);
sdp               138 fs/gfs2/ops_fstype.c 	init_waitqueue_head(&sdp->sd_reserving_log_wait);
sdp               139 fs/gfs2/ops_fstype.c 	init_waitqueue_head(&sdp->sd_log_flush_wait);
sdp               140 fs/gfs2/ops_fstype.c 	atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
sdp               141 fs/gfs2/ops_fstype.c 	mutex_init(&sdp->sd_freeze_mutex);
sdp               143 fs/gfs2/ops_fstype.c 	return sdp;
sdp               146 fs/gfs2/ops_fstype.c 	free_sbd(sdp);
sdp               161 fs/gfs2/ops_fstype.c static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
sdp               163 fs/gfs2/ops_fstype.c 	struct gfs2_sb_host *sb = &sdp->sd_sb;
sdp               178 fs/gfs2/ops_fstype.c 	fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
sdp               194 fs/gfs2/ops_fstype.c static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
sdp               196 fs/gfs2/ops_fstype.c 	struct gfs2_sb_host *sb = &sdp->sd_sb;
sdp               197 fs/gfs2/ops_fstype.c 	struct super_block *s = sdp->sd_vfs;
sdp               236 fs/gfs2/ops_fstype.c static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
sdp               238 fs/gfs2/ops_fstype.c 	struct super_block *sb = sdp->sd_vfs;
sdp               267 fs/gfs2/ops_fstype.c 	gfs2_sb_in(sdp, p);
sdp               270 fs/gfs2/ops_fstype.c 	return gfs2_check_sb(sdp, silent);
sdp               280 fs/gfs2/ops_fstype.c static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
sdp               287 fs/gfs2/ops_fstype.c 	error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
sdp               290 fs/gfs2/ops_fstype.c 			fs_err(sdp, "can't read superblock\n");
sdp               294 fs/gfs2/ops_fstype.c 	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
sdp               296 fs/gfs2/ops_fstype.c 	sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp               297 fs/gfs2/ops_fstype.c 	sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
sdp               299 fs/gfs2/ops_fstype.c 	sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
sdp               301 fs/gfs2/ops_fstype.c 	sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
sdp               302 fs/gfs2/ops_fstype.c 	sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
sdp               303 fs/gfs2/ops_fstype.c 	sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
sdp               304 fs/gfs2/ops_fstype.c 	sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
sdp               305 fs/gfs2/ops_fstype.c 	sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
sdp               308 fs/gfs2/ops_fstype.c 	sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
sdp               315 fs/gfs2/ops_fstype.c 			     sdp->sd_jbsize);
sdp               318 fs/gfs2/ops_fstype.c 	for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
sdp               319 fs/gfs2/ops_fstype.c 		tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
sdp               325 fs/gfs2/ops_fstype.c 	sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;
sdp               327 fs/gfs2/ops_fstype.c 	sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
sdp               329 fs/gfs2/ops_fstype.c 	sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
sdp               334 fs/gfs2/ops_fstype.c 		space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
sdp               336 fs/gfs2/ops_fstype.c 		m = do_div(d, sdp->sd_inptrs);
sdp               338 fs/gfs2/ops_fstype.c 		if (d != sdp->sd_heightsize[x - 1] || m)
sdp               340 fs/gfs2/ops_fstype.c 		sdp->sd_heightsize[x] = space;
sdp               342 fs/gfs2/ops_fstype.c 	sdp->sd_max_height = x;
sdp               343 fs/gfs2/ops_fstype.c 	sdp->sd_heightsize[x] = ~0;
sdp               344 fs/gfs2/ops_fstype.c 	gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);
sdp               346 fs/gfs2/ops_fstype.c 	sdp->sd_max_dents_per_leaf = (sdp->sd_sb.sb_bsize -
sdp               352 fs/gfs2/ops_fstype.c static int init_names(struct gfs2_sbd *sdp, int silent)
sdp               357 fs/gfs2/ops_fstype.c 	proto = sdp->sd_args.ar_lockproto;
sdp               358 fs/gfs2/ops_fstype.c 	table = sdp->sd_args.ar_locktable;
sdp               363 fs/gfs2/ops_fstype.c 		error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
sdp               368 fs/gfs2/ops_fstype.c 			proto = sdp->sd_sb.sb_lockproto;
sdp               370 fs/gfs2/ops_fstype.c 			table = sdp->sd_sb.sb_locktable;
sdp               374 fs/gfs2/ops_fstype.c 		table = sdp->sd_vfs->s_id;
sdp               376 fs/gfs2/ops_fstype.c 	strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
sdp               377 fs/gfs2/ops_fstype.c 	strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);
sdp               379 fs/gfs2/ops_fstype.c 	table = sdp->sd_table_name;
sdp               386 fs/gfs2/ops_fstype.c static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
sdp               394 fs/gfs2/ops_fstype.c 	error = gfs2_glock_nq_num(sdp,
sdp               399 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't acquire mount glock: %d\n", error);
sdp               403 fs/gfs2/ops_fstype.c 	error = gfs2_glock_nq_num(sdp,
sdp               407 fs/gfs2/ops_fstype.c 				  &sdp->sd_live_gh);
sdp               409 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't acquire live glock: %d\n", error);
sdp               413 fs/gfs2/ops_fstype.c 	error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
sdp               414 fs/gfs2/ops_fstype.c 			       CREATE, &sdp->sd_rename_gl);
sdp               416 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't create rename glock: %d\n", error);
sdp               420 fs/gfs2/ops_fstype.c 	error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
sdp               421 fs/gfs2/ops_fstype.c 			       CREATE, &sdp->sd_freeze_gl);
sdp               423 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't create transaction glock: %d\n", error);
sdp               430 fs/gfs2/ops_fstype.c 	gfs2_glock_put(sdp->sd_freeze_gl);
sdp               432 fs/gfs2/ops_fstype.c 	gfs2_glock_put(sdp->sd_rename_gl);
sdp               434 fs/gfs2/ops_fstype.c 	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
sdp               444 fs/gfs2/ops_fstype.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp               451 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
sdp               456 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't alloc %s dentry\n", name);
sdp               463 fs/gfs2/ops_fstype.c static int init_sb(struct gfs2_sbd *sdp, int silent)
sdp               465 fs/gfs2/ops_fstype.c 	struct super_block *sb = sdp->sd_vfs;
sdp               470 fs/gfs2/ops_fstype.c 	ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
sdp               473 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't acquire superblock glock: %d\n", ret);
sdp               477 fs/gfs2/ops_fstype.c 	ret = gfs2_read_sb(sdp, silent);
sdp               479 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't read superblock: %d\n", ret);
sdp               484 fs/gfs2/ops_fstype.c 	if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
sdp               486 fs/gfs2/ops_fstype.c 		fs_err(sdp, "FS block size (%u) is too small for device "
sdp               488 fs/gfs2/ops_fstype.c 		       sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
sdp               491 fs/gfs2/ops_fstype.c 	if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
sdp               493 fs/gfs2/ops_fstype.c 		fs_err(sdp, "FS block size (%u) is too big for machine "
sdp               495 fs/gfs2/ops_fstype.c 		       sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
sdp               498 fs/gfs2/ops_fstype.c 	sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);
sdp               501 fs/gfs2/ops_fstype.c 	no_addr = sdp->sd_sb.sb_root_dir.no_addr;
sdp               502 fs/gfs2/ops_fstype.c 	ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root");
sdp               507 fs/gfs2/ops_fstype.c 	no_addr = sdp->sd_sb.sb_master_dir.no_addr;
sdp               508 fs/gfs2/ops_fstype.c 	ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master");
sdp               510 fs/gfs2/ops_fstype.c 		dput(sdp->sd_root_dir);
sdp               513 fs/gfs2/ops_fstype.c 	sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir);
sdp               519 fs/gfs2/ops_fstype.c static void gfs2_others_may_mount(struct gfs2_sbd *sdp)
sdp               524 fs/gfs2/ops_fstype.c 	fs_info(sdp, "first mount done, others may mount\n");
sdp               526 fs/gfs2/ops_fstype.c 	if (sdp->sd_lockstruct.ls_ops->lm_first_done)
sdp               527 fs/gfs2/ops_fstype.c 		sdp->sd_lockstruct.ls_ops->lm_first_done(sdp);
sdp               529 fs/gfs2/ops_fstype.c 	kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
sdp               540 fs/gfs2/ops_fstype.c static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
sdp               542 fs/gfs2/ops_fstype.c 	struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
sdp               550 fs/gfs2/ops_fstype.c 	mutex_lock(&sdp->sd_jindex_mutex);
sdp               557 fs/gfs2/ops_fstype.c 		name.len = sprintf(buf, "journal%u", sdp->sd_journals);
sdp               560 fs/gfs2/ops_fstype.c 		error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
sdp               580 fs/gfs2/ops_fstype.c 		jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
sdp               590 fs/gfs2/ops_fstype.c 		spin_lock(&sdp->sd_jindex_spin);
sdp               591 fs/gfs2/ops_fstype.c 		jd->jd_jid = sdp->sd_journals++;
sdp               592 fs/gfs2/ops_fstype.c 		list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
sdp               593 fs/gfs2/ops_fstype.c 		spin_unlock(&sdp->sd_jindex_spin);
sdp               596 fs/gfs2/ops_fstype.c 	mutex_unlock(&sdp->sd_jindex_mutex);
sdp               608 fs/gfs2/ops_fstype.c static int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
sdp               619 fs/gfs2/ops_fstype.c 		fs_err(sdp, "Error locking journal for spectator mount.\n");
sdp               624 fs/gfs2/ops_fstype.c 		fs_err(sdp, "Error checking journal for spectator mount.\n");
sdp               629 fs/gfs2/ops_fstype.c 		fs_err(sdp, "Error parsing journal for spectator mount.\n");
sdp               634 fs/gfs2/ops_fstype.c 		fs_err(sdp, "jid=%u: Journal is dirty, so the first mounter "
sdp               643 fs/gfs2/ops_fstype.c static int init_journal(struct gfs2_sbd *sdp, int undo)
sdp               645 fs/gfs2/ops_fstype.c 	struct inode *master = d_inode(sdp->sd_master_dir);
sdp               656 fs/gfs2/ops_fstype.c 	sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
sdp               657 fs/gfs2/ops_fstype.c 	if (IS_ERR(sdp->sd_jindex)) {
sdp               658 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't lookup journal index: %d\n", error);
sdp               659 fs/gfs2/ops_fstype.c 		return PTR_ERR(sdp->sd_jindex);
sdp               664 fs/gfs2/ops_fstype.c 	error = gfs2_jindex_hold(sdp, &ji_gh);
sdp               666 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't read journal index: %d\n", error);
sdp               671 fs/gfs2/ops_fstype.c 	if (!gfs2_jindex_size(sdp)) {
sdp               672 fs/gfs2/ops_fstype.c 		fs_err(sdp, "no journals!\n");
sdp               676 fs/gfs2/ops_fstype.c 	atomic_set(&sdp->sd_log_blks_needed, 0);
sdp               677 fs/gfs2/ops_fstype.c 	if (sdp->sd_args.ar_spectator) {
sdp               678 fs/gfs2/ops_fstype.c 		sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
sdp               679 fs/gfs2/ops_fstype.c 		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
sdp               680 fs/gfs2/ops_fstype.c 		atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
sdp               681 fs/gfs2/ops_fstype.c 		atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
sdp               683 fs/gfs2/ops_fstype.c 		if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
sdp               684 fs/gfs2/ops_fstype.c 			fs_err(sdp, "can't mount journal #%u\n",
sdp               685 fs/gfs2/ops_fstype.c 			       sdp->sd_lockstruct.ls_jid);
sdp               686 fs/gfs2/ops_fstype.c 			fs_err(sdp, "there are only %u journals (0 - %u)\n",
sdp               687 fs/gfs2/ops_fstype.c 			       gfs2_jindex_size(sdp),
sdp               688 fs/gfs2/ops_fstype.c 			       gfs2_jindex_size(sdp) - 1);
sdp               691 fs/gfs2/ops_fstype.c 		sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);
sdp               693 fs/gfs2/ops_fstype.c 		error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
sdp               696 fs/gfs2/ops_fstype.c 					  &sdp->sd_journal_gh);
sdp               698 fs/gfs2/ops_fstype.c 			fs_err(sdp, "can't acquire journal glock: %d\n", error);
sdp               702 fs/gfs2/ops_fstype.c 		ip = GFS2_I(sdp->sd_jdesc->jd_inode);
sdp               705 fs/gfs2/ops_fstype.c 					   &sdp->sd_jinode_gh);
sdp               707 fs/gfs2/ops_fstype.c 			fs_err(sdp, "can't acquire journal inode glock: %d\n",
sdp               712 fs/gfs2/ops_fstype.c 		error = gfs2_jdesc_check(sdp->sd_jdesc);
sdp               714 fs/gfs2/ops_fstype.c 			fs_err(sdp, "my journal (%u) is bad: %d\n",
sdp               715 fs/gfs2/ops_fstype.c 			       sdp->sd_jdesc->jd_jid, error);
sdp               718 fs/gfs2/ops_fstype.c 		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
sdp               719 fs/gfs2/ops_fstype.c 		atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
sdp               720 fs/gfs2/ops_fstype.c 		atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
sdp               723 fs/gfs2/ops_fstype.c 		gfs2_map_journal_extents(sdp, sdp->sd_jdesc);
sdp               725 fs/gfs2/ops_fstype.c 	trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));
sdp               727 fs/gfs2/ops_fstype.c 	if (sdp->sd_lockstruct.ls_first) {
sdp               729 fs/gfs2/ops_fstype.c 		for (x = 0; x < sdp->sd_journals; x++) {
sdp               730 fs/gfs2/ops_fstype.c 			struct gfs2_jdesc *jd = gfs2_jdesc_find(sdp, x);
sdp               732 fs/gfs2/ops_fstype.c 			if (sdp->sd_args.ar_spectator) {
sdp               733 fs/gfs2/ops_fstype.c 				error = check_journal_clean(sdp, jd);
sdp               740 fs/gfs2/ops_fstype.c 				fs_err(sdp, "error recovering journal %u: %d\n",
sdp               746 fs/gfs2/ops_fstype.c 		gfs2_others_may_mount(sdp);
sdp               747 fs/gfs2/ops_fstype.c 	} else if (!sdp->sd_args.ar_spectator) {
sdp               748 fs/gfs2/ops_fstype.c 		error = gfs2_recover_journal(sdp->sd_jdesc, true);
sdp               750 fs/gfs2/ops_fstype.c 			fs_err(sdp, "error recovering my journal: %d\n", error);
sdp               755 fs/gfs2/ops_fstype.c 	sdp->sd_log_idle = 1;
sdp               756 fs/gfs2/ops_fstype.c 	set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
sdp               759 fs/gfs2/ops_fstype.c 	INIT_WORK(&sdp->sd_freeze_work, gfs2_freeze_func);
sdp               763 fs/gfs2/ops_fstype.c 	if (!sdp->sd_args.ar_spectator)
sdp               764 fs/gfs2/ops_fstype.c 		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
sdp               766 fs/gfs2/ops_fstype.c 	if (!sdp->sd_args.ar_spectator)
sdp               767 fs/gfs2/ops_fstype.c 		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
sdp               769 fs/gfs2/ops_fstype.c 	gfs2_jindex_free(sdp);
sdp               773 fs/gfs2/ops_fstype.c 	iput(sdp->sd_jindex);
sdp               779 fs/gfs2/ops_fstype.c static int init_inodes(struct gfs2_sbd *sdp, int undo)
sdp               782 fs/gfs2/ops_fstype.c 	struct inode *master = d_inode(sdp->sd_master_dir);
sdp               787 fs/gfs2/ops_fstype.c 	error = init_journal(sdp, undo);
sdp               788 fs/gfs2/ops_fstype.c 	complete_all(&sdp->sd_journal_ready);
sdp               793 fs/gfs2/ops_fstype.c 	sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
sdp               794 fs/gfs2/ops_fstype.c 	if (IS_ERR(sdp->sd_statfs_inode)) {
sdp               795 fs/gfs2/ops_fstype.c 		error = PTR_ERR(sdp->sd_statfs_inode);
sdp               796 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't read in statfs inode: %d\n", error);
sdp               801 fs/gfs2/ops_fstype.c 	sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
sdp               802 fs/gfs2/ops_fstype.c 	if (IS_ERR(sdp->sd_rindex)) {
sdp               803 fs/gfs2/ops_fstype.c 		error = PTR_ERR(sdp->sd_rindex);
sdp               804 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't get resource index inode: %d\n", error);
sdp               807 fs/gfs2/ops_fstype.c 	sdp->sd_rindex_uptodate = 0;
sdp               810 fs/gfs2/ops_fstype.c 	sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
sdp               811 fs/gfs2/ops_fstype.c 	if (IS_ERR(sdp->sd_quota_inode)) {
sdp               812 fs/gfs2/ops_fstype.c 		error = PTR_ERR(sdp->sd_quota_inode);
sdp               813 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't get quota file inode: %d\n", error);
sdp               820 fs/gfs2/ops_fstype.c 	lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,
sdp               823 fs/gfs2/ops_fstype.c 	error = gfs2_rindex_update(sdp);
sdp               830 fs/gfs2/ops_fstype.c 	iput(sdp->sd_quota_inode);
sdp               832 fs/gfs2/ops_fstype.c 	gfs2_clear_rgrpd(sdp);
sdp               833 fs/gfs2/ops_fstype.c 	iput(sdp->sd_rindex);
sdp               835 fs/gfs2/ops_fstype.c 	iput(sdp->sd_statfs_inode);
sdp               837 fs/gfs2/ops_fstype.c 	init_journal(sdp, UNDO);
sdp               842 fs/gfs2/ops_fstype.c static int init_per_node(struct gfs2_sbd *sdp, int undo)
sdp               848 fs/gfs2/ops_fstype.c 	struct inode *master = d_inode(sdp->sd_master_dir);
sdp               850 fs/gfs2/ops_fstype.c 	if (sdp->sd_args.ar_spectator)
sdp               859 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't find per_node directory: %d\n", error);
sdp               863 fs/gfs2/ops_fstype.c 	sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid);
sdp               864 fs/gfs2/ops_fstype.c 	sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf);
sdp               865 fs/gfs2/ops_fstype.c 	if (IS_ERR(sdp->sd_sc_inode)) {
sdp               866 fs/gfs2/ops_fstype.c 		error = PTR_ERR(sdp->sd_sc_inode);
sdp               867 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't find local \"sc\" file: %d\n", error);
sdp               871 fs/gfs2/ops_fstype.c 	sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
sdp               872 fs/gfs2/ops_fstype.c 	sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
sdp               873 fs/gfs2/ops_fstype.c 	if (IS_ERR(sdp->sd_qc_inode)) {
sdp               874 fs/gfs2/ops_fstype.c 		error = PTR_ERR(sdp->sd_qc_inode);
sdp               875 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
sdp               882 fs/gfs2/ops_fstype.c 	ip = GFS2_I(sdp->sd_sc_inode);
sdp               884 fs/gfs2/ops_fstype.c 				   &sdp->sd_sc_gh);
sdp               886 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
sdp               890 fs/gfs2/ops_fstype.c 	ip = GFS2_I(sdp->sd_qc_inode);
sdp               892 fs/gfs2/ops_fstype.c 				   &sdp->sd_qc_gh);
sdp               894 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
sdp               901 fs/gfs2/ops_fstype.c 	gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
sdp               903 fs/gfs2/ops_fstype.c 	gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
sdp               905 fs/gfs2/ops_fstype.c 	iput(sdp->sd_qc_inode);
sdp               907 fs/gfs2/ops_fstype.c 	iput(sdp->sd_sc_inode);
sdp               933 fs/gfs2/ops_fstype.c static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
sdp               936 fs/gfs2/ops_fstype.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               937 fs/gfs2/ops_fstype.c 	struct gfs2_args *args = &sdp->sd_args;
sdp               938 fs/gfs2/ops_fstype.c 	const char *proto = sdp->sd_proto_name;
sdp               939 fs/gfs2/ops_fstype.c 	const char *table = sdp->sd_table_name;
sdp               945 fs/gfs2/ops_fstype.c 		sdp->sd_args.ar_localflocks = 1;
sdp               955 fs/gfs2/ops_fstype.c 	fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
sdp               973 fs/gfs2/ops_fstype.c 			if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))
sdp               989 fs/gfs2/ops_fstype.c 			fs_info(sdp, "unknown hostdata (%s)\n", o);
sdp               995 fs/gfs2/ops_fstype.c 		fs_info(sdp, "Now mounting FS...\n");
sdp               996 fs/gfs2/ops_fstype.c 		complete_all(&sdp->sd_locking_init);
sdp               999 fs/gfs2/ops_fstype.c 	ret = lm->lm_mount(sdp, table);
sdp              1001 fs/gfs2/ops_fstype.c 		fs_info(sdp, "Joined cluster. Now mounting FS...\n");
sdp              1002 fs/gfs2/ops_fstype.c 	complete_all(&sdp->sd_locking_init);
sdp              1006 fs/gfs2/ops_fstype.c void gfs2_lm_unmount(struct gfs2_sbd *sdp)
sdp              1008 fs/gfs2/ops_fstype.c 	const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
sdp              1009 fs/gfs2/ops_fstype.c 	if (likely(!test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) &&
sdp              1011 fs/gfs2/ops_fstype.c 		lm->lm_unmount(sdp);
sdp              1014 fs/gfs2/ops_fstype.c static int wait_on_journal(struct gfs2_sbd *sdp)
sdp              1016 fs/gfs2/ops_fstype.c 	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
sdp              1019 fs/gfs2/ops_fstype.c 	return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, TASK_INTERRUPTIBLE)
sdp              1023 fs/gfs2/ops_fstype.c void gfs2_online_uevent(struct gfs2_sbd *sdp)
sdp              1025 fs/gfs2/ops_fstype.c 	struct super_block *sb = sdp->sd_vfs;
sdp              1030 fs/gfs2/ops_fstype.c 	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
sdp              1031 fs/gfs2/ops_fstype.c 	kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
sdp              1046 fs/gfs2/ops_fstype.c 	struct gfs2_sbd *sdp;
sdp              1050 fs/gfs2/ops_fstype.c 	sdp = init_sbd(sb);
sdp              1051 fs/gfs2/ops_fstype.c 	if (!sdp) {
sdp              1055 fs/gfs2/ops_fstype.c 	sdp->sd_args = *args;
sdp              1057 fs/gfs2/ops_fstype.c 	if (sdp->sd_args.ar_spectator) {
sdp              1059 fs/gfs2/ops_fstype.c 		set_bit(SDF_RORECOVERY, &sdp->sd_flags);
sdp              1061 fs/gfs2/ops_fstype.c 	if (sdp->sd_args.ar_posix_acl)
sdp              1063 fs/gfs2/ops_fstype.c 	if (sdp->sd_args.ar_nobarrier)
sdp              1064 fs/gfs2/ops_fstype.c 		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
sdp              1080 fs/gfs2/ops_fstype.c 	sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
sdp              1081 fs/gfs2/ops_fstype.c 	sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
sdp              1082 fs/gfs2/ops_fstype.c 	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
sdp              1084 fs/gfs2/ops_fstype.c 	sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp              1086 fs/gfs2/ops_fstype.c 	sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
sdp              1087 fs/gfs2/ops_fstype.c 	sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
sdp              1088 fs/gfs2/ops_fstype.c 	if (sdp->sd_args.ar_statfs_quantum) {
sdp              1089 fs/gfs2/ops_fstype.c 		sdp->sd_tune.gt_statfs_slow = 0;
sdp              1090 fs/gfs2/ops_fstype.c 		sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
sdp              1092 fs/gfs2/ops_fstype.c 		sdp->sd_tune.gt_statfs_slow = 1;
sdp              1093 fs/gfs2/ops_fstype.c 		sdp->sd_tune.gt_statfs_quantum = 30;
sdp              1096 fs/gfs2/ops_fstype.c 	error = init_names(sdp, silent);
sdp              1100 fs/gfs2/ops_fstype.c 		free_sbd(sdp);
sdp              1105 fs/gfs2/ops_fstype.c 	snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);
sdp              1107 fs/gfs2/ops_fstype.c 	error = gfs2_sys_fs_add(sdp);
sdp              1118 fs/gfs2/ops_fstype.c 	gfs2_create_debugfs_file(sdp);
sdp              1120 fs/gfs2/ops_fstype.c 	error = gfs2_lm_mount(sdp, silent);
sdp              1124 fs/gfs2/ops_fstype.c 	error = init_locking(sdp, &mount_gh, DO);
sdp              1128 fs/gfs2/ops_fstype.c 	error = init_sb(sdp, silent);
sdp              1132 fs/gfs2/ops_fstype.c 	error = wait_on_journal(sdp);
sdp              1144 fs/gfs2/ops_fstype.c 	if (sdp->sd_lockstruct.ls_jid < 0) {
sdp              1145 fs/gfs2/ops_fstype.c 		error = sdp->sd_lockstruct.ls_jid;
sdp              1146 fs/gfs2/ops_fstype.c 		sdp->sd_lockstruct.ls_jid = 0;
sdp              1150 fs/gfs2/ops_fstype.c 	if (sdp->sd_args.ar_spectator)
sdp              1151 fs/gfs2/ops_fstype.c 		snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.s",
sdp              1152 fs/gfs2/ops_fstype.c 			 sdp->sd_table_name);
sdp              1154 fs/gfs2/ops_fstype.c 		snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s.%u",
sdp              1155 fs/gfs2/ops_fstype.c 			 sdp->sd_table_name, sdp->sd_lockstruct.ls_jid);
sdp              1157 fs/gfs2/ops_fstype.c 	error = init_inodes(sdp, DO);
sdp              1161 fs/gfs2/ops_fstype.c 	error = init_per_node(sdp, DO);
sdp              1165 fs/gfs2/ops_fstype.c 	error = gfs2_statfs_init(sdp);
sdp              1167 fs/gfs2/ops_fstype.c 		fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
sdp              1172 fs/gfs2/ops_fstype.c 		error = gfs2_make_fs_rw(sdp);
sdp              1174 fs/gfs2/ops_fstype.c 			fs_err(sdp, "can't make FS RW: %d\n", error);
sdp              1180 fs/gfs2/ops_fstype.c 	gfs2_online_uevent(sdp);
sdp              1184 fs/gfs2/ops_fstype.c 	init_per_node(sdp, UNDO);
sdp              1186 fs/gfs2/ops_fstype.c 	init_inodes(sdp, UNDO);
sdp              1188 fs/gfs2/ops_fstype.c 	if (sdp->sd_root_dir)
sdp              1189 fs/gfs2/ops_fstype.c 		dput(sdp->sd_root_dir);
sdp              1190 fs/gfs2/ops_fstype.c 	if (sdp->sd_master_dir)
sdp              1191 fs/gfs2/ops_fstype.c 		dput(sdp->sd_master_dir);
sdp              1196 fs/gfs2/ops_fstype.c 	init_locking(sdp, &mount_gh, UNDO);
sdp              1198 fs/gfs2/ops_fstype.c 	complete_all(&sdp->sd_journal_ready);
sdp              1199 fs/gfs2/ops_fstype.c 	gfs2_gl_hash_clear(sdp);
sdp              1200 fs/gfs2/ops_fstype.c 	gfs2_lm_unmount(sdp);
sdp              1202 fs/gfs2/ops_fstype.c 	gfs2_delete_debugfs_file(sdp);
sdp              1205 fs/gfs2/ops_fstype.c 	gfs2_sys_fs_del(sdp);
sdp              1219 fs/gfs2/ops_fstype.c 	struct gfs2_sbd *sdp;
sdp              1226 fs/gfs2/ops_fstype.c 	sdp = fc->root->d_sb->s_fs_info;
sdp              1229 fs/gfs2/ops_fstype.c 		fc->root = dget(sdp->sd_master_dir);
sdp              1231 fs/gfs2/ops_fstype.c 		fc->root = dget(sdp->sd_root_dir);
sdp              1449 fs/gfs2/ops_fstype.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp              1450 fs/gfs2/ops_fstype.c 	struct gfs2_args *oldargs = &sdp->sd_args;
sdp              1452 fs/gfs2/ops_fstype.c 	struct gfs2_tune *gt = &sdp->sd_tune;
sdp              1495 fs/gfs2/ops_fstype.c 			error = gfs2_make_fs_ro(sdp);
sdp              1499 fs/gfs2/ops_fstype.c 			error = gfs2_make_fs_rw(sdp);
sdp              1504 fs/gfs2/ops_fstype.c 	sdp->sd_args = *newargs;
sdp              1506 fs/gfs2/ops_fstype.c 	if (sdp->sd_args.ar_posix_acl)
sdp              1510 fs/gfs2/ops_fstype.c 	if (sdp->sd_args.ar_nobarrier)
sdp              1511 fs/gfs2/ops_fstype.c 		set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
sdp              1513 fs/gfs2/ops_fstype.c 		clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
sdp              1527 fs/gfs2/ops_fstype.c 	gfs2_online_uevent(sdp);
sdp              1548 fs/gfs2/ops_fstype.c 		struct gfs2_sbd *sdp = fc->root->d_sb->s_fs_info;
sdp              1550 fs/gfs2/ops_fstype.c 		*args = sdp->sd_args;
sdp              1578 fs/gfs2/ops_fstype.c 	struct gfs2_sbd *sdp;
sdp              1603 fs/gfs2/ops_fstype.c 	sdp = s->s_fs_info;
sdp              1604 fs/gfs2/ops_fstype.c 	fc->root = dget(sdp->sd_master_dir);
sdp              1626 fs/gfs2/ops_fstype.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp              1628 fs/gfs2/ops_fstype.c 	if (sdp == NULL) {
sdp              1633 fs/gfs2/ops_fstype.c 	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SYNC | GFS2_LFC_KILL_SB);
sdp              1634 fs/gfs2/ops_fstype.c 	dput(sdp->sd_root_dir);
sdp              1635 fs/gfs2/ops_fstype.c 	dput(sdp->sd_master_dir);
sdp              1636 fs/gfs2/ops_fstype.c 	sdp->sd_root_dir = NULL;
sdp              1637 fs/gfs2/ops_fstype.c 	sdp->sd_master_dir = NULL;
sdp                85 fs/gfs2/quota.c static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
sdp                90 fs/gfs2/quota.c 	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
sdp               115 fs/gfs2/quota.c 	struct gfs2_sbd *sdp;
sdp               119 fs/gfs2/quota.c 		sdp = qd->qd_gl->gl_name.ln_sbd;
sdp               132 fs/gfs2/quota.c 		gfs2_assert_warn(sdp, !qd->qd_change);
sdp               133 fs/gfs2/quota.c 		gfs2_assert_warn(sdp, !qd->qd_slot_count);
sdp               134 fs/gfs2/quota.c 		gfs2_assert_warn(sdp, !qd->qd_bh_count);
sdp               137 fs/gfs2/quota.c 		atomic_dec(&sdp->sd_quota_count);
sdp               211 fs/gfs2/quota.c static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
sdp               220 fs/gfs2/quota.c 	qd->qd_sbd = sdp;
sdp               228 fs/gfs2/quota.c 	error = gfs2_glock_get(sdp, qd2index(qd),
sdp               241 fs/gfs2/quota.c 						     const struct gfs2_sbd *sdp,
sdp               250 fs/gfs2/quota.c 		if (qd->qd_sbd != sdp)
sdp               262 fs/gfs2/quota.c static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
sdp               266 fs/gfs2/quota.c 	unsigned int hash = gfs2_qd_hash(sdp, qid);
sdp               269 fs/gfs2/quota.c 	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
sdp               275 fs/gfs2/quota.c 	new_qd = qd_alloc(hash, sdp, qid);
sdp               281 fs/gfs2/quota.c 	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
sdp               284 fs/gfs2/quota.c 		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
sdp               286 fs/gfs2/quota.c 		atomic_inc(&sdp->sd_quota_count);
sdp               302 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
sdp               303 fs/gfs2/quota.c 	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
sdp               320 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = qd->qd_sbd;
sdp               324 fs/gfs2/quota.c 	spin_lock(&sdp->sd_bitmap_lock);
sdp               329 fs/gfs2/quota.c 	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
sdp               330 fs/gfs2/quota.c 	if (bit < sdp->sd_quota_slots) {
sdp               331 fs/gfs2/quota.c 		set_bit(bit, sdp->sd_quota_bitmap);
sdp               337 fs/gfs2/quota.c 	spin_unlock(&sdp->sd_bitmap_lock);
sdp               344 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = qd->qd_sbd;
sdp               346 fs/gfs2/quota.c 	spin_lock(&sdp->sd_bitmap_lock);
sdp               347 fs/gfs2/quota.c 	gfs2_assert(sdp, qd->qd_slot_count);
sdp               349 fs/gfs2/quota.c 	spin_unlock(&sdp->sd_bitmap_lock);
sdp               354 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = qd->qd_sbd;
sdp               356 fs/gfs2/quota.c 	spin_lock(&sdp->sd_bitmap_lock);
sdp               357 fs/gfs2/quota.c 	gfs2_assert(sdp, qd->qd_slot_count);
sdp               359 fs/gfs2/quota.c 		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
sdp               362 fs/gfs2/quota.c 	spin_unlock(&sdp->sd_bitmap_lock);
sdp               367 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
sdp               368 fs/gfs2/quota.c 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
sdp               374 fs/gfs2/quota.c 	mutex_lock(&sdp->sd_quota_mutex);
sdp               377 fs/gfs2/quota.c 		mutex_unlock(&sdp->sd_quota_mutex);
sdp               381 fs/gfs2/quota.c 	block = qd->qd_slot / sdp->sd_qc_per_block;
sdp               382 fs/gfs2/quota.c 	offset = qd->qd_slot % sdp->sd_qc_per_block;
sdp               392 fs/gfs2/quota.c 	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
sdp               400 fs/gfs2/quota.c 	mutex_unlock(&sdp->sd_quota_mutex);
sdp               408 fs/gfs2/quota.c 	mutex_unlock(&sdp->sd_quota_mutex);
sdp               414 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
sdp               416 fs/gfs2/quota.c 	mutex_lock(&sdp->sd_quota_mutex);
sdp               417 fs/gfs2/quota.c 	gfs2_assert(sdp, qd->qd_bh_count);
sdp               423 fs/gfs2/quota.c 	mutex_unlock(&sdp->sd_quota_mutex);
sdp               426 fs/gfs2/quota.c static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
sdp               437 fs/gfs2/quota.c 	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
sdp               444 fs/gfs2/quota.c static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
sdp               452 fs/gfs2/quota.c 	if (sb_rdonly(sdp->sd_vfs))
sdp               457 fs/gfs2/quota.c 	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
sdp               458 fs/gfs2/quota.c 		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
sdp               469 fs/gfs2/quota.c 		gfs2_assert_warn(sdp, qd->qd_change_sync);
sdp               494 fs/gfs2/quota.c static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
sdp               499 fs/gfs2/quota.c 	error = qd_get(sdp, qid, qdp);
sdp               535 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               537 fs/gfs2/quota.c 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
sdp               562 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               566 fs/gfs2/quota.c 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
sdp               577 fs/gfs2/quota.c 	if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
sdp               578 fs/gfs2/quota.c 	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
sdp               581 fs/gfs2/quota.c 	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
sdp               587 fs/gfs2/quota.c 	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
sdp               595 fs/gfs2/quota.c 		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
sdp               604 fs/gfs2/quota.c 		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
sdp               619 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               624 fs/gfs2/quota.c 	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
sdp               647 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
sdp               648 fs/gfs2/quota.c 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
sdp               652 fs/gfs2/quota.c 	mutex_lock(&sdp->sd_quota_mutex);
sdp               671 fs/gfs2/quota.c 		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
sdp               684 fs/gfs2/quota.c 	mutex_unlock(&sdp->sd_quota_mutex);
sdp               691 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               697 fs/gfs2/quota.c 	unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
sdp               701 fs/gfs2/quota.c 	blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
sdp               815 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               839 fs/gfs2/quota.c 			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
sdp               843 fs/gfs2/quota.c 			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
sdp               847 fs/gfs2/quota.c 			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
sdp               867 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
sdp               868 fs/gfs2/quota.c 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
sdp               930 fs/gfs2/quota.c 	error = gfs2_trans_begin(sdp, blocks, 0);
sdp               948 fs/gfs2/quota.c 	gfs2_trans_end(sdp);
sdp               963 fs/gfs2/quota.c static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
sdp               965 fs/gfs2/quota.c 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
sdp               991 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
sdp               992 fs/gfs2/quota.c 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
sdp              1017 fs/gfs2/quota.c 		error = update_qd(sdp, qd);
sdp              1038 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              1043 fs/gfs2/quota.c 	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
sdp              1073 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
sdp              1074 fs/gfs2/quota.c 	struct gfs2_tune *gt = &sdp->sd_tune;
sdp              1097 fs/gfs2/quota.c 		value *= gfs2_jindex_size(sdp) * num;
sdp              1109 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              1130 fs/gfs2/quota.c 		found = qd_check_sync(sdp, qd, NULL);
sdp              1136 fs/gfs2/quota.c 		gfs2_assert_warn(sdp, qd->qd_change_sync);
sdp              1161 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
sdp              1163 fs/gfs2/quota.c 	fs_info(sdp, "quota %s for %s %u\n",
sdp              1191 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              1201 fs/gfs2/quota.c         if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
sdp              1229 fs/gfs2/quota.c 							   sdp->sd_vfs->s_dev,
sdp              1237 fs/gfs2/quota.c 					 gfs2_tune_get(sdp, gt_quota_warn_period)
sdp              1240 fs/gfs2/quota.c 					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
sdp              1253 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              1255 fs/gfs2/quota.c 	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
sdp              1256 fs/gfs2/quota.c 	    gfs2_assert_warn(sdp, change))
sdp              1273 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp              1284 fs/gfs2/quota.c 	mutex_lock(&sdp->sd_quota_sync_mutex);
sdp              1285 fs/gfs2/quota.c 	sdp->sd_quota_sync_gen++;
sdp              1291 fs/gfs2/quota.c 			error = qd_fish(sdp, qda + num_qd);
sdp              1304 fs/gfs2/quota.c 						sdp->sd_quota_sync_gen;
sdp              1311 fs/gfs2/quota.c 	mutex_unlock(&sdp->sd_quota_sync_mutex);
sdp              1317 fs/gfs2/quota.c int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
sdp              1323 fs/gfs2/quota.c 	error = qd_get(sdp, qid, &qd);
sdp              1335 fs/gfs2/quota.c int gfs2_quota_init(struct gfs2_sbd *sdp)
sdp              1337 fs/gfs2/quota.c 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
sdp              1338 fs/gfs2/quota.c 	u64 size = i_size_read(sdp->sd_qc_inode);
sdp              1339 fs/gfs2/quota.c 	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
sdp              1348 fs/gfs2/quota.c 	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
sdp              1351 fs/gfs2/quota.c 	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
sdp              1352 fs/gfs2/quota.c 	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
sdp              1355 fs/gfs2/quota.c 	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
sdp              1356 fs/gfs2/quota.c 	if (sdp->sd_quota_bitmap == NULL)
sdp              1357 fs/gfs2/quota.c 		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
sdp              1359 fs/gfs2/quota.c 	if (!sdp->sd_quota_bitmap)
sdp              1377 fs/gfs2/quota.c 		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
sdp              1383 fs/gfs2/quota.c 		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
sdp              1396 fs/gfs2/quota.c 			hash = gfs2_qd_hash(sdp, qc_id);
sdp              1397 fs/gfs2/quota.c 			qd = qd_alloc(hash, sdp, qc_id);
sdp              1409 fs/gfs2/quota.c 			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
sdp              1410 fs/gfs2/quota.c 			list_add(&qd->qd_list, &sdp->sd_quota_list);
sdp              1411 fs/gfs2/quota.c 			atomic_inc(&sdp->sd_quota_count);
sdp              1427 fs/gfs2/quota.c 		fs_info(sdp, "found %u quota changes\n", found);
sdp              1432 fs/gfs2/quota.c 	gfs2_quota_cleanup(sdp);
sdp              1436 fs/gfs2/quota.c void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
sdp              1438 fs/gfs2/quota.c 	struct list_head *head = &sdp->sd_quota_list;
sdp              1449 fs/gfs2/quota.c 		atomic_dec(&sdp->sd_quota_count);
sdp              1456 fs/gfs2/quota.c 		gfs2_assert_warn(sdp, !qd->qd_change);
sdp              1457 fs/gfs2/quota.c 		gfs2_assert_warn(sdp, !qd->qd_slot_count);
sdp              1458 fs/gfs2/quota.c 		gfs2_assert_warn(sdp, !qd->qd_bh_count);
sdp              1467 fs/gfs2/quota.c 	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
sdp              1469 fs/gfs2/quota.c 	kvfree(sdp->sd_quota_bitmap);
sdp              1470 fs/gfs2/quota.c 	sdp->sd_quota_bitmap = NULL;
sdp              1473 fs/gfs2/quota.c static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
sdp              1477 fs/gfs2/quota.c 	if (!test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
sdp              1478 fs/gfs2/quota.c 		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
sdp              1479 fs/gfs2/quota.c 		sdp->sd_log_error = error;
sdp              1480 fs/gfs2/quota.c 		wake_up(&sdp->sd_logd_waitq);
sdp              1484 fs/gfs2/quota.c static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
sdp              1490 fs/gfs2/quota.c 		int error = fxn(sdp->sd_vfs, 0);
sdp              1491 fs/gfs2/quota.c 		quotad_error(sdp, msg, error);
sdp              1492 fs/gfs2/quota.c 		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
sdp              1498 fs/gfs2/quota.c static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
sdp              1504 fs/gfs2/quota.c 		spin_lock(&sdp->sd_trunc_lock);
sdp              1505 fs/gfs2/quota.c 		if (!list_empty(&sdp->sd_trunc_list)) {
sdp              1506 fs/gfs2/quota.c 			ip = list_entry(sdp->sd_trunc_list.next,
sdp              1510 fs/gfs2/quota.c 		spin_unlock(&sdp->sd_trunc_lock);
sdp              1517 fs/gfs2/quota.c void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
sdp              1518 fs/gfs2/quota.c 	if (!sdp->sd_statfs_force_sync) {
sdp              1519 fs/gfs2/quota.c 		sdp->sd_statfs_force_sync = 1;
sdp              1520 fs/gfs2/quota.c 		wake_up(&sdp->sd_quota_wait);
sdp              1533 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = data;
sdp              1534 fs/gfs2/quota.c 	struct gfs2_tune *tune = &sdp->sd_tune;
sdp              1544 fs/gfs2/quota.c 		if (sdp->sd_statfs_force_sync) {
sdp              1545 fs/gfs2/quota.c 			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
sdp              1546 fs/gfs2/quota.c 			quotad_error(sdp, "statfs", error);
sdp              1547 fs/gfs2/quota.c 			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
sdp              1550 fs/gfs2/quota.c 			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
sdp              1555 fs/gfs2/quota.c 		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
sdp              1559 fs/gfs2/quota.c 		quotad_check_trunc_list(sdp);
sdp              1565 fs/gfs2/quota.c 		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
sdp              1566 fs/gfs2/quota.c 		spin_lock(&sdp->sd_trunc_lock);
sdp              1567 fs/gfs2/quota.c 		empty = list_empty(&sdp->sd_trunc_list);
sdp              1568 fs/gfs2/quota.c 		spin_unlock(&sdp->sd_trunc_lock);
sdp              1569 fs/gfs2/quota.c 		if (empty && !sdp->sd_statfs_force_sync)
sdp              1573 fs/gfs2/quota.c 		finish_wait(&sdp->sd_quota_wait, &wait);
sdp              1581 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp              1585 fs/gfs2/quota.c 	switch (sdp->sd_args.ar_quota) {
sdp              1599 fs/gfs2/quota.c 	if (sdp->sd_quota_inode) {
sdp              1601 fs/gfs2/quota.c 					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
sdp              1602 fs/gfs2/quota.c 		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
sdp              1613 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp              1621 fs/gfs2/quota.c 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
sdp              1628 fs/gfs2/quota.c 	error = qd_get(sdp, qid, &qd);
sdp              1636 fs/gfs2/quota.c 	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
sdp              1637 fs/gfs2/quota.c 	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
sdp              1638 fs/gfs2/quota.c 	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
sdp              1652 fs/gfs2/quota.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp              1653 fs/gfs2/quota.c 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
sdp              1662 fs/gfs2/quota.c 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
sdp              1672 fs/gfs2/quota.c 	error = qd_get(sdp, qid, &qd);
sdp              1689 fs/gfs2/quota.c 	error = update_qd(sdp, qd);
sdp              1695 fs/gfs2/quota.c 	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
sdp              1699 fs/gfs2/quota.c 	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
sdp              1703 fs/gfs2/quota.c 	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
sdp              1727 fs/gfs2/quota.c 	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
sdp              1736 fs/gfs2/quota.c 	gfs2_trans_end(sdp);
sdp                32 fs/gfs2/quota.h extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
sdp                34 fs/gfs2/quota.h extern int gfs2_quota_init(struct gfs2_sbd *sdp);
sdp                35 fs/gfs2/quota.h extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
sdp                38 fs/gfs2/quota.h extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
sdp                43 fs/gfs2/quota.h 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp                48 fs/gfs2/quota.h 	    sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
sdp                53 fs/gfs2/quota.h 	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
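
Illustrative sketch, not part of the cross-reference listing above: the quota.h inlines gate on sdp->sd_args.ar_quota before doing any work. A minimal helper in the same spirit, assuming the usual fs/gfs2 in-kernel context; example_quota_enforced() is a hypothetical name, not a real GFS2 function.

	/* Hypothetical helper mirroring the ar_quota checks in quota.h above. */
	static inline bool example_quota_enforced(struct gfs2_inode *ip)
	{
		struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

		/* GFS2_QUOTA_ON means account and enforce; any other setting
		 * (off, or account-only) skips the enforcement paths above. */
		return sdp->sd_args.ar_quota == GFS2_QUOTA_ON;
	}
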
sdp               120 fs/gfs2/recovery.c int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh,
sdp               137 fs/gfs2/recovery.c 		     sdp->sd_sb.sb_bsize - LH_V1_SIZE - 4);
sdp               166 fs/gfs2/recovery.c 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
sdp               174 fs/gfs2/recovery.c 	error = __get_log_header(sdp, (const struct gfs2_log_header *)bh->b_data,
sdp               196 fs/gfs2/recovery.c 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
sdp               210 fs/gfs2/recovery.c 		if (gfs2_meta_check(sdp, bh)) {
sdp               231 fs/gfs2/recovery.c 		} else if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LD)) {
sdp               262 fs/gfs2/recovery.c 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
sdp               266 fs/gfs2/recovery.c 	if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
sdp               267 fs/gfs2/recovery.c 		sdp->sd_log_flush_head = lblock;
sdp               268 fs/gfs2/recovery.c 	gfs2_write_log_header(sdp, jd, head->lh_sequence + 1, 0, lblock,
sdp               274 fs/gfs2/recovery.c static void gfs2_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
sdp               280 fs/gfs2/recovery.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               287 fs/gfs2/recovery.c         kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
sdp               289 fs/gfs2/recovery.c 	if (sdp->sd_lockstruct.ls_ops->lm_recovery_result)
sdp               290 fs/gfs2/recovery.c 		sdp->sd_lockstruct.ls_ops->lm_recovery_result(sdp, jid, message);
sdp               297 fs/gfs2/recovery.c 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
sdp               307 fs/gfs2/recovery.c 	if (sdp->sd_args.ar_spectator)
sdp               309 fs/gfs2/recovery.c 	if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
sdp               310 fs/gfs2/recovery.c 		fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
sdp               315 fs/gfs2/recovery.c 		error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
sdp               324 fs/gfs2/recovery.c 			fs_info(sdp, "jid=%u: Busy\n", jd->jd_jid);
sdp               336 fs/gfs2/recovery.c 		fs_info(sdp, "jid=%u, already locked for use\n", jd->jd_jid);
sdp               340 fs/gfs2/recovery.c 	fs_info(sdp, "jid=%u: Looking at journal...\n", jd->jd_jid);
sdp               350 fs/gfs2/recovery.c 	fs_info(sdp, "jid=%u: Journal head lookup took %lldms\n", jd->jd_jid,
sdp               354 fs/gfs2/recovery.c 		fs_info(sdp, "jid=%u: Acquiring the transaction lock...\n",
sdp               359 fs/gfs2/recovery.c 		error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
sdp               365 fs/gfs2/recovery.c 		if (test_bit(SDF_RORECOVERY, &sdp->sd_flags)) {
sdp               367 fs/gfs2/recovery.c 		} else if (test_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags)) {
sdp               368 fs/gfs2/recovery.c 			if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
sdp               371 fs/gfs2/recovery.c 			if (sb_rdonly(sdp->sd_vfs)) {
sdp               373 fs/gfs2/recovery.c 				ro = bdev_read_only(sdp->sd_vfs->s_bdev);
sdp               375 fs/gfs2/recovery.c 					fs_info(sdp, "recovery required on "
sdp               377 fs/gfs2/recovery.c 					fs_info(sdp, "write access will be "
sdp               384 fs/gfs2/recovery.c 			fs_warn(sdp, "jid=%u: Can't replay: read-only block "
sdp               391 fs/gfs2/recovery.c 		fs_info(sdp, "jid=%u: Replaying journal...0x%x to 0x%x\n",
sdp               407 fs/gfs2/recovery.c 		fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, "
sdp               416 fs/gfs2/recovery.c 	gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS);
sdp               423 fs/gfs2/recovery.c 	fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
sdp               435 fs/gfs2/recovery.c 	fs_info(sdp, "jid=%u: %s\n", jd->jd_jid, (error) ? "Failed" : "Done");
sdp               438 fs/gfs2/recovery.c 	gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP);
sdp                29 fs/gfs2/recovery.h extern int __get_log_header(struct gfs2_sbd *sdp,
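
Illustrative sketch of the replay gate implied by the recovery.c lines above: a journal can only be replayed if the underlying block device is writable, even when the filesystem itself is mounted read-only. Hypothetical helper name; assumes struct gfs2_sbd and struct gfs2_jdesc from the fs/gfs2 headers.

	/* Sketch only: condenses the sb_rdonly()/bdev_read_only() decision shown above. */
	static bool example_can_replay(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
	{
		if (sb_rdonly(sdp->sd_vfs) && bdev_read_only(sdp->sd_vfs->s_bdev)) {
			fs_warn(sdp, "jid=%u: Can't replay: read-only block device\n",
				jd->jd_jid);
			return false;	/* nowhere to write the replayed blocks */
		}
		/* Read-only mount on a writable device: recovery gets temporary write access. */
		fs_info(sdp, "jid=%u: Replaying journal...\n", jd->jd_jid);
		return true;
	}
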
sdp               101 fs/gfs2/rgrp.c 		struct gfs2_sbd *sdp = rbm->rgd->rd_sbd;
sdp               103 fs/gfs2/rgrp.c 		fs_warn(sdp, "buf_blk = 0x%x old_state=%d, new_state=%d\n",
sdp               105 fs/gfs2/rgrp.c 		fs_warn(sdp, "rgrp=0x%llx bi_start=0x%x biblk: 0x%llx\n",
sdp               108 fs/gfs2/rgrp.c 		fs_warn(sdp, "bi_offset=0x%x bi_bytes=0x%x block=0x%llx\n",
sdp               451 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp               471 fs/gfs2/rgrp.c 			fs_err(sdp, "free data mismatch:  %u != %u\n",
sdp               479 fs/gfs2/rgrp.c 			fs_err(sdp, "used data mismatch:  %u != %u\n",
sdp               486 fs/gfs2/rgrp.c 			fs_err(sdp, "used metadata mismatch:  %u != %u\n",
sdp               508 fs/gfs2/rgrp.c struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
sdp               513 fs/gfs2/rgrp.c 	spin_lock(&sdp->sd_rindex_spin);
sdp               514 fs/gfs2/rgrp.c 	n = sdp->sd_rindex_tree.rb_node;
sdp               523 fs/gfs2/rgrp.c 			spin_unlock(&sdp->sd_rindex_spin);
sdp               534 fs/gfs2/rgrp.c 	spin_unlock(&sdp->sd_rindex_spin);
sdp               546 fs/gfs2/rgrp.c struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
sdp               551 fs/gfs2/rgrp.c 	spin_lock(&sdp->sd_rindex_spin);
sdp               552 fs/gfs2/rgrp.c 	n = rb_first(&sdp->sd_rindex_tree);
sdp               554 fs/gfs2/rgrp.c 	spin_unlock(&sdp->sd_rindex_spin);
sdp               568 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp               571 fs/gfs2/rgrp.c 	spin_lock(&sdp->sd_rindex_spin);
sdp               574 fs/gfs2/rgrp.c 		n = rb_first(&sdp->sd_rindex_tree);
sdp               577 fs/gfs2/rgrp.c 		spin_unlock(&sdp->sd_rindex_spin);
sdp               581 fs/gfs2/rgrp.c 	spin_unlock(&sdp->sd_rindex_spin);
sdp               587 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               588 fs/gfs2/rgrp.c 	if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
sdp               720 fs/gfs2/rgrp.c void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
sdp               726 fs/gfs2/rgrp.c 	while ((n = rb_first(&sdp->sd_rindex_tree))) {
sdp               730 fs/gfs2/rgrp.c 		rb_erase(n, &sdp->sd_rindex_tree);
sdp               748 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp               750 fs/gfs2/rgrp.c 	fs_info(sdp, "ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
sdp               751 fs/gfs2/rgrp.c 	fs_info(sdp, "ri_length = %u\n", rgd->rd_length);
sdp               752 fs/gfs2/rgrp.c 	fs_info(sdp, "ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
sdp               753 fs/gfs2/rgrp.c 	fs_info(sdp, "ri_data = %u\n", rgd->rd_data);
sdp               754 fs/gfs2/rgrp.c 	fs_info(sdp, "ri_bitbytes = %u\n", rgd->rd_bitbytes);
sdp               768 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp               796 fs/gfs2/rgrp.c 			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
sdp               810 fs/gfs2/rgrp.c 			bytes = sdp->sd_sb.sb_bsize -
sdp               829 fs/gfs2/rgrp.c 			fs_err(sdp, "start=%u len=%u offset=%u\n",
sdp               843 fs/gfs2/rgrp.c u64 gfs2_ri_total(struct gfs2_sbd *sdp)
sdp               846 fs/gfs2/rgrp.c 	struct inode *inode = sdp->sd_rindex;
sdp               867 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp               868 fs/gfs2/rgrp.c 	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
sdp               885 fs/gfs2/rgrp.c 	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
sdp               886 fs/gfs2/rgrp.c 	sdp->sd_rgrps++;
sdp               899 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               900 fs/gfs2/rgrp.c 	const unsigned bsize = sdp->sd_sb.sb_bsize;
sdp               901 fs/gfs2/rgrp.c 	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
sdp               920 fs/gfs2/rgrp.c 	rgd->rd_sbd = sdp;
sdp               932 fs/gfs2/rgrp.c 	error = gfs2_glock_get(sdp, rgd->rd_addr,
sdp               939 fs/gfs2/rgrp.c 	if (rgd->rd_data > sdp->sd_max_rg_data)
sdp               940 fs/gfs2/rgrp.c 		sdp->sd_max_rg_data = rgd->rd_data;
sdp               941 fs/gfs2/rgrp.c 	spin_lock(&sdp->sd_rindex_spin);
sdp               943 fs/gfs2/rgrp.c 	spin_unlock(&sdp->sd_rindex_spin);
sdp               970 fs/gfs2/rgrp.c static void set_rgrp_preferences(struct gfs2_sbd *sdp)
sdp               977 fs/gfs2/rgrp.c 	rgd = gfs2_rgrpd_get_first(sdp);
sdp               978 fs/gfs2/rgrp.c 	for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
sdp               984 fs/gfs2/rgrp.c 		for (i = 0; i < sdp->sd_journals; i++) {
sdp              1001 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              1011 fs/gfs2/rgrp.c 	set_rgrp_preferences(sdp);
sdp              1013 fs/gfs2/rgrp.c 	sdp->sd_rindex_uptodate = 1;
sdp              1034 fs/gfs2/rgrp.c int gfs2_rindex_update(struct gfs2_sbd *sdp)
sdp              1036 fs/gfs2/rgrp.c 	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
sdp              1043 fs/gfs2/rgrp.c 	if (!sdp->sd_rindex_uptodate) {
sdp              1050 fs/gfs2/rgrp.c 		if (!sdp->sd_rindex_uptodate)
sdp              1115 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp              1119 fs/gfs2/rgrp.c 		fs_warn(sdp, "GFS2: rgd: %llu lvb flag mismatch %u/%u",
sdp              1125 fs/gfs2/rgrp.c 		fs_warn(sdp, "GFS2: rgd: %llu lvb free mismatch %u/%u",
sdp              1131 fs/gfs2/rgrp.c 		fs_warn(sdp, "GFS2: rgd: %llu lvb dinode mismatch %u/%u",
sdp              1138 fs/gfs2/rgrp.c 		fs_warn(sdp, "GFS2: rgd: %llu lvb igen mismatch %llu/%llu",
sdp              1184 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp              1203 fs/gfs2/rgrp.c 		error = gfs2_meta_wait(sdp, bi->bi_bh);
sdp              1206 fs/gfs2/rgrp.c 		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
sdp              1227 fs/gfs2/rgrp.c 	else if (sdp->sd_args.ar_rgrplvb) {
sdp              1243 fs/gfs2/rgrp.c 		gfs2_assert_warn(sdp, !bi->bi_clone);
sdp              1275 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp              1277 fs/gfs2/rgrp.c 	if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
sdp              1318 fs/gfs2/rgrp.c int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
sdp              1322 fs/gfs2/rgrp.c 	struct super_block *sb = sdp->sd_vfs;
sdp              1379 fs/gfs2/rgrp.c 	if (sdp->sd_args.ar_discard)
sdp              1380 fs/gfs2/rgrp.c 		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
sdp              1381 fs/gfs2/rgrp.c 	sdp->sd_args.ar_discard = 0;
sdp              1396 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1397 fs/gfs2/rgrp.c 	struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
sdp              1408 fs/gfs2/rgrp.c 	unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
sdp              1419 fs/gfs2/rgrp.c 	ret = gfs2_rindex_update(sdp);
sdp              1428 fs/gfs2/rgrp.c 	if (end <= start || minlen > sdp->sd_max_rg_data)
sdp              1431 fs/gfs2/rgrp.c 	rgd = gfs2_blk2rgrpd(sdp, start, 0);
sdp              1432 fs/gfs2/rgrp.c 	rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
sdp              1434 fs/gfs2/rgrp.c 	if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
sdp              1448 fs/gfs2/rgrp.c 				ret = gfs2_rgrp_send_discards(sdp,
sdp              1459 fs/gfs2/rgrp.c 			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
sdp              1465 fs/gfs2/rgrp.c 				gfs2_trans_end(sdp);
sdp              1837 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp              1845 fs/gfs2/rgrp.c 		down_write(&sdp->sd_log_flush_lock);
sdp              1848 fs/gfs2/rgrp.c 		up_write(&sdp->sd_log_flush_lock);
sdp              1863 fs/gfs2/rgrp.c 		error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
sdp              1920 fs/gfs2/rgrp.c 	const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp              1931 fs/gfs2/rgrp.c 		st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
sdp              1937 fs/gfs2/rgrp.c 	st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
sdp              1983 fs/gfs2/rgrp.c 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              1987 fs/gfs2/rgrp.c 	return skip % sdp->sd_rgrps;
sdp              1993 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp              1997 fs/gfs2/rgrp.c 		rgd = gfs2_rgrpd_get_first(sdp);
sdp              2041 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              2049 fs/gfs2/rgrp.c 	if (sdp->sd_args.ar_rgrplvb)
sdp              2051 fs/gfs2/rgrp.c 	if (gfs2_assert_warn(sdp, ap->target))
sdp              2060 fs/gfs2/rgrp.c 		rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
sdp              2091 fs/gfs2/rgrp.c 			if (sdp->sd_args.ar_rgrplvb) {
sdp              2106 fs/gfs2/rgrp.c 		if (sdp->sd_args.ar_rgrplvb)
sdp              2151 fs/gfs2/rgrp.c 		if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
sdp              2158 fs/gfs2/rgrp.c 			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
sdp              2220 fs/gfs2/rgrp.c static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
sdp              2287 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp              2288 fs/gfs2/rgrp.c 	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
sdp              2290 fs/gfs2/rgrp.c 	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
sdp              2292 fs/gfs2/rgrp.c 	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
sdp              2293 fs/gfs2/rgrp.c 	sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
sdp              2385 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              2402 fs/gfs2/rgrp.c 		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
sdp              2431 fs/gfs2/rgrp.c 		fs_warn(sdp, "nblocks=%u\n", *nblocks);
sdp              2446 fs/gfs2/rgrp.c 	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
sdp              2448 fs/gfs2/rgrp.c 		gfs2_trans_remove_revoke(sdp, block, *nblocks);
sdp              2476 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              2478 fs/gfs2/rgrp.c 	rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE);
sdp              2502 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              2505 fs/gfs2/rgrp.c 	gfs2_statfs_change(sdp, 0, +blen, 0);
sdp              2512 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              2516 fs/gfs2/rgrp.c 	rgd = gfs2_blk2rgrpd(sdp, blkno, true);
sdp              2519 fs/gfs2/rgrp.c 	rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
sdp              2528 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp              2530 fs/gfs2/rgrp.c 	rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
sdp              2540 fs/gfs2/rgrp.c 	gfs2_statfs_change(sdp, 0, +1, -1);
sdp              2557 fs/gfs2/rgrp.c int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
sdp              2564 fs/gfs2/rgrp.c 	rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
sdp              2600 fs/gfs2/rgrp.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              2606 fs/gfs2/rgrp.c 	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
sdp              2617 fs/gfs2/rgrp.c 		rgd = gfs2_blk2rgrpd(sdp, block, 1);
sdp              2621 fs/gfs2/rgrp.c 			rgd = gfs2_blk2rgrpd(sdp, block, 1);
sdp              2625 fs/gfs2/rgrp.c 		fs_err(sdp, "rlist_add: no rgrp for block %llu\n",
sdp                27 fs/gfs2/rgrp.h extern struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact);
sdp                28 fs/gfs2/rgrp.h extern struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
sdp                31 fs/gfs2/rgrp.h extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
sdp                32 fs/gfs2/rgrp.h extern int gfs2_rindex_update(struct gfs2_sbd *sdp);
sdp                57 fs/gfs2/rgrp.h extern int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
sdp                71 fs/gfs2/rgrp.h extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
sdp                74 fs/gfs2/rgrp.h extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
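
Illustrative sketch of walking the resource-group tree with the accessors declared in rgrp.h above (gfs2_rgrpd_get_first()/gfs2_rgrpd_get_next()), in the spirit of gfs2_statfs_slow(). Hypothetical helper name; the walk is bounded by sd_rgrps, the rgrp count maintained by the rindex code above, rather than relying on a NULL terminator.

	/* Sketch only: sum the data blocks managed by every resource group. */
	static u64 example_count_data_blocks(struct gfs2_sbd *sdp)
	{
		struct gfs2_rgrpd *rgd = gfs2_rgrpd_get_first(sdp);
		unsigned int i;
		u64 total = 0;

		for (i = 0; rgd && i < sdp->sd_rgrps; i++) {
			total += rgd->rd_data;		/* rd_data: data blocks in this rgrp */
			rgd = gfs2_rgrpd_get_next(rgd);
		}
		return total;
	}
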
sdp                53 fs/gfs2/super.c void gfs2_jindex_free(struct gfs2_sbd *sdp)
sdp                58 fs/gfs2/super.c 	spin_lock(&sdp->sd_jindex_spin);
sdp                59 fs/gfs2/super.c 	list_add(&list, &sdp->sd_jindex_list);
sdp                60 fs/gfs2/super.c 	list_del_init(&sdp->sd_jindex_list);
sdp                61 fs/gfs2/super.c 	sdp->sd_journals = 0;
sdp                62 fs/gfs2/super.c 	spin_unlock(&sdp->sd_jindex_spin);
sdp                91 fs/gfs2/super.c struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
sdp                95 fs/gfs2/super.c 	spin_lock(&sdp->sd_jindex_spin);
sdp                96 fs/gfs2/super.c 	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
sdp                97 fs/gfs2/super.c 	spin_unlock(&sdp->sd_jindex_spin);
sdp               105 fs/gfs2/super.c 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
sdp               111 fs/gfs2/super.c 	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
sdp               121 fs/gfs2/super.c static int init_threads(struct gfs2_sbd *sdp)
sdp               126 fs/gfs2/super.c 	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
sdp               129 fs/gfs2/super.c 		fs_err(sdp, "can't start logd thread: %d\n", error);
sdp               132 fs/gfs2/super.c 	sdp->sd_logd_process = p;
sdp               134 fs/gfs2/super.c 	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
sdp               137 fs/gfs2/super.c 		fs_err(sdp, "can't start quotad thread: %d\n", error);
sdp               140 fs/gfs2/super.c 	sdp->sd_quotad_process = p;
sdp               144 fs/gfs2/super.c 	kthread_stop(sdp->sd_logd_process);
sdp               145 fs/gfs2/super.c 	sdp->sd_logd_process = NULL;
sdp               156 fs/gfs2/super.c int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
sdp               158 fs/gfs2/super.c 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
sdp               164 fs/gfs2/super.c 	error = init_threads(sdp);
sdp               168 fs/gfs2/super.c 	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
sdp               175 fs/gfs2/super.c 	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
sdp               180 fs/gfs2/super.c 		gfs2_consist(sdp);
sdp               186 fs/gfs2/super.c 	sdp->sd_log_sequence = head.lh_sequence + 1;
sdp               187 fs/gfs2/super.c 	gfs2_log_pointers_init(sdp, head.lh_blkno);
sdp               189 fs/gfs2/super.c 	error = gfs2_quota_init(sdp);
sdp               193 fs/gfs2/super.c 	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
sdp               203 fs/gfs2/super.c 	if (sdp->sd_quotad_process)
sdp               204 fs/gfs2/super.c 		kthread_stop(sdp->sd_quotad_process);
sdp               205 fs/gfs2/super.c 	sdp->sd_quotad_process = NULL;
sdp               206 fs/gfs2/super.c 	if (sdp->sd_logd_process)
sdp               207 fs/gfs2/super.c 		kthread_stop(sdp->sd_logd_process);
sdp               208 fs/gfs2/super.c 	sdp->sd_logd_process = NULL;
sdp               230 fs/gfs2/super.c int gfs2_statfs_init(struct gfs2_sbd *sdp)
sdp               232 fs/gfs2/super.c 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
sdp               233 fs/gfs2/super.c 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
sdp               234 fs/gfs2/super.c 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
sdp               235 fs/gfs2/super.c 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
sdp               249 fs/gfs2/super.c 	if (sdp->sd_args.ar_spectator) {
sdp               250 fs/gfs2/super.c 		spin_lock(&sdp->sd_statfs_spin);
sdp               253 fs/gfs2/super.c 		spin_unlock(&sdp->sd_statfs_spin);
sdp               259 fs/gfs2/super.c 		spin_lock(&sdp->sd_statfs_spin);
sdp               264 fs/gfs2/super.c 		spin_unlock(&sdp->sd_statfs_spin);
sdp               276 fs/gfs2/super.c void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
sdp               279 fs/gfs2/super.c 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
sdp               280 fs/gfs2/super.c 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
sdp               281 fs/gfs2/super.c 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
sdp               293 fs/gfs2/super.c 	spin_lock(&sdp->sd_statfs_spin);
sdp               298 fs/gfs2/super.c 	if (sdp->sd_args.ar_statfs_percent) {
sdp               300 fs/gfs2/super.c 		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
sdp               304 fs/gfs2/super.c 	spin_unlock(&sdp->sd_statfs_spin);
sdp               308 fs/gfs2/super.c 		gfs2_wake_up_statfs(sdp);
sdp               311 fs/gfs2/super.c void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
sdp               314 fs/gfs2/super.c 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
sdp               315 fs/gfs2/super.c 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
sdp               316 fs/gfs2/super.c 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
sdp               317 fs/gfs2/super.c 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
sdp               322 fs/gfs2/super.c 	spin_lock(&sdp->sd_statfs_spin);
sdp               330 fs/gfs2/super.c 	spin_unlock(&sdp->sd_statfs_spin);
sdp               335 fs/gfs2/super.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp               336 fs/gfs2/super.c 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
sdp               337 fs/gfs2/super.c 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
sdp               338 fs/gfs2/super.c 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
sdp               339 fs/gfs2/super.c 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
sdp               354 fs/gfs2/super.c 	spin_lock(&sdp->sd_statfs_spin);
sdp               358 fs/gfs2/super.c 		spin_unlock(&sdp->sd_statfs_spin);
sdp               361 fs/gfs2/super.c 	spin_unlock(&sdp->sd_statfs_spin);
sdp               367 fs/gfs2/super.c 	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
sdp               371 fs/gfs2/super.c 	update_statfs(sdp, m_bh, l_bh);
sdp               372 fs/gfs2/super.c 	sdp->sd_statfs_force_sync = 0;
sdp               374 fs/gfs2/super.c 	gfs2_trans_end(sdp);
sdp               402 fs/gfs2/super.c static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
sdp               412 fs/gfs2/super.c 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
sdp               427 fs/gfs2/super.c 	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
sdp               430 fs/gfs2/super.c 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
sdp               504 fs/gfs2/super.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               515 fs/gfs2/super.c 		gfs2_ail1_flush(sdp, wbc);
sdp               547 fs/gfs2/super.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp               556 fs/gfs2/super.c 	if (unlikely(test_bit(SDF_WITHDRAWN, &sdp->sd_flags)))
sdp               561 fs/gfs2/super.c 			fs_err(sdp, "dirty_inode: glock %d\n", ret);
sdp               569 fs/gfs2/super.c 		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
sdp               571 fs/gfs2/super.c 			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
sdp               585 fs/gfs2/super.c 		gfs2_trans_end(sdp);
sdp               598 fs/gfs2/super.c int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
sdp               603 fs/gfs2/super.c 	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, GL_NOCACHE,
sdp               605 fs/gfs2/super.c 	if (error && !test_bit(SDF_WITHDRAWN, &sdp->sd_flags))
sdp               609 fs/gfs2/super.c 	if (sdp->sd_quotad_process)
sdp               610 fs/gfs2/super.c 		kthread_stop(sdp->sd_quotad_process);
sdp               611 fs/gfs2/super.c 	sdp->sd_quotad_process = NULL;
sdp               612 fs/gfs2/super.c 	if (sdp->sd_logd_process)
sdp               613 fs/gfs2/super.c 		kthread_stop(sdp->sd_logd_process);
sdp               614 fs/gfs2/super.c 	sdp->sd_logd_process = NULL;
sdp               616 fs/gfs2/super.c 	gfs2_quota_sync(sdp->sd_vfs, 0);
sdp               617 fs/gfs2/super.c 	gfs2_statfs_sync(sdp->sd_vfs, 0);
sdp               619 fs/gfs2/super.c 	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
sdp               621 fs/gfs2/super.c 	wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0);
sdp               622 fs/gfs2/super.c 	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
sdp               627 fs/gfs2/super.c 	gfs2_quota_cleanup(sdp);
sdp               640 fs/gfs2/super.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp               645 fs/gfs2/super.c 	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
sdp               650 fs/gfs2/super.c 	spin_lock(&sdp->sd_jindex_spin);
sdp               651 fs/gfs2/super.c 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
sdp               654 fs/gfs2/super.c 		spin_unlock(&sdp->sd_jindex_spin);
sdp               659 fs/gfs2/super.c 	spin_unlock(&sdp->sd_jindex_spin);
sdp               662 fs/gfs2/super.c 		error = gfs2_make_fs_ro(sdp);
sdp               664 fs/gfs2/super.c 			gfs2_io_error(sdp);
sdp               670 fs/gfs2/super.c 	iput(sdp->sd_jindex);
sdp               671 fs/gfs2/super.c 	iput(sdp->sd_statfs_inode);
sdp               672 fs/gfs2/super.c 	iput(sdp->sd_rindex);
sdp               673 fs/gfs2/super.c 	iput(sdp->sd_quota_inode);
sdp               675 fs/gfs2/super.c 	gfs2_glock_put(sdp->sd_rename_gl);
sdp               676 fs/gfs2/super.c 	gfs2_glock_put(sdp->sd_freeze_gl);
sdp               678 fs/gfs2/super.c 	if (!sdp->sd_args.ar_spectator) {
sdp               679 fs/gfs2/super.c 		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
sdp               680 fs/gfs2/super.c 		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
sdp               681 fs/gfs2/super.c 		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
sdp               682 fs/gfs2/super.c 		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
sdp               683 fs/gfs2/super.c 		iput(sdp->sd_sc_inode);
sdp               684 fs/gfs2/super.c 		iput(sdp->sd_qc_inode);
sdp               687 fs/gfs2/super.c 	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
sdp               688 fs/gfs2/super.c 	gfs2_clear_rgrpd(sdp);
sdp               689 fs/gfs2/super.c 	gfs2_jindex_free(sdp);
sdp               691 fs/gfs2/super.c 	gfs2_gl_hash_clear(sdp);
sdp               692 fs/gfs2/super.c 	gfs2_delete_debugfs_file(sdp);
sdp               694 fs/gfs2/super.c 	gfs2_lm_unmount(sdp);
sdp               697 fs/gfs2/super.c 	gfs2_sys_fs_del(sdp);
sdp               709 fs/gfs2/super.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp               713 fs/gfs2/super.c 		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
sdp               715 fs/gfs2/super.c 	return sdp->sd_log_error;
sdp               722 fs/gfs2/super.c 	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
sdp               723 fs/gfs2/super.c 	struct super_block *sb = sdp->sd_vfs;
sdp               726 fs/gfs2/super.c 	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
sdp               729 fs/gfs2/super.c 		fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
sdp               730 fs/gfs2/super.c 		gfs2_assert_withdraw(sdp, 0);
sdp               732 fs/gfs2/super.c 		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
sdp               735 fs/gfs2/super.c 			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
sdp               737 fs/gfs2/super.c 			gfs2_assert_withdraw(sdp, 0);
sdp               739 fs/gfs2/super.c 		if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
sdp               744 fs/gfs2/super.c 	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
sdp               745 fs/gfs2/super.c 	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
sdp               757 fs/gfs2/super.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp               760 fs/gfs2/super.c 	mutex_lock(&sdp->sd_freeze_mutex);
sdp               761 fs/gfs2/super.c 	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
sdp               764 fs/gfs2/super.c 	if (test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
sdp               770 fs/gfs2/super.c 		error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
sdp               775 fs/gfs2/super.c 			fs_err(sdp, "waiting for recovery before freeze\n");
sdp               777 fs/gfs2/super.c 			fs_err(sdp, "error freezing FS: %d\n", error);
sdp               779 fs/gfs2/super.c 		fs_err(sdp, "retrying...\n");
sdp               782 fs/gfs2/super.c 	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
sdp               784 fs/gfs2/super.c 	mutex_unlock(&sdp->sd_freeze_mutex);
sdp               796 fs/gfs2/super.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp               798 fs/gfs2/super.c 	mutex_lock(&sdp->sd_freeze_mutex);
sdp               799 fs/gfs2/super.c         if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
sdp               800 fs/gfs2/super.c 	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
sdp               801 fs/gfs2/super.c 		mutex_unlock(&sdp->sd_freeze_mutex);
sdp               805 fs/gfs2/super.c 	gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
sdp               806 fs/gfs2/super.c 	mutex_unlock(&sdp->sd_freeze_mutex);
sdp               807 fs/gfs2/super.c 	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
sdp               841 fs/gfs2/super.c static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
sdp               857 fs/gfs2/super.c 	rgd_next = gfs2_rgrpd_get_first(sdp);
sdp               914 fs/gfs2/super.c static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
sdp               916 fs/gfs2/super.c 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
sdp               917 fs/gfs2/super.c 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
sdp               919 fs/gfs2/super.c 	spin_lock(&sdp->sd_statfs_spin);
sdp               926 fs/gfs2/super.c 	spin_unlock(&sdp->sd_statfs_spin);
sdp               949 fs/gfs2/super.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp               953 fs/gfs2/super.c 	error = gfs2_rindex_update(sdp);
sdp               957 fs/gfs2/super.c 	if (gfs2_tune_get(sdp, gt_statfs_slow))
sdp               958 fs/gfs2/super.c 		error = gfs2_statfs_slow(sdp, &sc);
sdp               960 fs/gfs2/super.c 		error = gfs2_statfs_i(sdp, &sc);
sdp               966 fs/gfs2/super.c 	buf->f_bsize = sdp->sd_sb.sb_bsize;
sdp              1043 fs/gfs2/super.c 	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
sdp              1044 fs/gfs2/super.c 	struct gfs2_args *args = &sdp->sd_args;
sdp              1047 fs/gfs2/super.c 	if (is_ancestor(root, sdp->sd_master_dir))
sdp              1100 fs/gfs2/super.c 	val = sdp->sd_tune.gt_logd_secs;
sdp              1103 fs/gfs2/super.c 	val = sdp->sd_tune.gt_statfs_quantum;
sdp              1106 fs/gfs2/super.c 	else if (sdp->sd_tune.gt_statfs_slow)
sdp              1108 fs/gfs2/super.c 	val = sdp->sd_tune.gt_quota_quantum;
sdp              1129 fs/gfs2/super.c 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
sdp              1131 fs/gfs2/super.c 	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
sdp              1156 fs/gfs2/super.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              1166 fs/gfs2/super.c 	error = gfs2_rindex_update(sdp);
sdp              1174 fs/gfs2/super.c 	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
sdp              1185 fs/gfs2/super.c 	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
sdp              1186 fs/gfs2/super.c 				 sdp->sd_jdesc->jd_blocks);
sdp              1194 fs/gfs2/super.c 	gfs2_trans_end(sdp);
sdp              1243 fs/gfs2/super.c 	struct gfs2_sbd *sdp = sb->s_fs_info;
sdp              1276 fs/gfs2/super.c 	error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
sdp              1332 fs/gfs2/super.c 	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
sdp              1342 fs/gfs2/super.c 	error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
sdp              1348 fs/gfs2/super.c 	gfs2_trans_end(sdp);
sdp              1367 fs/gfs2/super.c 		fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
sdp                14 fs/gfs2/super.h extern void gfs2_lm_unmount(struct gfs2_sbd *sdp);
sdp                16 fs/gfs2/super.h static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
sdp                19 fs/gfs2/super.h 	spin_lock(&sdp->sd_jindex_spin);
sdp                20 fs/gfs2/super.h 	x = sdp->sd_journals;
sdp                21 fs/gfs2/super.h 	spin_unlock(&sdp->sd_jindex_spin);
sdp                25 fs/gfs2/super.h extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
sdp                27 fs/gfs2/super.h extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
sdp                30 fs/gfs2/super.h extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
sdp                33 fs/gfs2/super.h extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
sdp                34 fs/gfs2/super.h extern int gfs2_make_fs_ro(struct gfs2_sbd *sdp);
sdp                35 fs/gfs2/super.h extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
sdp                36 fs/gfs2/super.h extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
sdp                37 fs/gfs2/super.h extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
sdp                41 fs/gfs2/super.h extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
sdp                46 fs/gfs2/super.h extern void free_sbd(struct gfs2_sbd *sdp);
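
Illustrative sketch of the statfs accounting pattern visible in super.c above: the master record and the per-node local delta are combined under sd_statfs_spin, as gfs2_statfs_i() does. Hypothetical helper name; the sc_total/sc_free/sc_dinodes field names of struct gfs2_statfs_change_host are assumed, only sc_free appears in the listing.

	/* Sketch only: snapshot master + local statfs deltas under the spinlock. */
	static void example_statfs_snapshot(struct gfs2_sbd *sdp,
					    struct gfs2_statfs_change_host *out)
	{
		spin_lock(&sdp->sd_statfs_spin);
		*out = sdp->sd_statfs_master;
		out->sc_total   += sdp->sd_statfs_local.sc_total;
		out->sc_free    += sdp->sd_statfs_local.sc_free;
		out->sc_dinodes += sdp->sd_statfs_local.sc_dinodes;
		spin_unlock(&sdp->sd_statfs_spin);
	}
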
sdp                39 fs/gfs2/sys.c  	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
sdp                41 fs/gfs2/sys.c  	return a->show ? a->show(sdp, buf) : 0;
sdp                47 fs/gfs2/sys.c  	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
sdp                49 fs/gfs2/sys.c  	return a->store ? a->store(sdp, buf, len) : len;
sdp                60 fs/gfs2/sys.c  static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
sdp                63 fs/gfs2/sys.c  			MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
sdp                66 fs/gfs2/sys.c  static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
sdp                68 fs/gfs2/sys.c  	return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
sdp                71 fs/gfs2/sys.c  static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
sdp                73 fs/gfs2/sys.c  	struct super_block *s = sdp->sd_vfs;
sdp                81 fs/gfs2/sys.c  static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
sdp                83 fs/gfs2/sys.c  	struct super_block *sb = sdp->sd_vfs;
sdp                89 fs/gfs2/sys.c  static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
sdp               102 fs/gfs2/sys.c  		error = thaw_super(sdp->sd_vfs);
sdp               105 fs/gfs2/sys.c  		error = freeze_super(sdp->sd_vfs);
sdp               112 fs/gfs2/sys.c  		fs_warn(sdp, "freeze %d error %d\n", n, error);
sdp               119 fs/gfs2/sys.c  static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
sdp               121 fs/gfs2/sys.c  	unsigned int b = test_bit(SDF_WITHDRAWN, &sdp->sd_flags);
sdp               125 fs/gfs2/sys.c  static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
sdp               139 fs/gfs2/sys.c  	gfs2_lm_withdraw(sdp, "withdrawing from cluster at user's request\n");
sdp               144 fs/gfs2/sys.c  static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
sdp               159 fs/gfs2/sys.c  	gfs2_statfs_sync(sdp->sd_vfs, 0);
sdp               163 fs/gfs2/sys.c  static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
sdp               178 fs/gfs2/sys.c  	gfs2_quota_sync(sdp->sd_vfs, 0);
sdp               182 fs/gfs2/sys.c  static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
sdp               200 fs/gfs2/sys.c  	error = gfs2_quota_refresh(sdp, qid);
sdp               204 fs/gfs2/sys.c  static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
sdp               222 fs/gfs2/sys.c  	error = gfs2_quota_refresh(sdp, qid);
sdp               226 fs/gfs2/sys.c  static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
sdp               261 fs/gfs2/sys.c  	if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
sdp               262 fs/gfs2/sys.c  		fs_info(sdp, "demote interface used\n");
sdp               263 fs/gfs2/sys.c  	rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
sdp               303 fs/gfs2/sys.c  	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
sdp               305 fs/gfs2/sys.c  	free_sbd(sdp);
sdp               319 fs/gfs2/sys.c  static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
sdp               321 fs/gfs2/sys.c  	const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
sdp               325 fs/gfs2/sys.c  static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
sdp               327 fs/gfs2/sys.c  	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               337 fs/gfs2/sys.c  static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
sdp               339 fs/gfs2/sys.c  	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               351 fs/gfs2/sys.c  		gfs2_glock_thaw(sdp);
sdp               358 fs/gfs2/sys.c  static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf)
sdp               360 fs/gfs2/sys.c  	int val = completion_done(&sdp->sd_wdack) ? 1 : 0;
sdp               365 fs/gfs2/sys.c  static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
sdp               374 fs/gfs2/sys.c  	    !strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
sdp               375 fs/gfs2/sys.c  		complete(&sdp->sd_wdack);
sdp               381 fs/gfs2/sys.c  static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
sdp               383 fs/gfs2/sys.c  	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               387 fs/gfs2/sys.c  static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
sdp               395 fs/gfs2/sys.c  	rv = wait_for_completion_killable(&sdp->sd_locking_init);
sdp               398 fs/gfs2/sys.c  	spin_lock(&sdp->sd_jindex_spin);
sdp               400 fs/gfs2/sys.c  	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
sdp               403 fs/gfs2/sys.c  	if (sdp->sd_args.ar_spectator)
sdp               405 fs/gfs2/sys.c  	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
sdp               407 fs/gfs2/sys.c  	sdp->sd_lockstruct.ls_first = first;
sdp               410 fs/gfs2/sys.c          spin_unlock(&sdp->sd_jindex_spin);
sdp               414 fs/gfs2/sys.c  static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
sdp               416 fs/gfs2/sys.c  	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               420 fs/gfs2/sys.c  int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
sdp               426 fs/gfs2/sys.c  	wait_for_completion(&sdp->sd_journal_ready);
sdp               428 fs/gfs2/sys.c  	spin_lock(&sdp->sd_jindex_spin);
sdp               437 fs/gfs2/sys.c  	if (sdp->sd_jdesc->jd_jid == jid && !sdp->sd_args.ar_spectator)
sdp               440 fs/gfs2/sys.c  	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
sdp               441 fs/gfs2/sys.c  		if (jd->jd_jid != jid && !sdp->sd_args.ar_spectator)
sdp               447 fs/gfs2/sys.c  	spin_unlock(&sdp->sd_jindex_spin);
sdp               451 fs/gfs2/sys.c  static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
sdp               460 fs/gfs2/sys.c  	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
sdp               465 fs/gfs2/sys.c  	rv = gfs2_recover_set(sdp, jid);
sdp               470 fs/gfs2/sys.c  static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
sdp               472 fs/gfs2/sys.c  	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               476 fs/gfs2/sys.c  static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
sdp               478 fs/gfs2/sys.c  	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp               482 fs/gfs2/sys.c  static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
sdp               484 fs/gfs2/sys.c  	return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
sdp               487 fs/gfs2/sys.c  static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
sdp               495 fs/gfs2/sys.c  	rv = wait_for_completion_killable(&sdp->sd_locking_init);
sdp               498 fs/gfs2/sys.c  	spin_lock(&sdp->sd_jindex_spin);
sdp               500 fs/gfs2/sys.c  	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
sdp               503 fs/gfs2/sys.c  	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
sdp               506 fs/gfs2/sys.c  	if (sdp->sd_args.ar_spectator && jid > 0)
sdp               508 fs/gfs2/sys.c  	sdp->sd_lockstruct.ls_jid = jid;
sdp               509 fs/gfs2/sys.c  	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
sdp               511 fs/gfs2/sys.c  	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
sdp               513 fs/gfs2/sys.c  	spin_unlock(&sdp->sd_jindex_spin);
sdp               547 fs/gfs2/sys.c  static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
sdp               550 fs/gfs2/sys.c  			sdp->sd_tune.gt_quota_scale_num,
sdp               551 fs/gfs2/sys.c  			sdp->sd_tune.gt_quota_scale_den);
sdp               554 fs/gfs2/sys.c  static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
sdp               557 fs/gfs2/sys.c  	struct gfs2_tune *gt = &sdp->sd_tune;
sdp               573 fs/gfs2/sys.c  static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
sdp               576 fs/gfs2/sys.c  	struct gfs2_tune *gt = &sdp->sd_tune;
sdp               600 fs/gfs2/sys.c  static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf)                   \
sdp               602 fs/gfs2/sys.c  	return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name);      \
sdp               607 fs/gfs2/sys.c  static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
sdp               609 fs/gfs2/sys.c  	return tune_set(sdp, &sdp->sd_tune.gt_##name, check_zero, buf, len);  \
sdp               644 fs/gfs2/sys.c  int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
sdp               646 fs/gfs2/sys.c  	struct super_block *sb = sdp->sd_vfs;
sdp               653 fs/gfs2/sys.c  	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
sdp               655 fs/gfs2/sys.c  	sdp->sd_kobj.kset = gfs2_kset;
sdp               656 fs/gfs2/sys.c  	error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
sdp               657 fs/gfs2/sys.c  				     "%s", sdp->sd_table_name);
sdp               661 fs/gfs2/sys.c  	error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
sdp               665 fs/gfs2/sys.c  	error = sysfs_create_group(&sdp->sd_kobj, &lock_module_group);
sdp               669 fs/gfs2/sys.c  	error = sysfs_create_link(&sdp->sd_kobj,
sdp               675 fs/gfs2/sys.c  	kobject_uevent_env(&sdp->sd_kobj, KOBJ_ADD, envp);
sdp               679 fs/gfs2/sys.c  	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
sdp               681 fs/gfs2/sys.c  	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
sdp               683 fs/gfs2/sys.c  	fs_err(sdp, "error %d adding sysfs files\n", error);
sdp               684 fs/gfs2/sys.c  	kobject_put(&sdp->sd_kobj);
sdp               689 fs/gfs2/sys.c  void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
sdp               691 fs/gfs2/sys.c  	sysfs_remove_link(&sdp->sd_kobj, "device");
sdp               692 fs/gfs2/sys.c  	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
sdp               693 fs/gfs2/sys.c  	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
sdp               694 fs/gfs2/sys.c  	kobject_put(&sdp->sd_kobj);
sdp               700 fs/gfs2/sys.c  	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
sdp               701 fs/gfs2/sys.c  	struct super_block *s = sdp->sd_vfs;
sdp               703 fs/gfs2/sys.c  	add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
sdp               704 fs/gfs2/sys.c  	add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
sdp               705 fs/gfs2/sys.c  	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
sdp               706 fs/gfs2/sys.c  		add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
sdp                13 fs/gfs2/sys.h  int gfs2_sys_fs_add(struct gfs2_sbd *sdp);
sdp                14 fs/gfs2/sys.h  void gfs2_sys_fs_del(struct gfs2_sbd *sdp);
sdp                19 fs/gfs2/sys.h  int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid);
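
Illustrative sketch of what the name##_show / name##_store macro pair in sys.c above roughly expands to for a single tunable; complain_secs is used because gt_complain_secs appears in util.c below, and the 0 passed to tune_set() stands in for the macro's check_zero flag (assumed here).

	/* Sketch only: expanded form of one sysfs tunable attribute pair. */
	static ssize_t complain_secs_show(struct gfs2_sbd *sdp, char *buf)
	{
		return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_complain_secs);
	}

	static ssize_t complain_secs_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
	{
		return tune_set(sdp, &sdp->sd_tune.gt_complain_secs, 0 /* check_zero */, buf, len);
	}
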
sdp               356 fs/gfs2/trace_gfs2.h 	TP_PROTO(const struct gfs2_sbd *sdp, int start, u32 flags),
sdp               358 fs/gfs2/trace_gfs2.h 	TP_ARGS(sdp, start, flags),
sdp               368 fs/gfs2/trace_gfs2.h 		__entry->dev            = sdp->sd_vfs->s_dev;
sdp               370 fs/gfs2/trace_gfs2.h 		__entry->log_seq	= sdp->sd_log_sequence;
sdp               384 fs/gfs2/trace_gfs2.h 	TP_PROTO(const struct gfs2_sbd *sdp, int blocks),
sdp               386 fs/gfs2/trace_gfs2.h 	TP_ARGS(sdp, blocks),
sdp               394 fs/gfs2/trace_gfs2.h 		__entry->dev		= sdp->sd_vfs->s_dev;
sdp               405 fs/gfs2/trace_gfs2.h 	TP_PROTO(const struct gfs2_sbd *sdp, const struct writeback_control *wbc, int start),
sdp               407 fs/gfs2/trace_gfs2.h 	TP_ARGS(sdp, wbc, start),
sdp               417 fs/gfs2/trace_gfs2.h 		__entry->dev		= sdp->sd_vfs->s_dev;
sdp                28 fs/gfs2/trans.c int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
sdp                37 fs/gfs2/trans.c 	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
sdp                52 fs/gfs2/trans.c 		tr->tr_reserved += gfs2_struct2blk(sdp, revokes,
sdp                57 fs/gfs2/trans.c 	sb_start_intwrite(sdp->sd_vfs);
sdp                59 fs/gfs2/trans.c 	error = gfs2_log_reserve(sdp, tr->tr_reserved);
sdp                68 fs/gfs2/trans.c 	sb_end_intwrite(sdp->sd_vfs);
sdp                74 fs/gfs2/trans.c static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr)
sdp                76 fs/gfs2/trans.c 	fs_warn(sdp, "Transaction created at: %pSR\n", (void *)tr->tr_ip);
sdp                77 fs/gfs2/trans.c 	fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n",
sdp                80 fs/gfs2/trans.c 	fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u\n",
sdp                86 fs/gfs2/trans.c void gfs2_trans_end(struct gfs2_sbd *sdp)
sdp                95 fs/gfs2/trans.c 		gfs2_log_release(sdp, tr->tr_reserved);
sdp                98 fs/gfs2/trans.c 			sb_end_intwrite(sdp->sd_vfs);
sdp               107 fs/gfs2/trans.c 	if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) &&
sdp               109 fs/gfs2/trans.c 		gfs2_print_trans(sdp, tr);
sdp               111 fs/gfs2/trans.c 	gfs2_log_commit(sdp, tr);
sdp               114 fs/gfs2/trans.c 	up_read(&sdp->sd_log_flush_lock);
sdp               116 fs/gfs2/trans.c 	if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
sdp               117 fs/gfs2/trans.c 		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
sdp               120 fs/gfs2/trans.c 		sb_end_intwrite(sdp->sd_vfs);
sdp               153 fs/gfs2/trans.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               161 fs/gfs2/trans.c 	gfs2_log_lock(sdp);
sdp               164 fs/gfs2/trans.c 		gfs2_log_unlock(sdp);
sdp               171 fs/gfs2/trans.c 		gfs2_log_lock(sdp);
sdp               173 fs/gfs2/trans.c 	gfs2_assert(sdp, bd->bd_gl == gl);
sdp               178 fs/gfs2/trans.c 		gfs2_pin(sdp, bd->bd_bh);
sdp               182 fs/gfs2/trans.c 	gfs2_log_unlock(sdp);
sdp               190 fs/gfs2/trans.c 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
sdp               194 fs/gfs2/trans.c 	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
sdp               201 fs/gfs2/trans.c 	gfs2_log_lock(sdp);
sdp               204 fs/gfs2/trans.c 		gfs2_log_unlock(sdp);
sdp               213 fs/gfs2/trans.c 		gfs2_log_lock(sdp);
sdp               215 fs/gfs2/trans.c 	gfs2_assert(sdp, bd->bd_gl == gl);
sdp               223 fs/gfs2/trans.c 		fs_err(sdp, "Attempting to add uninitialised block to "
sdp               229 fs/gfs2/trans.c 		fs_info(sdp, "GFS2:adding buf while frozen\n");
sdp               230 fs/gfs2/trans.c 		gfs2_assert_withdraw(sdp, 0);
sdp               232 fs/gfs2/trans.c 	gfs2_pin(sdp, bd->bd_bh);
sdp               234 fs/gfs2/trans.c 	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
sdp               238 fs/gfs2/trans.c 	gfs2_log_unlock(sdp);
sdp               243 fs/gfs2/trans.c void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
sdp               248 fs/gfs2/trans.c 	gfs2_add_revoke(sdp, bd);
sdp               253 fs/gfs2/trans.c void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
sdp               259 fs/gfs2/trans.c 	gfs2_log_lock(sdp);
sdp               260 fs/gfs2/trans.c 	list_for_each_entry_safe(bd, tmp, &sdp->sd_log_revokes, bd_list) {
sdp               263 fs/gfs2/trans.c 			gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
sdp               264 fs/gfs2/trans.c 			sdp->sd_log_num_revoke--;
sdp               273 fs/gfs2/trans.c 	gfs2_log_unlock(sdp);
sdp                37 fs/gfs2/trans.h extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
sdp                40 fs/gfs2/trans.h extern void gfs2_trans_end(struct gfs2_sbd *sdp);
sdp                43 fs/gfs2/trans.h extern void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
sdp                44 fs/gfs2/trans.h extern void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
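
Illustrative sketch of the transaction bracket that recurs throughout the listing (quota.c, rgrp.c, super.c): reserve journal space with gfs2_trans_begin(), perform the journaled changes, then commit with gfs2_trans_end(). Hypothetical helper name; RES_DINODE is the reservation constant used above.

	/* Sketch only: the canonical begin/modify/end transaction pattern. */
	static int example_journaled_update(struct gfs2_sbd *sdp)
	{
		int error;

		error = gfs2_trans_begin(sdp, RES_DINODE, 0);	/* blocks, revokes */
		if (error)
			return error;

		/* ... journaled metadata changes would go here ... */

		gfs2_trans_end(sdp);
		return 0;
	}
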
sdp                31 fs/gfs2/util.c void gfs2_assert_i(struct gfs2_sbd *sdp)
sdp                33 fs/gfs2/util.c 	fs_emerg(sdp, "fatal assertion failed\n");
sdp                36 fs/gfs2/util.c int gfs2_lm_withdraw(struct gfs2_sbd *sdp, const char *fmt, ...)
sdp                38 fs/gfs2/util.c 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
sdp                43 fs/gfs2/util.c 	if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW &&
sdp                44 fs/gfs2/util.c 	    test_and_set_bit(SDF_WITHDRAWN, &sdp->sd_flags))
sdp                53 fs/gfs2/util.c 		fs_err(sdp, "%pV", &vaf);
sdp                58 fs/gfs2/util.c 	if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) {
sdp                59 fs/gfs2/util.c 		fs_err(sdp, "about to withdraw this file system\n");
sdp                60 fs/gfs2/util.c 		BUG_ON(sdp->sd_args.ar_debug);
sdp                62 fs/gfs2/util.c 		kobject_uevent(&sdp->sd_kobj, KOBJ_OFFLINE);
sdp                64 fs/gfs2/util.c 		if (!strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
sdp                65 fs/gfs2/util.c 			wait_for_completion(&sdp->sd_wdack);
sdp                68 fs/gfs2/util.c 			fs_err(sdp, "telling LM to unmount\n");
sdp                69 fs/gfs2/util.c 			lm->lm_unmount(sdp);
sdp                71 fs/gfs2/util.c 		set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
sdp                72 fs/gfs2/util.c 		fs_err(sdp, "withdrawn\n");
sdp                76 fs/gfs2/util.c 	if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC)
sdp                77 fs/gfs2/util.c 		panic("GFS2: fsid=%s: panic requested\n", sdp->sd_fsname);
sdp                88 fs/gfs2/util.c int gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
sdp                92 fs/gfs2/util.c 	me = gfs2_lm_withdraw(sdp,
sdp               106 fs/gfs2/util.c int gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
sdp               110 fs/gfs2/util.c 			sdp->sd_last_warning +
sdp               111 fs/gfs2/util.c 			gfs2_tune_get(sdp, gt_complain_secs) * HZ))
sdp               114 fs/gfs2/util.c 	if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW)
sdp               115 fs/gfs2/util.c 		fs_warn(sdp, "warning: assertion \"%s\" failed at function = %s, file = %s, line = %u\n",
sdp               118 fs/gfs2/util.c 	if (sdp->sd_args.ar_debug)
sdp               123 fs/gfs2/util.c 	if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC)
sdp               126 fs/gfs2/util.c 		      sdp->sd_fsname, assertion,
sdp               127 fs/gfs2/util.c 		      sdp->sd_fsname, function, file, line);
sdp               129 fs/gfs2/util.c 	sdp->sd_last_warning = jiffies;
sdp               140 fs/gfs2/util.c int gfs2_consist_i(struct gfs2_sbd *sdp, int cluster_wide, const char *function,
sdp               144 fs/gfs2/util.c 	rv = gfs2_lm_withdraw(sdp,
sdp               159 fs/gfs2/util.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               161 fs/gfs2/util.c 	rv = gfs2_lm_withdraw(sdp,
sdp               180 fs/gfs2/util.c 	struct gfs2_sbd *sdp = rgd->rd_sbd;
sdp               181 fs/gfs2/util.c 	char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
sdp               184 fs/gfs2/util.c 	sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
sdp               186 fs/gfs2/util.c 	rv = gfs2_lm_withdraw(sdp,
sdp               201 fs/gfs2/util.c int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
sdp               206 fs/gfs2/util.c 	me = gfs2_lm_withdraw(sdp,
sdp               221 fs/gfs2/util.c int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
sdp               226 fs/gfs2/util.c 	me = gfs2_lm_withdraw(sdp,
sdp               241 fs/gfs2/util.c int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file,
sdp               245 fs/gfs2/util.c 	rv = gfs2_lm_withdraw(sdp,
sdp               257 fs/gfs2/util.c void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
sdp               261 fs/gfs2/util.c 	if (!test_bit(SDF_WITHDRAWN, &sdp->sd_flags))
sdp               262 fs/gfs2/util.c 		fs_err(sdp,
sdp               269 fs/gfs2/util.c 		gfs2_lm_withdraw(sdp, NULL);
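The util.c hits show gfs2_lm_withdraw() taking a printf-style message and emitting it via fs_err(sdp, "%pV", &vaf), i.e. the kernel idiom of wrapping the caller's format string and va_list in a struct va_format and printing it through the %pV specifier. A minimal sketch of that idiom, assuming the usual printk headers; the function name is made up and only the forwarding step is shown.

static void example_report(struct gfs2_sbd *sdp, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	fs_err(sdp, "%pV", &vaf);	/* forward the caller's message verbatim */
	va_end(args);
}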
sdp                28 fs/gfs2/util.h void gfs2_assert_i(struct gfs2_sbd *sdp);
sdp                30 fs/gfs2/util.h #define gfs2_assert(sdp, assertion) \
sdp                33 fs/gfs2/util.h 		gfs2_assert_i(sdp); \
sdp                39 fs/gfs2/util.h int gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
sdp                42 fs/gfs2/util.h #define gfs2_assert_withdraw(sdp, assertion) \
sdp                43 fs/gfs2/util.h ((likely(assertion)) ? 0 : gfs2_assert_withdraw_i((sdp), #assertion, \
sdp                47 fs/gfs2/util.h int gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
sdp                50 fs/gfs2/util.h #define gfs2_assert_warn(sdp, assertion) \
sdp                51 fs/gfs2/util.h ((likely(assertion)) ? 0 : gfs2_assert_warn_i((sdp), #assertion, \
sdp                55 fs/gfs2/util.h int gfs2_consist_i(struct gfs2_sbd *sdp, int cluster_wide,
sdp                58 fs/gfs2/util.h #define gfs2_consist(sdp) \
sdp                59 fs/gfs2/util.h gfs2_consist_i((sdp), 0, __func__, __FILE__, __LINE__)
sdp                76 fs/gfs2/util.h int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
sdp                80 fs/gfs2/util.h static inline int gfs2_meta_check(struct gfs2_sbd *sdp,
sdp                86 fs/gfs2/util.h 		fs_err(sdp, "Magic number missing at %llu\n",
sdp                93 fs/gfs2/util.h int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
sdp                98 fs/gfs2/util.h static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp,
sdp               108 fs/gfs2/util.h 		return gfs2_meta_check_ii(sdp, bh, "magic number", function,
sdp               111 fs/gfs2/util.h 		return gfs2_metatype_check_ii(sdp, bh, type, t, function,
sdp               116 fs/gfs2/util.h #define gfs2_metatype_check(sdp, bh, type) \
sdp               117 fs/gfs2/util.h gfs2_metatype_check_i((sdp), (bh), (type), __func__, __FILE__, __LINE__)
sdp               129 fs/gfs2/util.h int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
sdp               132 fs/gfs2/util.h #define gfs2_io_error(sdp) \
sdp               133 fs/gfs2/util.h gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__);
sdp               136 fs/gfs2/util.h void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
sdp               140 fs/gfs2/util.h #define gfs2_io_error_bh_wd(sdp, bh) \
sdp               141 fs/gfs2/util.h gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, true);
sdp               143 fs/gfs2/util.h #define gfs2_io_error_bh(sdp, bh) \
sdp               144 fs/gfs2/util.h gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__, false);
sdp               167 fs/gfs2/util.h #define gfs2_tune_get(sdp, field) \
sdp               168 fs/gfs2/util.h gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
sdp               171 fs/gfs2/util.h int gfs2_lm_withdraw(struct gfs2_sbd *sdp, const char *fmt, ...);
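The util.h hits show the shape of the gfs2 assertion macros: the condition is evaluated inline with likely(), and only the failure path calls an out-of-line reporter that receives the stringified expression plus __func__, __FILE__ and __LINE__. A generic sketch of that pattern outside gfs2; my_assert_fail() and my_assert_warn() are hypothetical names used purely for illustration.

int my_assert_fail(void *ctx, const char *assertion,
		   const char *function, const char *file, unsigned int line);

#define my_assert_warn(ctx, assertion) \
	((likely(assertion)) ? 0 : \
	 my_assert_fail((ctx), #assertion, __func__, __FILE__, __LINE__))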
sdp                39 fs/gfs2/xattr.c static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize,
sdp                42 fs/gfs2/xattr.c 	unsigned int jbsize = sdp->sd_jbsize;
sdp                57 fs/gfs2/xattr.c static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
sdp                64 fs/gfs2/xattr.c 	ea_calc_size(sdp, nsize, dsize, &size);
sdp                67 fs/gfs2/xattr.c 	if (size > sdp->sd_jbsize)
sdp               228 fs/gfs2/xattr.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               239 fs/gfs2/xattr.c 	error = gfs2_rindex_update(sdp);
sdp               256 fs/gfs2/xattr.c 	rgd = gfs2_blk2rgrpd(sdp, bn, 1);
sdp               266 fs/gfs2/xattr.c 	error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
sdp               310 fs/gfs2/xattr.c 	gfs2_trans_end(sdp);
sdp               442 fs/gfs2/xattr.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               445 fs/gfs2/xattr.c 	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
sdp               468 fs/gfs2/xattr.c 		error = gfs2_meta_wait(sdp, bh[x]);
sdp               474 fs/gfs2/xattr.c 		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
sdp               482 fs/gfs2/xattr.c 		cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize;
sdp               486 fs/gfs2/xattr.c 			dout += sdp->sd_jbsize;
sdp               492 fs/gfs2/xattr.c 			din += sdp->sd_jbsize;
sdp               495 fs/gfs2/xattr.c 		amount -= sdp->sd_jbsize;
sdp               622 fs/gfs2/xattr.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               631 fs/gfs2/xattr.c 	gfs2_trans_remove_revoke(sdp, block, 1);
sdp               638 fs/gfs2/xattr.c 	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
sdp               663 fs/gfs2/xattr.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               673 fs/gfs2/xattr.c 	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
sdp               683 fs/gfs2/xattr.c 		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
sdp               693 fs/gfs2/xattr.c 			gfs2_trans_remove_revoke(sdp, block, 1);
sdp               700 fs/gfs2/xattr.c 			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
sdp               703 fs/gfs2/xattr.c 			if (copy < sdp->sd_jbsize)
sdp               705 fs/gfs2/xattr.c 				       sdp->sd_jbsize - copy);
sdp               714 fs/gfs2/xattr.c 		gfs2_assert_withdraw(sdp, !data_len);
sdp               959 fs/gfs2/xattr.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp               973 fs/gfs2/xattr.c 		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
sdp               979 fs/gfs2/xattr.c 		end = eablk + sdp->sd_inptrs;
sdp               997 fs/gfs2/xattr.c 		gfs2_trans_remove_revoke(sdp, blk, 1);
sdp              1161 fs/gfs2/xattr.c 	struct gfs2_sbd *sdp = GFS2_SB(inode);
sdp              1178 fs/gfs2/xattr.c 	if (ea_check_size(sdp, namel, size))
sdp              1248 fs/gfs2/xattr.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              1260 fs/gfs2/xattr.c 	error = gfs2_rindex_update(sdp);
sdp              1270 fs/gfs2/xattr.c 	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
sdp              1276 fs/gfs2/xattr.c 	end = eablk + sdp->sd_inptrs;
sdp              1311 fs/gfs2/xattr.c 	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
sdp              1336 fs/gfs2/xattr.c 			rgd = gfs2_blk2rgrpd(sdp, bstart, true);
sdp              1355 fs/gfs2/xattr.c 	gfs2_trans_end(sdp);
sdp              1368 fs/gfs2/xattr.c 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
sdp              1374 fs/gfs2/xattr.c 	error = gfs2_rindex_update(sdp);
sdp              1378 fs/gfs2/xattr.c 	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
sdp              1388 fs/gfs2/xattr.c 	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
sdp              1405 fs/gfs2/xattr.c 	gfs2_trans_end(sdp);
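In the xattr.c hits, extended-attribute data that does not fit in the dinode is spread across journal-block-sized chunks: the block count comes from DIV_ROUND_UP(amount, sdp->sd_jbsize) and each copy step is capped at sd_jbsize. A hedged sketch of that arithmetic only; the function name is illustrative and the buffer management around it is omitted.

static unsigned int example_copy_ea_data(struct gfs2_sbd *sdp, char *dout,
					 const char *din, size_t amount)
{
	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);	/* blocks spanned */

	while (amount) {
		size_t cp = min_t(size_t, amount, sdp->sd_jbsize);

		memcpy(dout, din, cp);		/* one journal-block-sized chunk */
		dout += cp;
		din += cp;
		amount -= cp;
	}
	return nptrs;
}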
sdp              1743 fs/reiserfs/reiserfs.h #define sd_v1_mode(sdp)         (le16_to_cpu((sdp)->sd_mode))
sdp              1744 fs/reiserfs/reiserfs.h #define set_sd_v1_mode(sdp,v)   ((sdp)->sd_mode = cpu_to_le16(v))
sdp              1745 fs/reiserfs/reiserfs.h #define sd_v1_nlink(sdp)        (le16_to_cpu((sdp)->sd_nlink))
sdp              1746 fs/reiserfs/reiserfs.h #define set_sd_v1_nlink(sdp,v)  ((sdp)->sd_nlink = cpu_to_le16(v))
sdp              1747 fs/reiserfs/reiserfs.h #define sd_v1_uid(sdp)          (le16_to_cpu((sdp)->sd_uid))
sdp              1748 fs/reiserfs/reiserfs.h #define set_sd_v1_uid(sdp,v)    ((sdp)->sd_uid = cpu_to_le16(v))
sdp              1749 fs/reiserfs/reiserfs.h #define sd_v1_gid(sdp)          (le16_to_cpu((sdp)->sd_gid))
sdp              1750 fs/reiserfs/reiserfs.h #define set_sd_v1_gid(sdp,v)    ((sdp)->sd_gid = cpu_to_le16(v))
sdp              1751 fs/reiserfs/reiserfs.h #define sd_v1_size(sdp)         (le32_to_cpu((sdp)->sd_size))
sdp              1752 fs/reiserfs/reiserfs.h #define set_sd_v1_size(sdp,v)   ((sdp)->sd_size = cpu_to_le32(v))
sdp              1753 fs/reiserfs/reiserfs.h #define sd_v1_atime(sdp)        (le32_to_cpu((sdp)->sd_atime))
sdp              1754 fs/reiserfs/reiserfs.h #define set_sd_v1_atime(sdp,v)  ((sdp)->sd_atime = cpu_to_le32(v))
sdp              1755 fs/reiserfs/reiserfs.h #define sd_v1_mtime(sdp)        (le32_to_cpu((sdp)->sd_mtime))
sdp              1756 fs/reiserfs/reiserfs.h #define set_sd_v1_mtime(sdp,v)  ((sdp)->sd_mtime = cpu_to_le32(v))
sdp              1757 fs/reiserfs/reiserfs.h #define sd_v1_ctime(sdp)        (le32_to_cpu((sdp)->sd_ctime))
sdp              1758 fs/reiserfs/reiserfs.h #define set_sd_v1_ctime(sdp,v)  ((sdp)->sd_ctime = cpu_to_le32(v))
sdp              1759 fs/reiserfs/reiserfs.h #define sd_v1_rdev(sdp)         (le32_to_cpu((sdp)->u.sd_rdev))
sdp              1760 fs/reiserfs/reiserfs.h #define set_sd_v1_rdev(sdp,v)   ((sdp)->u.sd_rdev = cpu_to_le32(v))
sdp              1761 fs/reiserfs/reiserfs.h #define sd_v1_blocks(sdp)       (le32_to_cpu((sdp)->u.sd_blocks))
sdp              1762 fs/reiserfs/reiserfs.h #define set_sd_v1_blocks(sdp,v) ((sdp)->u.sd_blocks = cpu_to_le32(v))
sdp              1763 fs/reiserfs/reiserfs.h #define sd_v1_first_direct_byte(sdp) \
sdp              1764 fs/reiserfs/reiserfs.h                                 (le32_to_cpu((sdp)->sd_first_direct_byte))
sdp              1765 fs/reiserfs/reiserfs.h #define set_sd_v1_first_direct_byte(sdp,v) \
sdp              1766 fs/reiserfs/reiserfs.h                                 ((sdp)->sd_first_direct_byte = cpu_to_le32(v))
sdp              1823 fs/reiserfs/reiserfs.h #define sd_v2_mode(sdp)         (le16_to_cpu((sdp)->sd_mode))
sdp              1824 fs/reiserfs/reiserfs.h #define set_sd_v2_mode(sdp,v)   ((sdp)->sd_mode = cpu_to_le16(v))
sdp              1827 fs/reiserfs/reiserfs.h #define sd_v2_nlink(sdp)        (le32_to_cpu((sdp)->sd_nlink))
sdp              1828 fs/reiserfs/reiserfs.h #define set_sd_v2_nlink(sdp,v)  ((sdp)->sd_nlink = cpu_to_le32(v))
sdp              1829 fs/reiserfs/reiserfs.h #define sd_v2_size(sdp)         (le64_to_cpu((sdp)->sd_size))
sdp              1830 fs/reiserfs/reiserfs.h #define set_sd_v2_size(sdp,v)   ((sdp)->sd_size = cpu_to_le64(v))
sdp              1831 fs/reiserfs/reiserfs.h #define sd_v2_uid(sdp)          (le32_to_cpu((sdp)->sd_uid))
sdp              1832 fs/reiserfs/reiserfs.h #define set_sd_v2_uid(sdp,v)    ((sdp)->sd_uid = cpu_to_le32(v))
sdp              1833 fs/reiserfs/reiserfs.h #define sd_v2_gid(sdp)          (le32_to_cpu((sdp)->sd_gid))
sdp              1834 fs/reiserfs/reiserfs.h #define set_sd_v2_gid(sdp,v)    ((sdp)->sd_gid = cpu_to_le32(v))
sdp              1835 fs/reiserfs/reiserfs.h #define sd_v2_atime(sdp)        (le32_to_cpu((sdp)->sd_atime))
sdp              1836 fs/reiserfs/reiserfs.h #define set_sd_v2_atime(sdp,v)  ((sdp)->sd_atime = cpu_to_le32(v))
sdp              1837 fs/reiserfs/reiserfs.h #define sd_v2_mtime(sdp)        (le32_to_cpu((sdp)->sd_mtime))
sdp              1838 fs/reiserfs/reiserfs.h #define set_sd_v2_mtime(sdp,v)  ((sdp)->sd_mtime = cpu_to_le32(v))
sdp              1839 fs/reiserfs/reiserfs.h #define sd_v2_ctime(sdp)        (le32_to_cpu((sdp)->sd_ctime))
sdp              1840 fs/reiserfs/reiserfs.h #define set_sd_v2_ctime(sdp,v)  ((sdp)->sd_ctime = cpu_to_le32(v))
sdp              1841 fs/reiserfs/reiserfs.h #define sd_v2_blocks(sdp)       (le32_to_cpu((sdp)->sd_blocks))
sdp              1842 fs/reiserfs/reiserfs.h #define set_sd_v2_blocks(sdp,v) ((sdp)->sd_blocks = cpu_to_le32(v))
sdp              1843 fs/reiserfs/reiserfs.h #define sd_v2_rdev(sdp)         (le32_to_cpu((sdp)->u.sd_rdev))
sdp              1844 fs/reiserfs/reiserfs.h #define set_sd_v2_rdev(sdp,v)   ((sdp)->u.sd_rdev = cpu_to_le32(v))
sdp              1845 fs/reiserfs/reiserfs.h #define sd_v2_generation(sdp)   (le32_to_cpu((sdp)->u.sd_generation))
sdp              1846 fs/reiserfs/reiserfs.h #define set_sd_v2_generation(sdp,v) ((sdp)->u.sd_generation = cpu_to_le32(v))
sdp              1847 fs/reiserfs/reiserfs.h #define sd_v2_attrs(sdp)         (le16_to_cpu((sdp)->sd_attrs))
sdp              1848 fs/reiserfs/reiserfs.h #define set_sd_v2_attrs(sdp,v)   ((sdp)->sd_attrs = cpu_to_le16(v))
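In fs/reiserfs/reiserfs.h, sdp is a pointer to an on-disk stat-data item, and every accessor macro pairs a leXX_to_cpu() read with a cpu_to_leXX() write so callers never see raw little-endian fields. A sketch of the same pattern applied to a made-up on-disk record, just to show the shape:

struct example_disk_record {
	__le16 dr_mode;
	__le32 dr_size;
};

#define dr_mode(drp)        (le16_to_cpu((drp)->dr_mode))
#define set_dr_mode(drp, v) ((drp)->dr_mode = cpu_to_le16(v))
#define dr_size(drp)        (le32_to_cpu((drp)->dr_size))
#define set_dr_size(drp, v) ((drp)->dr_size = cpu_to_le32(v))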
sdp                89 kernel/rcu/srcutree.c 	struct srcu_data *sdp;
sdp               129 kernel/rcu/srcutree.c 	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
sdp               130 kernel/rcu/srcutree.c 		     ARRAY_SIZE(sdp->srcu_unlock_count));
sdp               134 kernel/rcu/srcutree.c 		sdp = per_cpu_ptr(ssp->sda, cpu);
sdp               135 kernel/rcu/srcutree.c 		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
sdp               136 kernel/rcu/srcutree.c 		rcu_segcblist_init(&sdp->srcu_cblist);
sdp               137 kernel/rcu/srcutree.c 		sdp->srcu_cblist_invoking = false;
sdp               138 kernel/rcu/srcutree.c 		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
sdp               139 kernel/rcu/srcutree.c 		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
sdp               140 kernel/rcu/srcutree.c 		sdp->mynode = &snp_first[cpu / levelspread[level]];
sdp               141 kernel/rcu/srcutree.c 		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
sdp               146 kernel/rcu/srcutree.c 		sdp->cpu = cpu;
sdp               147 kernel/rcu/srcutree.c 		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
sdp               148 kernel/rcu/srcutree.c 		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
sdp               149 kernel/rcu/srcutree.c 		sdp->ssp = ssp;
sdp               150 kernel/rcu/srcutree.c 		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
sdp               155 kernel/rcu/srcutree.c 		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
sdp               156 kernel/rcu/srcutree.c 			sdp->srcu_lock_count[i] = 0;
sdp               157 kernel/rcu/srcutree.c 			sdp->srcu_unlock_count[i] = 0;
sdp               380 kernel/rcu/srcutree.c 		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
sdp               382 kernel/rcu/srcutree.c 		del_timer_sync(&sdp->delay_work);
sdp               383 kernel/rcu/srcutree.c 		flush_work(&sdp->work);
sdp               384 kernel/rcu/srcutree.c 		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
sdp               440 kernel/rcu/srcutree.c 	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
sdp               445 kernel/rcu/srcutree.c 	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
sdp               446 kernel/rcu/srcutree.c 	rcu_segcblist_advance(&sdp->srcu_cblist,
sdp               448 kernel/rcu/srcutree.c 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
sdp               450 kernel/rcu/srcutree.c 	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
sdp               460 kernel/rcu/srcutree.c 	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
sdp               462 kernel/rcu/srcutree.c 	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
sdp               465 kernel/rcu/srcutree.c static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
sdp               469 kernel/rcu/srcutree.c 		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
sdp               473 kernel/rcu/srcutree.c 	timer_reduce(&sdp->delay_work, jiffies + delay);
sdp               480 kernel/rcu/srcutree.c static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
sdp               482 kernel/rcu/srcutree.c 	srcu_queue_delayed_work_on(sdp, delay);
sdp               522 kernel/rcu/srcutree.c 	struct srcu_data *sdp;
sdp               563 kernel/rcu/srcutree.c 				sdp = per_cpu_ptr(ssp->sda, cpu);
sdp               564 kernel/rcu/srcutree.c 				spin_lock_irqsave_rcu_node(sdp, flags);
sdp               566 kernel/rcu/srcutree.c 						 sdp->srcu_gp_seq_needed + 100))
sdp               567 kernel/rcu/srcutree.c 					sdp->srcu_gp_seq_needed = gpseq;
sdp               569 kernel/rcu/srcutree.c 						 sdp->srcu_gp_seq_needed_exp + 100))
sdp               570 kernel/rcu/srcutree.c 					sdp->srcu_gp_seq_needed_exp = gpseq;
sdp               571 kernel/rcu/srcutree.c 				spin_unlock_irqrestore_rcu_node(sdp, flags);
sdp               631 kernel/rcu/srcutree.c static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
sdp               635 kernel/rcu/srcutree.c 	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
sdp               636 kernel/rcu/srcutree.c 	struct srcu_node *snp = sdp->mynode;
sdp               641 kernel/rcu/srcutree.c 		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
sdp               646 kernel/rcu/srcutree.c 			if (snp == sdp->mynode && snp_seq == s)
sdp               647 kernel/rcu/srcutree.c 				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
sdp               649 kernel/rcu/srcutree.c 			if (snp == sdp->mynode && snp_seq != s) {
sdp               650 kernel/rcu/srcutree.c 				srcu_schedule_cbs_sdp(sdp, do_norm
sdp               660 kernel/rcu/srcutree.c 		if (snp == sdp->mynode)
sdp               661 kernel/rcu/srcutree.c 			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
sdp               763 kernel/rcu/srcutree.c 	struct srcu_data *sdp;
sdp               769 kernel/rcu/srcutree.c 	sdp = this_cpu_ptr(ssp->sda);
sdp               770 kernel/rcu/srcutree.c 	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
sdp               843 kernel/rcu/srcutree.c 	struct srcu_data *sdp;
sdp               855 kernel/rcu/srcutree.c 	sdp = this_cpu_ptr(ssp->sda);
sdp               856 kernel/rcu/srcutree.c 	spin_lock_rcu_node(sdp);
sdp               857 kernel/rcu/srcutree.c 	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
sdp               858 kernel/rcu/srcutree.c 	rcu_segcblist_advance(&sdp->srcu_cblist,
sdp               861 kernel/rcu/srcutree.c 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
sdp               862 kernel/rcu/srcutree.c 	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
sdp               863 kernel/rcu/srcutree.c 		sdp->srcu_gp_seq_needed = s;
sdp               866 kernel/rcu/srcutree.c 	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
sdp               867 kernel/rcu/srcutree.c 		sdp->srcu_gp_seq_needed_exp = s;
sdp               870 kernel/rcu/srcutree.c 	spin_unlock_irqrestore_rcu_node(sdp, flags);
sdp               872 kernel/rcu/srcutree.c 		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
sdp               874 kernel/rcu/srcutree.c 		srcu_funnel_exp_start(ssp, sdp->mynode, s);
sdp              1009 kernel/rcu/srcutree.c 	struct srcu_data *sdp;
sdp              1012 kernel/rcu/srcutree.c 	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
sdp              1013 kernel/rcu/srcutree.c 	ssp = sdp->ssp;
sdp              1025 kernel/rcu/srcutree.c 	struct srcu_data *sdp;
sdp              1050 kernel/rcu/srcutree.c 		sdp = per_cpu_ptr(ssp->sda, cpu);
sdp              1051 kernel/rcu/srcutree.c 		spin_lock_irq_rcu_node(sdp);
sdp              1053 kernel/rcu/srcutree.c 		sdp->srcu_barrier_head.func = srcu_barrier_cb;
sdp              1054 kernel/rcu/srcutree.c 		debug_rcu_head_queue(&sdp->srcu_barrier_head);
sdp              1055 kernel/rcu/srcutree.c 		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
sdp              1056 kernel/rcu/srcutree.c 					   &sdp->srcu_barrier_head, 0)) {
sdp              1057 kernel/rcu/srcutree.c 			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
sdp              1060 kernel/rcu/srcutree.c 		spin_unlock_irq_rcu_node(sdp);
sdp              1162 kernel/rcu/srcutree.c 	struct srcu_data *sdp;
sdp              1165 kernel/rcu/srcutree.c 	sdp = container_of(work, struct srcu_data, work);
sdp              1167 kernel/rcu/srcutree.c 	ssp = sdp->ssp;
sdp              1169 kernel/rcu/srcutree.c 	spin_lock_irq_rcu_node(sdp);
sdp              1170 kernel/rcu/srcutree.c 	rcu_segcblist_advance(&sdp->srcu_cblist,
sdp              1172 kernel/rcu/srcutree.c 	if (sdp->srcu_cblist_invoking ||
sdp              1173 kernel/rcu/srcutree.c 	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
sdp              1174 kernel/rcu/srcutree.c 		spin_unlock_irq_rcu_node(sdp);
sdp              1179 kernel/rcu/srcutree.c 	sdp->srcu_cblist_invoking = true;
sdp              1180 kernel/rcu/srcutree.c 	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
sdp              1181 kernel/rcu/srcutree.c 	spin_unlock_irq_rcu_node(sdp);
sdp              1194 kernel/rcu/srcutree.c 	spin_lock_irq_rcu_node(sdp);
sdp              1195 kernel/rcu/srcutree.c 	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
sdp              1196 kernel/rcu/srcutree.c 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
sdp              1198 kernel/rcu/srcutree.c 	sdp->srcu_cblist_invoking = false;
sdp              1199 kernel/rcu/srcutree.c 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
sdp              1200 kernel/rcu/srcutree.c 	spin_unlock_irq_rcu_node(sdp);
sdp              1202 kernel/rcu/srcutree.c 		srcu_schedule_cbs_sdp(sdp, 0);
sdp              1266 kernel/rcu/srcutree.c 		struct srcu_data *sdp;
sdp              1268 kernel/rcu/srcutree.c 		sdp = per_cpu_ptr(ssp->sda, cpu);
sdp              1269 kernel/rcu/srcutree.c 		u0 = sdp->srcu_unlock_count[!idx];
sdp              1270 kernel/rcu/srcutree.c 		u1 = sdp->srcu_unlock_count[idx];
sdp              1278 kernel/rcu/srcutree.c 		l0 = sdp->srcu_lock_count[!idx];
sdp              1279 kernel/rcu/srcutree.c 		l1 = sdp->srcu_lock_count[idx];
sdp              1285 kernel/rcu/srcutree.c 			"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
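In kernel/rcu/srcutree.c, sdp is the per-CPU struct srcu_data reached through per_cpu_ptr(ssp->sda, cpu) (or this_cpu_ptr for the local CPU), each holding its own segmented callback list, work item and delay timer. A hedged sketch of the per-CPU teardown walk visible in the cleanup hits above; the function name and the choice of for_each_possible_cpu() are assumptions.

static void example_flush_all(struct srcu_struct *ssp)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);	/* cancel any deferred kick */
		flush_work(&sdp->work);			/* wait for callback invocation work */
	}
}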
sdp               234 net/nfc/llcp.h void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);
sdp               174 net/nfc/llcp_commands.c void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp)
sdp               176 net/nfc/llcp_commands.c 	kfree(sdp->tlv);
sdp               177 net/nfc/llcp_commands.c 	kfree(sdp);
sdp               182 net/nfc/llcp_commands.c 	struct nfc_llcp_sdp_tlv *sdp;
sdp               185 net/nfc/llcp_commands.c 	hlist_for_each_entry_safe(sdp, n, head, node) {
sdp               186 net/nfc/llcp_commands.c 		hlist_del(&sdp->node);
sdp               188 net/nfc/llcp_commands.c 		nfc_llcp_free_sdp_tlv(sdp);
sdp               555 net/nfc/llcp_commands.c 	struct nfc_llcp_sdp_tlv *sdp;
sdp               563 net/nfc/llcp_commands.c 	hlist_for_each_entry_safe(sdp, n, tlv_list, node) {
sdp               564 net/nfc/llcp_commands.c 		skb_put_data(skb, sdp->tlv, sdp->tlv_len);
sdp               566 net/nfc/llcp_commands.c 		hlist_del(&sdp->node);
sdp               568 net/nfc/llcp_commands.c 		nfc_llcp_free_sdp_tlv(sdp);
sdp               247 net/nfc/llcp_core.c 	struct nfc_llcp_sdp_tlv *sdp;
sdp               255 net/nfc/llcp_core.c 	hlist_for_each_entry_safe(sdp, n, &local->pending_sdreqs, node) {
sdp               256 net/nfc/llcp_core.c 		if (time_after(sdp->time, time))
sdp               259 net/nfc/llcp_core.c 		sdp->sap = LLCP_SDP_UNBOUND;
sdp               261 net/nfc/llcp_core.c 		hlist_del(&sdp->node);
sdp               263 net/nfc/llcp_core.c 		hlist_add_head(&sdp->node, &nl_sdres_list);
sdp               451 net/nfc/llcp_core.c 	unsigned long *sdp;
sdp               455 net/nfc/llcp_core.c 		sdp = &local->local_wks;
sdp               460 net/nfc/llcp_core.c 		sdp = &local->local_sdp;
sdp               472 net/nfc/llcp_core.c 			clear_bit(local_ssap, sdp);
sdp               487 net/nfc/llcp_core.c 		sdp = &local->local_sap;
sdp               494 net/nfc/llcp_core.c 	clear_bit(local_ssap, sdp);
sdp              1236 net/nfc/llcp_core.c 	struct nfc_llcp_sdp_tlv *sdp;
sdp              1314 net/nfc/llcp_core.c 			sdp = nfc_llcp_build_sdres_tlv(tid, sap);
sdp              1315 net/nfc/llcp_core.c 			if (sdp == NULL)
sdp              1318 net/nfc/llcp_core.c 			sdres_tlvs_len += sdp->tlv_len;
sdp              1319 net/nfc/llcp_core.c 			hlist_add_head(&sdp->node, &llc_sdres_list);
sdp              1327 net/nfc/llcp_core.c 			hlist_for_each_entry(sdp, &local->pending_sdreqs, node) {
sdp              1328 net/nfc/llcp_core.c 				if (sdp->tid != tlv[2])
sdp              1331 net/nfc/llcp_core.c 				sdp->sap = tlv[3];
sdp              1334 net/nfc/llcp_core.c 					 sdp->uri, sdp->sap);
sdp              1336 net/nfc/llcp_core.c 				hlist_del(&sdp->node);
sdp              1338 net/nfc/llcp_core.c 				hlist_add_head(&sdp->node, &nl_sdres_list);
sdp                57 net/nfc/nfc.h  void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);
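In net/nfc, sdp is a service-discovery TLV entry (struct nfc_llcp_sdp_tlv) kept on hlists; the commands/core hits show the teardown pattern of unlinking each node and freeing it with nfc_llcp_free_sdp_tlv() (which kfrees the tlv buffer and then the entry). A sketch of that safe-iterator cleanup; the function name is illustrative.

static void example_free_sdp_list(struct hlist_head *head)
{
	struct nfc_llcp_sdp_tlv *sdp;
	struct hlist_node *n;

	hlist_for_each_entry_safe(sdp, n, head, node) {
		hlist_del(&sdp->node);			/* unlink before freeing */
		nfc_llcp_free_sdp_tlv(sdp);		/* kfree(sdp->tlv); kfree(sdp); */
	}
}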
sdp               282 sound/sparc/dbri.c 	u32 sdp;		/* SDP command word */
sdp               824 sound/sparc/dbri.c 	int sdp;
sdp               834 sound/sparc/dbri.c 	sdp = dbri->pipes[pipe].sdp;
sdp               835 sound/sparc/dbri.c 	if (sdp == 0) {
sdp               842 sound/sparc/dbri.c 	*(cmd++) = DBRI_CMD(D_SDP, 0, sdp | D_SDP_C | D_SDP_P);
sdp               862 sound/sparc/dbri.c static void setup_pipe(struct snd_dbri *dbri, int pipe, int sdp)
sdp               870 sound/sparc/dbri.c 	if ((sdp & 0xf800) != sdp) {
sdp               879 sound/sparc/dbri.c 	if (D_SDP_MODE(sdp) == D_SDP_FIXED && !(sdp & D_SDP_TO_SER))
sdp               880 sound/sparc/dbri.c 		sdp |= D_SDP_CHANGE;
sdp               882 sound/sparc/dbri.c 	sdp |= D_PIPE(pipe);
sdp               883 sound/sparc/dbri.c 	dbri->pipes[pipe].sdp = sdp;
sdp               908 sound/sparc/dbri.c 	if (dbri->pipes[pipe].sdp == 0
sdp               909 sound/sparc/dbri.c 			|| dbri->pipes[prevpipe].sdp == 0
sdp               910 sound/sparc/dbri.c 			|| dbri->pipes[nextpipe].sdp == 0) {
sdp               922 sound/sparc/dbri.c 	if (dbri->pipes[pipe].sdp & D_SDP_TO_SER) {
sdp              1012 sound/sparc/dbri.c 	if (D_SDP_MODE(dbri->pipes[pipe].sdp) == 0) {
sdp              1018 sound/sparc/dbri.c 	if (D_SDP_MODE(dbri->pipes[pipe].sdp) != D_SDP_FIXED) {
sdp              1023 sound/sparc/dbri.c 	if (!(dbri->pipes[pipe].sdp & D_SDP_TO_SER)) {
sdp              1031 sound/sparc/dbri.c 	if (dbri->pipes[pipe].sdp & D_SDP_MSB)
sdp              1055 sound/sparc/dbri.c 	if (D_SDP_MODE(dbri->pipes[pipe].sdp) != D_SDP_FIXED) {
sdp              1061 sound/sparc/dbri.c 	if (dbri->pipes[pipe].sdp & D_SDP_TO_SER) {
sdp              1101 sound/sparc/dbri.c 	if (dbri->pipes[info->pipe].sdp == 0) {
sdp              1111 sound/sparc/dbri.c 		if (!(dbri->pipes[info->pipe].sdp & D_SDP_TO_SER)) {
sdp              1117 sound/sparc/dbri.c 		if (dbri->pipes[info->pipe].sdp & D_SDP_TO_SER) {
sdp              1259 sound/sparc/dbri.c 	dbri->pipes[16].sdp = 1;
sdp              1731 sound/sparc/dbri.c 					    dbri->pipes[info->pipe].sdp
sdp              1753 sound/sparc/dbri.c 					    dbri->pipes[info->pipe].sdp
sdp              1886 sound/sparc/dbri.c 					    dbri->pipes[pipe].sdp
sdp              1895 sound/sparc/dbri.c 		if (dbri->pipes[channel].sdp & D_SDP_MSB)
sdp              2503 sound/sparc/dbri.c 				   (pptr->sdp & D_SDP_TO_SER) ? "output" :
sdp              2505 sound/sparc/dbri.c 				    pptr->sdp, pptr->desc,
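In sound/sparc/dbri.c, sdp is the cached SDP (setup data pipe) command word stored per pipe in dbri->pipes[pipe].sdp: zero means the pipe was never set up, D_SDP_MODE() extracts its transfer mode, and D_SDP_TO_SER marks the to-serial (output) direction. A hedged sketch of the kind of checks the hits above make before touching a pipe; the helper name is made up and the flag macros are taken on faith from the listing.

static int example_pipe_is_fixed_output(struct snd_dbri *dbri, int pipe)
{
	u32 sdp = dbri->pipes[pipe].sdp;

	if (sdp == 0)				/* pipe never set up */
		return 0;
	if (D_SDP_MODE(sdp) != D_SDP_FIXED)	/* not a fixed-data pipe */
		return 0;
	return !!(sdp & D_SDP_TO_SER);		/* output (to-serial) direction? */
}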