im                113 arch/arm/mach-ux500/pm.c 	u32 it, im;
im                118 arch/arm/mach-ux500/pm.c 		im = readl(PRCM_ARMITMSK31TO0 + i * 4);
im                119 arch/arm/mach-ux500/pm.c 		if (it & im)
im               1142 arch/ia64/include/asm/pal.h 				im			: 1,
im                194 arch/ia64/kernel/palinfo.c 		if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
im                637 arch/ia64/kernel/process.c 		if (power_info[i].pal_power_mgmt_info_s.im
im                 80 arch/mips/lantiq/irq.c 	unsigned long im = offset / INT_NUM_IM_OFFSET;
im                 88 arch/mips/lantiq/irq.c 		ltq_icu_w32(vpe, im,
im                 89 arch/mips/lantiq/irq.c 			    ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
im                 98 arch/mips/lantiq/irq.c 	unsigned long im = offset / INT_NUM_IM_OFFSET;
im                106 arch/mips/lantiq/irq.c 		ltq_icu_w32(vpe, im,
im                107 arch/mips/lantiq/irq.c 			    ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
im                109 arch/mips/lantiq/irq.c 		ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
im                117 arch/mips/lantiq/irq.c 	unsigned long im = offset / INT_NUM_IM_OFFSET;
im                125 arch/mips/lantiq/irq.c 		ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
im                133 arch/mips/lantiq/irq.c 	unsigned long im = offset / INT_NUM_IM_OFFSET;
im                147 arch/mips/lantiq/irq.c 	ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
im                449 arch/powerpc/kvm/book3s_64_mmu_radix.c 	unsigned long im;
im                452 arch/powerpc/kvm/book3s_64_mmu_radix.c 	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
im                 23 arch/powerpc/platforms/83xx/mpc837x_rdb.c 	void __iomem *im;
im                 25 arch/powerpc/platforms/83xx/mpc837x_rdb.c 	im = ioremap(get_immrbase(), 0x1000);
im                 26 arch/powerpc/platforms/83xx/mpc837x_rdb.c 	if (!im) {
im                 35 arch/powerpc/platforms/83xx/mpc837x_rdb.c 	clrsetbits_be32(im + MPC83XX_SICRL_OFFS, MPC837X_SICRL_USBB_MASK,
im                 37 arch/powerpc/platforms/83xx/mpc837x_rdb.c 	clrsetbits_be32(im + MPC83XX_SICRH_OFFS, MPC837X_SICRH_SPI_MASK,
im                 39 arch/powerpc/platforms/83xx/mpc837x_rdb.c 	iounmap(im);
im               1433 drivers/auxdisplay/panel.c 	u8 im, om;
im               1437 drivers/auxdisplay/panel.c 	im = 0;
im               1451 drivers/auxdisplay/panel.c 		im |= BIT(in);
im               1473 drivers/auxdisplay/panel.c 		*imask |= im;
im                237 drivers/gpio/gpio-ml-ioh.c 	u32 im;
im                286 drivers/gpio/gpio-ml-ioh.c 	im = ioread32(im_reg) & ~(IOH_IM_MASK << (im_pos * 4));
im                287 drivers/gpio/gpio-ml-ioh.c 	iowrite32(im | (val << (im_pos * 4)), im_reg);
im                223 drivers/gpio/gpio-pch.c 	u32 im, im_pos, val;
im                261 drivers/gpio/gpio-pch.c 	im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4));
im                262 drivers/gpio/gpio-pch.c 	iowrite32(im | (val << (im_pos * 4)), im_reg);
im                358 drivers/i2c/busses/i2c-iop3xx.c 	int im = 0;
im                366 drivers/i2c/busses/i2c-iop3xx.c 	for (im = 0; ret == 0 && im != num; im++) {
im                367 drivers/i2c/busses/i2c-iop3xx.c 		ret = iop3xx_i2c_handle_msg(i2c_adap, &msgs[im]);
im                375 drivers/i2c/busses/i2c-iop3xx.c 	return im;
im                285 drivers/i2c/busses/i2c-sprd.c 	int im, ret;
im                291 drivers/i2c/busses/i2c-sprd.c 	for (im = 0; im < num - 1; im++) {
im                292 drivers/i2c/busses/i2c-sprd.c 		ret = sprd_i2c_handle_msg(i2c_adap, &msgs[im], 0);
im                297 drivers/i2c/busses/i2c-sprd.c 	ret = sprd_i2c_handle_msg(i2c_adap, &msgs[im++], 1);
im                303 drivers/i2c/busses/i2c-sprd.c 	return ret < 0 ? ret : im;
im                404 drivers/input/serio/hp_sdc.c 		hp_sdc_status_out8(hp_sdc.im | HP_SDC_CMD_SET_IM);
im                675 drivers/input/serio/hp_sdc.c 	hp_sdc.im &= ~HP_SDC_IM_FH;
im                676 drivers/input/serio/hp_sdc.c 	hp_sdc.im &= ~HP_SDC_IM_PT;
im                677 drivers/input/serio/hp_sdc.c 	hp_sdc.im &= ~HP_SDC_IM_TIMERS;
im                698 drivers/input/serio/hp_sdc.c 	hp_sdc.im &= ~(HP_SDC_IM_HIL | HP_SDC_IM_RESET);
im                720 drivers/input/serio/hp_sdc.c 	hp_sdc.im &= ~(HP_SDC_IM_HIL | HP_SDC_IM_RESET);
im                740 drivers/input/serio/hp_sdc.c 	hp_sdc.im |= HP_SDC_IM_TIMERS;
im                741 drivers/input/serio/hp_sdc.c 	hp_sdc.im |= HP_SDC_IM_FH;
im                742 drivers/input/serio/hp_sdc.c 	hp_sdc.im |= HP_SDC_IM_PT;
im                762 drivers/input/serio/hp_sdc.c 		hp_sdc.im |= (HP_SDC_IM_HIL | HP_SDC_IM_RESET);
im                783 drivers/input/serio/hp_sdc.c 		hp_sdc.im |= (HP_SDC_IM_HIL | HP_SDC_IM_RESET);
im                844 drivers/input/serio/hp_sdc.c 	hp_sdc.im		= HP_SDC_IM_MASK;  /* Mask maskable irqs */
im                 45 drivers/irqchip/irq-tb10x.c 	uint32_t im, mod, pol;
im                 47 drivers/irqchip/irq-tb10x.c 	im = data->mask;
im                 51 drivers/irqchip/irq-tb10x.c 	mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im;
im                 52 drivers/irqchip/irq-tb10x.c 	pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im;
im                 56 drivers/irqchip/irq-tb10x.c 		pol ^= im;
im                 59 drivers/irqchip/irq-tb10x.c 		mod ^= im;
im                 64 drivers/irqchip/irq-tb10x.c 		mod ^= im;
im                 65 drivers/irqchip/irq-tb10x.c 		pol ^= im;
im                 81 drivers/irqchip/irq-tb10x.c 	ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im);
im               1036 drivers/media/dvb-frontends/drx39xyj/drx_driver.h 	s16 im;
im                108 drivers/net/ethernet/stmicro/stmmac/stmmac.h 		u8 im:1;
im                892 drivers/net/wireless/ath/ath9k/ar9003_calib.c 	int i, im, nmeasurement;
im                941 drivers/net/wireless/ath/ath9k/ar9003_calib.c 		for (im = 0; im < nmeasurement; im++) {
im                942 drivers/net/wireless/ath/ath9k/ar9003_calib.c 			magnitude = coeff->mag_coeff[i][im][0];
im                943 drivers/net/wireless/ath/ath9k/ar9003_calib.c 			phase = coeff->phs_coeff[i][im][0];
im                948 drivers/net/wireless/ath/ath9k/ar9003_calib.c 			if ((im % 2) == 0)
im                949 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
im                953 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
im                958 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				caldata->tx_corr_coeff[im][i] =
im               1008 drivers/net/wireless/ath/ath9k/ar9003_calib.c 	int im, ix, iy, temp;
im               1010 drivers/net/wireless/ath/ath9k/ar9003_calib.c 	for (im = 0; im < nmeasurement; im++) {
im               1013 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				if (coeff->mag_coeff[i][im][iy] <
im               1014 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				    coeff->mag_coeff[i][im][ix]) {
im               1015 drivers/net/wireless/ath/ath9k/ar9003_calib.c 					temp = coeff->mag_coeff[i][im][ix];
im               1016 drivers/net/wireless/ath/ath9k/ar9003_calib.c 					coeff->mag_coeff[i][im][ix] =
im               1017 drivers/net/wireless/ath/ath9k/ar9003_calib.c 						coeff->mag_coeff[i][im][iy];
im               1018 drivers/net/wireless/ath/ath9k/ar9003_calib.c 					coeff->mag_coeff[i][im][iy] = temp;
im               1020 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				if (coeff->phs_coeff[i][im][iy] <
im               1021 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				    coeff->phs_coeff[i][im][ix]) {
im               1022 drivers/net/wireless/ath/ath9k/ar9003_calib.c 					temp = coeff->phs_coeff[i][im][ix];
im               1023 drivers/net/wireless/ath/ath9k/ar9003_calib.c 					coeff->phs_coeff[i][im][ix] =
im               1024 drivers/net/wireless/ath/ath9k/ar9003_calib.c 						coeff->phs_coeff[i][im][iy];
im               1025 drivers/net/wireless/ath/ath9k/ar9003_calib.c 					coeff->phs_coeff[i][im][iy] = temp;
im               1029 drivers/net/wireless/ath/ath9k/ar9003_calib.c 		coeff->mag_coeff[i][im][0] = coeff->mag_coeff[i][im][MAXIQCAL / 2];
im               1030 drivers/net/wireless/ath/ath9k/ar9003_calib.c 		coeff->phs_coeff[i][im][0] = coeff->phs_coeff[i][im][MAXIQCAL / 2];
im               1034 drivers/net/wireless/ath/ath9k/ar9003_calib.c 			i, im,
im               1035 drivers/net/wireless/ath/ath9k/ar9003_calib.c 			coeff->mag_coeff[i][im][0],
im               1036 drivers/net/wireless/ath/ath9k/ar9003_calib.c 			coeff->phs_coeff[i][im][0]);
im               1074 drivers/net/wireless/ath/ath9k/ar9003_calib.c 	int i, im, j;
im               1088 drivers/net/wireless/ath/ath9k/ar9003_calib.c 		for (im = 0; im < nmeasurement; im++) {
im               1100 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				u32 idx = 2 * j, offset = 4 * (3 * im + j);
im               1134 drivers/net/wireless/ath/ath9k/ar9003_calib.c 			coeff.phs_coeff[i][im][iqcal_idx] =
im               1136 drivers/net/wireless/ath/ath9k/ar9003_calib.c 			coeff.mag_coeff[i][im][iqcal_idx] =
im               1139 drivers/net/wireless/ath/ath9k/ar9003_calib.c 			if (coeff.mag_coeff[i][im][iqcal_idx] > 63)
im               1140 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				coeff.mag_coeff[i][im][iqcal_idx] -= 128;
im               1141 drivers/net/wireless/ath/ath9k/ar9003_calib.c 			if (coeff.phs_coeff[i][im][iqcal_idx] > 63)
im               1142 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				coeff.phs_coeff[i][im][iqcal_idx] -= 128;
im               1163 drivers/net/wireless/ath/ath9k/ar9003_calib.c 	int i, im;
im               1184 drivers/net/wireless/ath/ath9k/ar9003_calib.c 		for (im = 0; im < caldata->num_measures[i]; im++) {
im               1185 drivers/net/wireless/ath/ath9k/ar9003_calib.c 			if ((im % 2) == 0)
im               1186 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
im               1188 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				     caldata->tx_corr_coeff[im][i]);
im               1190 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
im               1192 drivers/net/wireless/ath/ath9k/ar9003_calib.c 				     caldata->tx_corr_coeff[im][i]);
im                164 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	s16 im;
im                169 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	u16 im;
im               3538 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	cc.im = 0;
im               3543 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 		cc.im = b;
im               3550 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 		cc.im = (u16) dq0;
im               3555 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 		cc.im = (u16) eq;
im               3560 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 		cc.im = (u16) fq;
im               3717 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 	phy_c16 = (s16) phy_c3.im;
im               3721 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 		if (phy_c3.im > 127)
im               3722 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 			phy_c16 = phy_c3.im - 256;
im               3802 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 					phy_c2.im = phy_c1[phy_c6].im;
im               3805 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c 					phy_c19 = phy_c19 + phy_c17 * phy_c2.im;
im                884 drivers/scsi/bfa/bfad.c 	if (bfad->im)
im                885 drivers/scsi/bfa/bfad.c 		flush_workqueue(bfad->im->drv_workq);
im                343 drivers/scsi/bfa/bfad_attr.c 	struct bfad_s         *bfad = itnim->im->bfad;
im                212 drivers/scsi/bfa/bfad_drv.h 	struct bfad_im_s *im;		/* IM specific data */
im                433 drivers/scsi/bfa/bfad_im.c 	(*itnim_drv)->im = bfad->im;
im                456 drivers/scsi/bfa/bfad_im.c 	struct bfad_im_s	*im = itnim_drv->im;
im                480 drivers/scsi/bfa/bfad_im.c 		queue_work(im->drv_workq, &itnim_drv->itnim_work);
im                491 drivers/scsi/bfa/bfad_im.c 	struct bfad_im_s	*im = itnim_drv->im;
im                501 drivers/scsi/bfa/bfad_im.c 		queue_work(im->drv_workq, &itnim_drv->itnim_work);
im                513 drivers/scsi/bfa/bfad_im.c 	struct bfad_im_s	*im = itnim_drv->im;
im                528 drivers/scsi/bfa/bfad_im.c 		queue_work(im->drv_workq, &itnim_drv->itnim_work);
im                647 drivers/scsi/bfa/bfad_im.c 	queue_work(bfad->im->drv_workq,
im                673 drivers/scsi/bfa/bfad_im.c 	struct bfad_im_s *im =
im                676 drivers/scsi/bfa/bfad_im.c 	struct bfad_s *bfad = im->bfad;
im                699 drivers/scsi/bfa/bfad_im.c 	struct bfad_im_s      *im;
im                701 drivers/scsi/bfa/bfad_im.c 	im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
im                702 drivers/scsi/bfa/bfad_im.c 	if (im == NULL)
im                705 drivers/scsi/bfa/bfad_im.c 	bfad->im = im;
im                706 drivers/scsi/bfa/bfad_im.c 	im->bfad = bfad;
im                709 drivers/scsi/bfa/bfad_im.c 		kfree(im);
im                713 drivers/scsi/bfa/bfad_im.c 	INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
im                720 drivers/scsi/bfa/bfad_im.c 	if (bfad->im) {
im                721 drivers/scsi/bfa/bfad_im.c 		bfad_destroy_workq(bfad->im);
im                722 drivers/scsi/bfa/bfad_im.c 		kfree(bfad->im);
im                723 drivers/scsi/bfa/bfad_im.c 		bfad->im = NULL;
im                749 drivers/scsi/bfa/bfad_im.c 		flush_workqueue(bfad->im->drv_workq);
im                756 drivers/scsi/bfa/bfad_im.c bfad_destroy_workq(struct bfad_im_s *im)
im                758 drivers/scsi/bfa/bfad_im.c 	if (im && im->drv_workq) {
im                759 drivers/scsi/bfa/bfad_im.c 		flush_workqueue(im->drv_workq);
im                760 drivers/scsi/bfa/bfad_im.c 		destroy_workqueue(im->drv_workq);
im                761 drivers/scsi/bfa/bfad_im.c 		im->drv_workq = NULL;
im                768 drivers/scsi/bfa/bfad_im.c 	struct bfad_im_s      *im = bfad->im;
im                771 drivers/scsi/bfa/bfad_im.c 	snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
im                773 drivers/scsi/bfa/bfad_im.c 	im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
im                774 drivers/scsi/bfa/bfad_im.c 	if (!im->drv_workq)
im               1108 drivers/scsi/bfa/bfad_im.c 	struct bfad_im_s      *im = itnim->im;
im               1109 drivers/scsi/bfa/bfad_im.c 	struct bfad_s         *bfad = im->bfad;
im                 91 drivers/scsi/bfa/bfad_im.h 	struct bfad_im_s *im;
im                155 drivers/scsi/bfa/bfad_im.h 		queue_work(drv->im->drv_workq, &drv->im->aen_im_notify_work);
im                161 drivers/scsi/bfa/bfad_im.h void bfad_destroy_workq(struct bfad_im_s *im);
im                485 drivers/staging/media/imx/imx-media-csc-scaler.c static void ipu_image_from_q_data(struct ipu_image *im,
im                490 drivers/staging/media/imx/imx-media-csc-scaler.c 	im->pix = *fmt;
im                492 drivers/staging/media/imx/imx-media-csc-scaler.c 		im->pix.ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
im                494 drivers/staging/media/imx/imx-media-csc-scaler.c 		im->pix.ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
im                495 drivers/staging/media/imx/imx-media-csc-scaler.c 	im->rect = q_data->rect;
im                267 drivers/tty/serial/amba-pl011.c 	unsigned int		im;		/* interrupt mask */
im                713 drivers/tty/serial/amba-pl011.c 		uap->im &= ~UART011_TXIM;
im                714 drivers/tty/serial/amba-pl011.c 		pl011_write(uap->im, uap, REG_IMSC);
im                723 drivers/tty/serial/amba-pl011.c 		uap->im &= ~UART011_TXIM;
im                724 drivers/tty/serial/amba-pl011.c 		pl011_write(uap->im, uap, REG_IMSC);
im                763 drivers/tty/serial/amba-pl011.c 				uap->im &= ~UART011_TXIM;
im                764 drivers/tty/serial/amba-pl011.c 				pl011_write(uap->im, uap, REG_IMSC);
im                866 drivers/tty/serial/amba-pl011.c 	uap->im &= ~UART011_RXIM;
im                867 drivers/tty/serial/amba-pl011.c 	pl011_write(uap->im, uap, REG_IMSC);
im                993 drivers/tty/serial/amba-pl011.c 		uap->im |= UART011_RXIM;
im                994 drivers/tty/serial/amba-pl011.c 		pl011_write(uap->im, uap, REG_IMSC);
im               1041 drivers/tty/serial/amba-pl011.c 		uap->im |= UART011_RXIM;
im               1042 drivers/tty/serial/amba-pl011.c 		pl011_write(uap->im, uap, REG_IMSC);
im               1098 drivers/tty/serial/amba-pl011.c 		uap->im |= UART011_RXIM;
im               1099 drivers/tty/serial/amba-pl011.c 		pl011_write(uap->im, uap, REG_IMSC);
im               1298 drivers/tty/serial/amba-pl011.c 	uap->im &= ~UART011_TXIM;
im               1299 drivers/tty/serial/amba-pl011.c 	pl011_write(uap->im, uap, REG_IMSC);
im               1309 drivers/tty/serial/amba-pl011.c 		uap->im |= UART011_TXIM;
im               1310 drivers/tty/serial/amba-pl011.c 		pl011_write(uap->im, uap, REG_IMSC);
im               1328 drivers/tty/serial/amba-pl011.c 	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
im               1330 drivers/tty/serial/amba-pl011.c 	pl011_write(uap->im, uap, REG_IMSC);
im               1340 drivers/tty/serial/amba-pl011.c 	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
im               1341 drivers/tty/serial/amba-pl011.c 	pl011_write(uap->im, uap, REG_IMSC);
im               1360 drivers/tty/serial/amba-pl011.c 			uap->im |= UART011_RXIM;
im               1361 drivers/tty/serial/amba-pl011.c 			pl011_write(uap->im, uap, REG_IMSC);
im               1484 drivers/tty/serial/amba-pl011.c 	status = pl011_read(uap, REG_RIS) & uap->im;
im               1508 drivers/tty/serial/amba-pl011.c 			status = pl011_read(uap, REG_RIS) & uap->im;
im               1680 drivers/tty/serial/amba-pl011.c 	uap->im = pl011_read(uap, REG_IMSC);
im               1716 drivers/tty/serial/amba-pl011.c 	pl011_write(uap->im, uap, REG_IMSC);
im               1748 drivers/tty/serial/amba-pl011.c 	uap->im = UART011_RTIM;
im               1750 drivers/tty/serial/amba-pl011.c 		uap->im |= UART011_RXIM;
im               1751 drivers/tty/serial/amba-pl011.c 	pl011_write(uap->im, uap, REG_IMSC);
im               1861 drivers/tty/serial/amba-pl011.c 	uap->im = 0;
im               1862 drivers/tty/serial/amba-pl011.c 	pl011_write(uap->im, uap, REG_IMSC);
im               3717 fs/ceph/caps.c 			      struct inode *inode, struct ceph_mds_caps *im,
im               3727 fs/ceph/caps.c 	unsigned caps = le32_to_cpu(im->caps);
im               3728 fs/ceph/caps.c 	unsigned wanted = le32_to_cpu(im->wanted);
im               3729 fs/ceph/caps.c 	unsigned seq = le32_to_cpu(im->seq);
im               3730 fs/ceph/caps.c 	unsigned mseq = le32_to_cpu(im->migrate_seq);
im               3731 fs/ceph/caps.c 	u64 realmino = le64_to_cpu(im->realm);
im               3732 fs/ceph/caps.c 	u64 cap_id = le64_to_cpu(im->cap_id);
im                461 fs/f2fs/checkpoint.c 	struct inode_management *im = &sbi->im[type];
im                468 fs/f2fs/checkpoint.c 	spin_lock(&im->ino_lock);
im                469 fs/f2fs/checkpoint.c 	e = radix_tree_lookup(&im->ino_root, ino);
im                472 fs/f2fs/checkpoint.c 		if (unlikely(radix_tree_insert(&im->ino_root, ino, e)))
im                478 fs/f2fs/checkpoint.c 		list_add_tail(&e->list, &im->ino_list);
im                480 fs/f2fs/checkpoint.c 			im->ino_num++;
im                486 fs/f2fs/checkpoint.c 	spin_unlock(&im->ino_lock);
im                495 fs/f2fs/checkpoint.c 	struct inode_management *im = &sbi->im[type];
im                498 fs/f2fs/checkpoint.c 	spin_lock(&im->ino_lock);
im                499 fs/f2fs/checkpoint.c 	e = radix_tree_lookup(&im->ino_root, ino);
im                502 fs/f2fs/checkpoint.c 		radix_tree_delete(&im->ino_root, ino);
im                503 fs/f2fs/checkpoint.c 		im->ino_num--;
im                504 fs/f2fs/checkpoint.c 		spin_unlock(&im->ino_lock);
im                508 fs/f2fs/checkpoint.c 	spin_unlock(&im->ino_lock);
im                526 fs/f2fs/checkpoint.c 	struct inode_management *im = &sbi->im[mode];
im                529 fs/f2fs/checkpoint.c 	spin_lock(&im->ino_lock);
im                530 fs/f2fs/checkpoint.c 	e = radix_tree_lookup(&im->ino_root, ino);
im                531 fs/f2fs/checkpoint.c 	spin_unlock(&im->ino_lock);
im                541 fs/f2fs/checkpoint.c 		struct inode_management *im = &sbi->im[i];
im                543 fs/f2fs/checkpoint.c 		spin_lock(&im->ino_lock);
im                544 fs/f2fs/checkpoint.c 		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
im                546 fs/f2fs/checkpoint.c 			radix_tree_delete(&im->ino_root, e->ino);
im                548 fs/f2fs/checkpoint.c 			im->ino_num--;
im                550 fs/f2fs/checkpoint.c 		spin_unlock(&im->ino_lock);
im                563 fs/f2fs/checkpoint.c 	struct inode_management *im = &sbi->im[type];
im                567 fs/f2fs/checkpoint.c 	spin_lock(&im->ino_lock);
im                568 fs/f2fs/checkpoint.c 	e = radix_tree_lookup(&im->ino_root, ino);
im                571 fs/f2fs/checkpoint.c 	spin_unlock(&im->ino_lock);
im                577 fs/f2fs/checkpoint.c 	struct inode_management *im = &sbi->im[ORPHAN_INO];
im                580 fs/f2fs/checkpoint.c 	spin_lock(&im->ino_lock);
im                583 fs/f2fs/checkpoint.c 		spin_unlock(&im->ino_lock);
im                588 fs/f2fs/checkpoint.c 	if (unlikely(im->ino_num >= sbi->max_orphans))
im                591 fs/f2fs/checkpoint.c 		im->ino_num++;
im                592 fs/f2fs/checkpoint.c 	spin_unlock(&im->ino_lock);
im                599 fs/f2fs/checkpoint.c 	struct inode_management *im = &sbi->im[ORPHAN_INO];
im                601 fs/f2fs/checkpoint.c 	spin_lock(&im->ino_lock);
im                602 fs/f2fs/checkpoint.c 	f2fs_bug_on(sbi, im->ino_num == 0);
im                603 fs/f2fs/checkpoint.c 	im->ino_num--;
im                604 fs/f2fs/checkpoint.c 	spin_unlock(&im->ino_lock);
im                748 fs/f2fs/checkpoint.c 	struct inode_management *im = &sbi->im[ORPHAN_INO];
im                750 fs/f2fs/checkpoint.c 	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);
im                757 fs/f2fs/checkpoint.c 	head = &im->ino_list;
im               1273 fs/f2fs/checkpoint.c 	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
im               1378 fs/f2fs/checkpoint.c 	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags;
im               1644 fs/f2fs/checkpoint.c 		struct inode_management *im = &sbi->im[i];
im               1646 fs/f2fs/checkpoint.c 		INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
im               1647 fs/f2fs/checkpoint.c 		spin_lock_init(&im->ino_lock);
im               1648 fs/f2fs/checkpoint.c 		INIT_LIST_HEAD(&im->ino_list);
im               1649 fs/f2fs/checkpoint.c 		im->ino_num = 0;
im                 97 fs/f2fs/debug.c 	si->append = sbi->im[APPEND_INO].ino_num;
im                 98 fs/f2fs/debug.c 	si->update = sbi->im[UPDATE_INO].ino_num;
im                 99 fs/f2fs/debug.c 	si->orphans = sbi->im[ORPHAN_INO].ino_num;
im                262 fs/f2fs/debug.c 		si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
im               1216 fs/f2fs/f2fs.h 	struct inode_management im[MAX_INO_ENTRY];      /* manage inode cache */
im                 79 fs/f2fs/node.c 			mem_size += sbi->im[i].ino_num *
im                511 fs/nfs/nfs4idmap.c 				     struct idmap_msg *im,
im                517 fs/nfs/nfs4idmap.c 	im->im_type = IDMAP_TYPE_GROUP;
im                522 fs/nfs/nfs4idmap.c 		im->im_type = IDMAP_TYPE_USER;
im                525 fs/nfs/nfs4idmap.c 		im->im_conv = IDMAP_CONV_NAMETOID;
im                526 fs/nfs/nfs4idmap.c 		ret = match_strlcpy(im->im_name, &substr, IDMAP_NAMESZ);
im                530 fs/nfs/nfs4idmap.c 		im->im_type = IDMAP_TYPE_USER;
im                533 fs/nfs/nfs4idmap.c 		im->im_conv = IDMAP_CONV_IDTONAME;
im                534 fs/nfs/nfs4idmap.c 		ret = match_int(&substr, &im->im_id);
im                544 fs/nfs/nfs4idmap.c 	msg->data = im;
im                586 fs/nfs/nfs4idmap.c 	struct idmap_msg *im;
im                601 fs/nfs/nfs4idmap.c 	im = &data->idmap_msg;
im                605 fs/nfs/nfs4idmap.c 	ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
im                632 fs/nfs/nfs4idmap.c static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
im                641 fs/nfs/nfs4idmap.c 	if (upcall->im_type != im->im_type || upcall->im_conv != im->im_conv)
im                643 fs/nfs/nfs4idmap.c 	switch (im->im_conv) {
im                645 fs/nfs/nfs4idmap.c 		if (strcmp(upcall->im_name, im->im_name) != 0)
im                648 fs/nfs/nfs4idmap.c 		len = 1 + nfs_map_numeric_to_string(im->im_id, id_str,
im                653 fs/nfs/nfs4idmap.c 		if (upcall->im_id != im->im_id)
im                655 fs/nfs/nfs4idmap.c 		len = strlen(im->im_name);
im                656 fs/nfs/nfs4idmap.c 		ret = nfs_idmap_instantiate(key, authkey, im->im_name, len);
im                672 fs/nfs/nfs4idmap.c 	struct idmap_msg im;
im                686 fs/nfs/nfs4idmap.c 	if (mlen != sizeof(im)) {
im                691 fs/nfs/nfs4idmap.c 	if (copy_from_user(&im, src, mlen) != 0) {
im                696 fs/nfs/nfs4idmap.c 	if (!(im.im_status & IDMAP_STATUS_SUCCESS)) {
im                701 fs/nfs/nfs4idmap.c 	namelen_in = strnlen(im.im_name, IDMAP_NAMESZ);
im                707 fs/nfs/nfs4idmap.c 	ret = nfs_idmap_read_and_verify_message(&im,
im                270 include/linux/hp_sdc.h 	uint8_t		im;		/* Interrupt mask */
im                158 net/ipv4/igmp.c static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
im                160 net/ipv4/igmp.c static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
im                169 net/ipv4/igmp.c static void ip_ma_put(struct ip_mc_list *im)
im                171 net/ipv4/igmp.c 	if (refcount_dec_and_test(&im->refcnt)) {
im                172 net/ipv4/igmp.c 		in_dev_put(im->interface);
im                173 net/ipv4/igmp.c 		kfree_rcu(im, rcu);
im                204 net/ipv4/igmp.c static void igmp_stop_timer(struct ip_mc_list *im)
im                206 net/ipv4/igmp.c 	spin_lock_bh(&im->lock);
im                207 net/ipv4/igmp.c 	if (del_timer(&im->timer))
im                208 net/ipv4/igmp.c 		refcount_dec(&im->refcnt);
im                209 net/ipv4/igmp.c 	im->tm_running = 0;
im                210 net/ipv4/igmp.c 	im->reporter = 0;
im                211 net/ipv4/igmp.c 	im->unsolicit_count = 0;
im                212 net/ipv4/igmp.c 	spin_unlock_bh(&im->lock);
im                216 net/ipv4/igmp.c static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
im                220 net/ipv4/igmp.c 	im->tm_running = 1;
im                221 net/ipv4/igmp.c 	if (!mod_timer(&im->timer, jiffies+tv+2))
im                222 net/ipv4/igmp.c 		refcount_inc(&im->refcnt);
im                247 net/ipv4/igmp.c static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
im                249 net/ipv4/igmp.c 	spin_lock_bh(&im->lock);
im                250 net/ipv4/igmp.c 	im->unsolicit_count = 0;
im                251 net/ipv4/igmp.c 	if (del_timer(&im->timer)) {
im                252 net/ipv4/igmp.c 		if ((long)(im->timer.expires-jiffies) < max_delay) {
im                253 net/ipv4/igmp.c 			add_timer(&im->timer);
im                254 net/ipv4/igmp.c 			im->tm_running = 1;
im                255 net/ipv4/igmp.c 			spin_unlock_bh(&im->lock);
im                258 net/ipv4/igmp.c 		refcount_dec(&im->refcnt);
im                260 net/ipv4/igmp.c 	igmp_start_timer(im, max_delay);
im                261 net/ipv4/igmp.c 	spin_unlock_bh(&im->lock);
im                830 net/ipv4/igmp.c 	struct ip_mc_list *im = from_timer(im, t, timer);
im                831 net/ipv4/igmp.c 	struct in_device *in_dev = im->interface;
im                833 net/ipv4/igmp.c 	spin_lock(&im->lock);
im                834 net/ipv4/igmp.c 	im->tm_running = 0;
im                836 net/ipv4/igmp.c 	if (im->unsolicit_count && --im->unsolicit_count)
im                837 net/ipv4/igmp.c 		igmp_start_timer(im, unsolicited_report_interval(in_dev));
im                839 net/ipv4/igmp.c 	im->reporter = 1;
im                840 net/ipv4/igmp.c 	spin_unlock(&im->lock);
im                843 net/ipv4/igmp.c 		igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
im                845 net/ipv4/igmp.c 		igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
im                847 net/ipv4/igmp.c 		igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);
im                849 net/ipv4/igmp.c 	ip_ma_put(im);
im                911 net/ipv4/igmp.c 	struct ip_mc_list *im;
im                922 net/ipv4/igmp.c 	for_each_pmc_rcu(in_dev, im) {
im                923 net/ipv4/igmp.c 		if (im->multiaddr == group) {
im                924 net/ipv4/igmp.c 			igmp_stop_timer(im);
im                938 net/ipv4/igmp.c 	struct ip_mc_list	*im;
im               1035 net/ipv4/igmp.c 	for_each_pmc_rcu(in_dev, im) {
im               1038 net/ipv4/igmp.c 		if (group && group != im->multiaddr)
im               1040 net/ipv4/igmp.c 		if (im->multiaddr == IGMP_ALL_HOSTS)
im               1042 net/ipv4/igmp.c 		if (ipv4_is_local_multicast(im->multiaddr) &&
im               1045 net/ipv4/igmp.c 		spin_lock_bh(&im->lock);
im               1046 net/ipv4/igmp.c 		if (im->tm_running)
im               1047 net/ipv4/igmp.c 			im->gsquery = im->gsquery && mark;
im               1049 net/ipv4/igmp.c 			im->gsquery = mark;
im               1050 net/ipv4/igmp.c 		changed = !im->gsquery ||
im               1051 net/ipv4/igmp.c 			igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
im               1052 net/ipv4/igmp.c 		spin_unlock_bh(&im->lock);
im               1054 net/ipv4/igmp.c 			igmp_mod_timer(im, max_delay);
im               1164 net/ipv4/igmp.c static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
im               1180 net/ipv4/igmp.c 	spin_lock_bh(&im->lock);
im               1181 net/ipv4/igmp.c 	pmc->interface = im->interface;
im               1183 net/ipv4/igmp.c 	pmc->multiaddr = im->multiaddr;
im               1185 net/ipv4/igmp.c 	pmc->sfmode = im->sfmode;
im               1189 net/ipv4/igmp.c 		pmc->tomb = im->tomb;
im               1190 net/ipv4/igmp.c 		pmc->sources = im->sources;
im               1191 net/ipv4/igmp.c 		im->tomb = im->sources = NULL;
im               1195 net/ipv4/igmp.c 	spin_unlock_bh(&im->lock);
im               1206 net/ipv4/igmp.c static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
im               1211 net/ipv4/igmp.c 	__be32 multiaddr = im->multiaddr;
im               1228 net/ipv4/igmp.c 	spin_lock_bh(&im->lock);
im               1230 net/ipv4/igmp.c 		im->interface = pmc->interface;
im               1231 net/ipv4/igmp.c 		if (im->sfmode == MCAST_INCLUDE) {
im               1232 net/ipv4/igmp.c 			swap(im->tomb, pmc->tomb);
im               1233 net/ipv4/igmp.c 			swap(im->sources, pmc->sources);
im               1234 net/ipv4/igmp.c 			for (psf = im->sources; psf; psf = psf->sf_next)
im               1237 net/ipv4/igmp.c 			im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
im               1242 net/ipv4/igmp.c 	spin_unlock_bh(&im->lock);
im               1278 net/ipv4/igmp.c static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp)
im               1280 net/ipv4/igmp.c 	struct in_device *in_dev = im->interface;
im               1286 net/ipv4/igmp.c 	if (im->loaded) {
im               1287 net/ipv4/igmp.c 		im->loaded = 0;
im               1288 net/ipv4/igmp.c 		ip_mc_filter_del(in_dev, im->multiaddr);
im               1292 net/ipv4/igmp.c 	if (im->multiaddr == IGMP_ALL_HOSTS)
im               1294 net/ipv4/igmp.c 	if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
im               1297 net/ipv4/igmp.c 	reporter = im->reporter;
im               1298 net/ipv4/igmp.c 	igmp_stop_timer(im);
im               1305 net/ipv4/igmp.c 				igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
im               1309 net/ipv4/igmp.c 		igmpv3_add_delrec(in_dev, im, gfp);
im               1316 net/ipv4/igmp.c static void igmp_group_dropped(struct ip_mc_list *im)
im               1318 net/ipv4/igmp.c 	__igmp_group_dropped(im, GFP_KERNEL);
im               1321 net/ipv4/igmp.c static void igmp_group_added(struct ip_mc_list *im)
im               1323 net/ipv4/igmp.c 	struct in_device *in_dev = im->interface;
im               1328 net/ipv4/igmp.c 	if (im->loaded == 0) {
im               1329 net/ipv4/igmp.c 		im->loaded = 1;
im               1330 net/ipv4/igmp.c 		ip_mc_filter_add(in_dev, im->multiaddr);
im               1334 net/ipv4/igmp.c 	if (im->multiaddr == IGMP_ALL_HOSTS)
im               1336 net/ipv4/igmp.c 	if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
im               1342 net/ipv4/igmp.c 	im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
im               1344 net/ipv4/igmp.c 		spin_lock_bh(&im->lock);
im               1345 net/ipv4/igmp.c 		igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
im               1346 net/ipv4/igmp.c 		spin_unlock_bh(&im->lock);
im               1355 net/ipv4/igmp.c 	if (im->sfmode == MCAST_EXCLUDE)
im               1356 net/ipv4/igmp.c 		im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
im               1367 net/ipv4/igmp.c static u32 ip_mc_hash(const struct ip_mc_list *im)
im               1369 net/ipv4/igmp.c 	return hash_32((__force u32)im->multiaddr, MC_HASH_SZ_LOG);
im               1373 net/ipv4/igmp.c 			   struct ip_mc_list *im)
im               1380 net/ipv4/igmp.c 		hash = ip_mc_hash(im);
im               1381 net/ipv4/igmp.c 		im->next_hash = mc_hash[hash];
im               1382 net/ipv4/igmp.c 		rcu_assign_pointer(mc_hash[hash], im);
im               1395 net/ipv4/igmp.c 	for_each_pmc_rtnl(in_dev, im) {
im               1396 net/ipv4/igmp.c 		hash = ip_mc_hash(im);
im               1397 net/ipv4/igmp.c 		im->next_hash = mc_hash[hash];
im               1398 net/ipv4/igmp.c 		RCU_INIT_POINTER(mc_hash[hash], im);
im               1405 net/ipv4/igmp.c 			      struct ip_mc_list *im)
im               1412 net/ipv4/igmp.c 	mc_hash += ip_mc_hash(im);
im               1413 net/ipv4/igmp.c 	while ((aux = rtnl_dereference(*mc_hash)) != im)
im               1415 net/ipv4/igmp.c 	*mc_hash = im->next_hash;
im               1425 net/ipv4/igmp.c 	struct ip_mc_list *im;
im               1429 net/ipv4/igmp.c 	for_each_pmc_rtnl(in_dev, im) {
im               1430 net/ipv4/igmp.c 		if (im->multiaddr == addr) {
im               1431 net/ipv4/igmp.c 			im->users++;
im               1437 net/ipv4/igmp.c 	im = kzalloc(sizeof(*im), gfp);
im               1438 net/ipv4/igmp.c 	if (!im)
im               1441 net/ipv4/igmp.c 	im->users = 1;
im               1442 net/ipv4/igmp.c 	im->interface = in_dev;
im               1444 net/ipv4/igmp.c 	im->multiaddr = addr;
im               1446 net/ipv4/igmp.c 	im->sfmode = mode;
im               1447 net/ipv4/igmp.c 	im->sfcount[mode] = 1;
im               1448 net/ipv4/igmp.c 	refcount_set(&im->refcnt, 1);
im               1449 net/ipv4/igmp.c 	spin_lock_init(&im->lock);
im               1451 net/ipv4/igmp.c 	timer_setup(&im->timer, igmp_timer_expire, 0);
im               1454 net/ipv4/igmp.c 	im->next_rcu = in_dev->mc_list;
im               1456 net/ipv4/igmp.c 	rcu_assign_pointer(in_dev->mc_list, im);
im               1458 net/ipv4/igmp.c 	ip_mc_hash_add(in_dev, im);
im               1461 net/ipv4/igmp.c 	igmpv3_del_delrec(in_dev, im);
im               1463 net/ipv4/igmp.c 	igmp_group_added(im);
im               1630 net/ipv4/igmp.c 	struct ip_mc_list *im;
im               1636 net/ipv4/igmp.c 	for_each_pmc_rtnl(in_dev, im) {
im               1637 net/ipv4/igmp.c 		if (im->multiaddr == IGMP_ALL_HOSTS)
im               1639 net/ipv4/igmp.c 		if (ipv4_is_local_multicast(im->multiaddr) &&
im               1652 net/ipv4/igmp.c 		igmp_send_report(in_dev, im, type);
im               2700 net/ipv4/igmp.c 	struct ip_mc_list *im;
im               2709 net/ipv4/igmp.c 		for (im = rcu_dereference(mc_hash[hash]);
im               2710 net/ipv4/igmp.c 		     im != NULL;
im               2711 net/ipv4/igmp.c 		     im = rcu_dereference(im->next_hash)) {
im               2712 net/ipv4/igmp.c 			if (im->multiaddr == mc_addr)
im               2716 net/ipv4/igmp.c 		for_each_pmc_rcu(in_dev, im) {
im               2717 net/ipv4/igmp.c 			if (im->multiaddr == mc_addr)
im               2721 net/ipv4/igmp.c 	if (im && proto == IPPROTO_IGMP) {
im               2723 net/ipv4/igmp.c 	} else if (im) {
im               2725 net/ipv4/igmp.c 			for (psf = im->sources; psf; psf = psf->sf_next) {
im               2732 net/ipv4/igmp.c 					im->sfcount[MCAST_EXCLUDE];
im               2734 net/ipv4/igmp.c 				rv = im->sfcount[MCAST_EXCLUDE] != 0;
im               2753 net/ipv4/igmp.c 	struct ip_mc_list *im = NULL;
im               2763 net/ipv4/igmp.c 		im = rcu_dereference(in_dev->mc_list);
im               2764 net/ipv4/igmp.c 		if (im) {
im               2769 net/ipv4/igmp.c 	return im;
im               2772 net/ipv4/igmp.c static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
im               2776 net/ipv4/igmp.c 	im = rcu_dereference(im->next_rcu);
im               2777 net/ipv4/igmp.c 	while (!im) {
im               2786 net/ipv4/igmp.c 		im = rcu_dereference(state->in_dev->mc_list);
im               2788 net/ipv4/igmp.c 	return im;
im               2793 net/ipv4/igmp.c 	struct ip_mc_list *im = igmp_mc_get_first(seq);
im               2794 net/ipv4/igmp.c 	if (im)
im               2795 net/ipv4/igmp.c 		while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
im               2797 net/ipv4/igmp.c 	return pos ? NULL : im;
im               2809 net/ipv4/igmp.c 	struct ip_mc_list *im;
im               2811 net/ipv4/igmp.c 		im = igmp_mc_get_first(seq);
im               2813 net/ipv4/igmp.c 		im = igmp_mc_get_next(seq, v);
im               2815 net/ipv4/igmp.c 	return im;
im               2834 net/ipv4/igmp.c 		struct ip_mc_list *im = (struct ip_mc_list *)v;
im               2847 net/ipv4/igmp.c 		if (rcu_access_pointer(state->in_dev->mc_list) == im) {
im               2852 net/ipv4/igmp.c 		delta = im->timer.expires - jiffies;
im               2855 net/ipv4/igmp.c 			   im->multiaddr, im->users,
im               2856 net/ipv4/igmp.c 			   im->tm_running,
im               2857 net/ipv4/igmp.c 			   im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
im               2858 net/ipv4/igmp.c 			   im->reporter);
im               2874 net/ipv4/igmp.c 	struct ip_mc_list *im;
im               2883 net/ipv4/igmp.c 	struct ip_mc_list *im = NULL;
im               2887 net/ipv4/igmp.c 	state->im = NULL;
im               2893 net/ipv4/igmp.c 		im = rcu_dereference(idev->mc_list);
im               2894 net/ipv4/igmp.c 		if (likely(im)) {
im               2895 net/ipv4/igmp.c 			spin_lock_bh(&im->lock);
im               2896 net/ipv4/igmp.c 			psf = im->sources;
im               2898 net/ipv4/igmp.c 				state->im = im;
im               2902 net/ipv4/igmp.c 			spin_unlock_bh(&im->lock);
im               2914 net/ipv4/igmp.c 		spin_unlock_bh(&state->im->lock);
im               2915 net/ipv4/igmp.c 		state->im = state->im->next;
im               2916 net/ipv4/igmp.c 		while (!state->im) {
im               2925 net/ipv4/igmp.c 			state->im = rcu_dereference(state->idev->mc_list);
im               2927 net/ipv4/igmp.c 		if (!state->im)
im               2929 net/ipv4/igmp.c 		spin_lock_bh(&state->im->lock);
im               2930 net/ipv4/igmp.c 		psf = state->im->sources;
im               2967 net/ipv4/igmp.c 	if (likely(state->im)) {
im               2968 net/ipv4/igmp.c 		spin_unlock_bh(&state->im->lock);
im               2969 net/ipv4/igmp.c 		state->im = NULL;
im               2988 net/ipv4/igmp.c 			   ntohl(state->im->multiaddr),
im                480 net/ipv6/anycast.c 	struct ifacaddr6 *im = NULL;
im                491 net/ipv6/anycast.c 		im = idev->ac_list;
im                492 net/ipv6/anycast.c 		if (im) {
im                498 net/ipv6/anycast.c 	return im;
im                501 net/ipv6/anycast.c static struct ifacaddr6 *ac6_get_next(struct seq_file *seq, struct ifacaddr6 *im)
im                505 net/ipv6/anycast.c 	im = im->aca_next;
im                506 net/ipv6/anycast.c 	while (!im) {
im                519 net/ipv6/anycast.c 		im = state->idev->ac_list;
im                521 net/ipv6/anycast.c 	return im;
im                526 net/ipv6/anycast.c 	struct ifacaddr6 *im = ac6_get_first(seq);
im                527 net/ipv6/anycast.c 	if (im)
im                528 net/ipv6/anycast.c 		while (pos && (im = ac6_get_next(seq, im)) != NULL)
im                530 net/ipv6/anycast.c 	return pos ? NULL : im;
im                542 net/ipv6/anycast.c 	struct ifacaddr6 *im = ac6_get_next(seq, v);
im                545 net/ipv6/anycast.c 	return im;
im                562 net/ipv6/anycast.c 	struct ifacaddr6 *im = (struct ifacaddr6 *)v;
im                567 net/ipv6/anycast.c 		   &im->aca_addr, im->aca_users);
im                727 net/ipv6/mcast.c static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
im                741 net/ipv6/mcast.c 	spin_lock_bh(&im->mca_lock);
im                743 net/ipv6/mcast.c 	pmc->idev = im->idev;
im                745 net/ipv6/mcast.c 	pmc->mca_addr = im->mca_addr;
im                747 net/ipv6/mcast.c 	pmc->mca_sfmode = im->mca_sfmode;
im                751 net/ipv6/mcast.c 		pmc->mca_tomb = im->mca_tomb;
im                752 net/ipv6/mcast.c 		pmc->mca_sources = im->mca_sources;
im                753 net/ipv6/mcast.c 		im->mca_tomb = im->mca_sources = NULL;
im                757 net/ipv6/mcast.c 	spin_unlock_bh(&im->mca_lock);
im                765 net/ipv6/mcast.c static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
im                769 net/ipv6/mcast.c 	struct in6_addr *pmca = &im->mca_addr;
im                786 net/ipv6/mcast.c 	spin_lock_bh(&im->mca_lock);
im                788 net/ipv6/mcast.c 		im->idev = pmc->idev;
im                789 net/ipv6/mcast.c 		if (im->mca_sfmode == MCAST_INCLUDE) {
im                790 net/ipv6/mcast.c 			swap(im->mca_tomb, pmc->mca_tomb);
im                791 net/ipv6/mcast.c 			swap(im->mca_sources, pmc->mca_sources);
im                792 net/ipv6/mcast.c 			for (psf = im->mca_sources; psf; psf = psf->sf_next)
im                795 net/ipv6/mcast.c 			im->mca_crcount = idev->mc_qrv;
im                801 net/ipv6/mcast.c 	spin_unlock_bh(&im->mca_lock);
im               2676 net/ipv6/mcast.c 	struct ifmcaddr6 *im = NULL;
im               2687 net/ipv6/mcast.c 		im = idev->mc_list;
im               2688 net/ipv6/mcast.c 		if (im) {
im               2694 net/ipv6/mcast.c 	return im;
im               2697 net/ipv6/mcast.c static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
im               2701 net/ipv6/mcast.c 	im = im->next;
im               2702 net/ipv6/mcast.c 	while (!im) {
im               2715 net/ipv6/mcast.c 		im = state->idev->mc_list;
im               2717 net/ipv6/mcast.c 	return im;
im               2722 net/ipv6/mcast.c 	struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
im               2723 net/ipv6/mcast.c 	if (im)
im               2724 net/ipv6/mcast.c 		while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
im               2726 net/ipv6/mcast.c 	return pos ? NULL : im;
im               2738 net/ipv6/mcast.c 	struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
im               2741 net/ipv6/mcast.c 	return im;
im               2759 net/ipv6/mcast.c 	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
im               2765 net/ipv6/mcast.c 		   &im->mca_addr,
im               2766 net/ipv6/mcast.c 		   im->mca_users, im->mca_flags,
im               2767 net/ipv6/mcast.c 		   (im->mca_flags&MAF_TIMER_RUNNING) ?
im               2768 net/ipv6/mcast.c 		   jiffies_to_clock_t(im->mca_timer.expires-jiffies) : 0);
im               2783 net/ipv6/mcast.c 	struct ifmcaddr6 *im;
im               2791 net/ipv6/mcast.c 	struct ifmcaddr6 *im = NULL;
im               2796 net/ipv6/mcast.c 	state->im = NULL;
im               2803 net/ipv6/mcast.c 		im = idev->mc_list;
im               2804 net/ipv6/mcast.c 		if (likely(im)) {
im               2805 net/ipv6/mcast.c 			spin_lock_bh(&im->mca_lock);
im               2806 net/ipv6/mcast.c 			psf = im->mca_sources;
im               2808 net/ipv6/mcast.c 				state->im = im;
im               2812 net/ipv6/mcast.c 			spin_unlock_bh(&im->mca_lock);
im               2825 net/ipv6/mcast.c 		spin_unlock_bh(&state->im->mca_lock);
im               2826 net/ipv6/mcast.c 		state->im = state->im->next;
im               2827 net/ipv6/mcast.c 		while (!state->im) {
im               2840 net/ipv6/mcast.c 			state->im = state->idev->mc_list;
im               2842 net/ipv6/mcast.c 		if (!state->im)
im               2844 net/ipv6/mcast.c 		spin_lock_bh(&state->im->mca_lock);
im               2845 net/ipv6/mcast.c 		psf = state->im->mca_sources;
im               2882 net/ipv6/mcast.c 	if (likely(state->im)) {
im               2883 net/ipv6/mcast.c 		spin_unlock_bh(&state->im->mca_lock);
im               2884 net/ipv6/mcast.c 		state->im = NULL;
im               2905 net/ipv6/mcast.c 			   &state->im->mca_addr,
im                 42 net/sched/em_ipt.c static int check_match(struct net *net, struct em_ipt_match *im, int mdata_len)
im                 52 net/sched/em_ipt.c 	mtpar.hook_mask	= 1 << im->hook;
im                 53 net/sched/em_ipt.c 	mtpar.family	= im->match->family;
im                 54 net/sched/em_ipt.c 	mtpar.match	= im->match;
im                 56 net/sched/em_ipt.c 	mtpar.matchinfo	= (void *)im->match_data;
im                130 net/sched/em_ipt.c 	struct em_ipt_match *im = NULL;
im                160 net/sched/em_ipt.c 	im = kzalloc(sizeof(*im) + mdata_len, GFP_KERNEL);
im                161 net/sched/em_ipt.c 	if (!im) {
im                166 net/sched/em_ipt.c 	im->match = match;
im                167 net/sched/em_ipt.c 	im->hook = nla_get_u32(tb[TCA_EM_IPT_HOOK]);
im                168 net/sched/em_ipt.c 	im->nfproto = nfproto;
im                169 net/sched/em_ipt.c 	nla_memcpy(im->match_data, tb[TCA_EM_IPT_MATCH_DATA], mdata_len);
im                171 net/sched/em_ipt.c 	ret = check_match(net, im, mdata_len);
im                175 net/sched/em_ipt.c 	em->datalen = sizeof(*im) + mdata_len;
im                176 net/sched/em_ipt.c 	em->data = (unsigned long)im;
im                180 net/sched/em_ipt.c 	kfree(im);
im                187 net/sched/em_ipt.c 	struct em_ipt_match *im = (void *)em->data;
im                189 net/sched/em_ipt.c 	if (!im)
im                192 net/sched/em_ipt.c 	if (im->match->destroy) {
im                195 net/sched/em_ipt.c 			.match = im->match,
im                196 net/sched/em_ipt.c 			.matchinfo = im->match_data,
im                197 net/sched/em_ipt.c 			.family = im->match->family
im                199 net/sched/em_ipt.c 		im->match->destroy(&par);
im                201 net/sched/em_ipt.c 	module_put(im->match->me);
im                202 net/sched/em_ipt.c 	kfree((void *)im);
im                208 net/sched/em_ipt.c 	const struct em_ipt_match *im = (const void *)em->data;
im                211 net/sched/em_ipt.c 	u8 nfproto = im->match->family;
im                237 net/sched/em_ipt.c 	nf_hook_state_init(&state, im->hook, nfproto,
im                240 net/sched/em_ipt.c 	acpar.match = im->match;
im                241 net/sched/em_ipt.c 	acpar.matchinfo = im->match_data;
im                244 net/sched/em_ipt.c 	ret = im->match->match(skb, &acpar);
im                252 net/sched/em_ipt.c 	struct em_ipt_match *im = (void *)em->data;
im                254 net/sched/em_ipt.c 	if (nla_put_string(skb, TCA_EM_IPT_MATCH_NAME, im->match->name) < 0)
im                256 net/sched/em_ipt.c 	if (nla_put_u32(skb, TCA_EM_IPT_HOOK, im->hook) < 0)
im                258 net/sched/em_ipt.c 	if (nla_put_u8(skb, TCA_EM_IPT_MATCH_REVISION, im->match->revision) < 0)
im                260 net/sched/em_ipt.c 	if (nla_put_u8(skb, TCA_EM_IPT_NFPROTO, im->nfproto) < 0)
im                263 net/sched/em_ipt.c 		    im->match->usersize ?: im->match->matchsize,
im                264 net/sched/em_ipt.c 		    im->match_data) < 0)
im                107 net/sunrpc/svcauth_unix.c 	struct ip_map *im = container_of(item, struct ip_map, h);
im                111 net/sunrpc/svcauth_unix.c 		auth_domain_put(&im->m_client->h);
im                112 net/sunrpc/svcauth_unix.c 	kfree_rcu(im, m_rcu);
im                156 net/sunrpc/svcauth_unix.c 	struct ip_map *im = container_of(h, struct ip_map, h);
im                158 net/sunrpc/svcauth_unix.c 	if (ipv6_addr_v4mapped(&(im->m_addr))) {
im                159 net/sunrpc/svcauth_unix.c 		snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]);
im                161 net/sunrpc/svcauth_unix.c 		snprintf(text_addr, 40, "%pI6", &im->m_addr);
im                163 net/sunrpc/svcauth_unix.c 	qword_add(bpp, blen, im->m_class);
im                257 net/sunrpc/svcauth_unix.c 	struct ip_map *im;
im                265 net/sunrpc/svcauth_unix.c 	im = container_of(h, struct ip_map, h);
im                267 net/sunrpc/svcauth_unix.c 	addr = im->m_addr;
im                271 net/sunrpc/svcauth_unix.c 		dom = im->m_client->h.name;
im                275 net/sunrpc/svcauth_unix.c 			im->m_class, &addr.s6_addr32[3], dom);
im                277 net/sunrpc/svcauth_unix.c 		seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
im               1863 sound/pci/ali5451/ali5451.c 	struct snd_ali_image *im;
im               1866 sound/pci/ali5451/ali5451.c 	im = chip->image;
im               1867 sound/pci/ali5451/ali5451.c 	if (!im)
im               1876 sound/pci/ali5451/ali5451.c 	im->regs[ALI_MISCINT >> 2] = inl(ALI_REG(chip, ALI_MISCINT));
im               1878 sound/pci/ali5451/ali5451.c 	im->regs[ALI_STOP >> 2] = inl(ALI_REG(chip, ALI_STOP));
im               1886 sound/pci/ali5451/ali5451.c 		im->regs[i] = inl(ALI_REG(chip, i*4));
im               1892 sound/pci/ali5451/ali5451.c 			im->channel_regs[i][j] = inl(ALI_REG(chip, j*4 + 0xe0));
im               1906 sound/pci/ali5451/ali5451.c 	struct snd_ali_image *im;
im               1909 sound/pci/ali5451/ali5451.c 	im = chip->image;
im               1910 sound/pci/ali5451/ali5451.c 	if (!im)
im               1918 sound/pci/ali5451/ali5451.c 			outl(im->channel_regs[i][j], ALI_REG(chip, j*4 + 0xe0));
im               1925 sound/pci/ali5451/ali5451.c 		outl(im->regs[i], ALI_REG(chip, i*4));
im               1929 sound/pci/ali5451/ali5451.c 	outl(im->regs[ALI_START >> 2], ALI_REG(chip, ALI_START));
im               1931 sound/pci/ali5451/ali5451.c 	outl(im->regs[ALI_MISCINT >> 2], ALI_REG(chip, ALI_MISCINT));