pmc                41 arch/arm/mach-at91/pm.c 	int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
pmc               138 arch/arm/mach-at91/pm.c 	if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
pmc               142 arch/arm/mach-at91/pm.c 		writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
pmc               176 arch/arm/mach-at91/pm.c 			soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
pmc               197 arch/arm/mach-at91/pm.c static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
pmc               199 arch/arm/mach-at91/pm.c 	writel(mode, pmc + AT91_PMC_FSMR);
pmc               200 arch/arm/mach-at91/pm.c 	writel(polarity, pmc + AT91_PMC_FSPR);
pmc               205 arch/arm/mach-at91/pm.c static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
pmc               207 arch/arm/mach-at91/pm.c 	writel(mode, pmc + AT91_PMC_FSMR);
pmc               242 arch/arm/mach-at91/pm.c 	scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);
pmc               256 arch/arm/mach-at91/pm.c 		css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
pmc               557 arch/arm/mach-at91/pm.c 	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
pmc               567 arch/arm/mach-at91/pm.c 	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
pmc               768 arch/arm/mach-at91/pm.c 	const struct pmc_info *pmc;
pmc               774 arch/arm/mach-at91/pm.c 	soc_pm.data.pmc = of_iomap(pmc_np, 0);
pmc               775 arch/arm/mach-at91/pm.c 	if (!soc_pm.data.pmc) {
pmc               780 arch/arm/mach-at91/pm.c 	pmc = of_id->data;
pmc               781 arch/arm/mach-at91/pm.c 	soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
pmc                27 arch/arm/mach-at91/pm.h 	void __iomem *pmc;
pmc                 8 arch/arm/mach-at91/pm_data-offsets.c 	DEFINE(PM_DATA_PMC,		offsetof(struct at91_pm_data, pmc));
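Note: the arch/arm/mach-at91 hits above share one shape: pm.c iomaps the PMC node into soc_pm.data.pmc and then dispatches to a per-SoC config_pmc_ws() hook for the fast-startup registers. The two hooks whose bodies are split across the fragments above are reconstituted below to show why the indirection exists (sama5d2 programs both mode and wake-up polarity, sam9x60 only has the mode register). Sketch only; the header include and the example_ prefixes are assumptions, not the driver's own layout.

	#include <linux/io.h>
	#include <linux/clk/at91_pmc.h>	/* assumed home of AT91_PMC_FSMR/FSPR */

	/* sama5d2: wake-up mode and polarity are both programmable. */
	static int example_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
	{
		writel(mode, pmc + AT91_PMC_FSMR);
		writel(polarity, pmc + AT91_PMC_FSPR);
		return 0;
	}

	/* sam9x60: only the fast start-up mode register exists. */
	static int example_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
	{
		writel(mode, pmc + AT91_PMC_FSMR);
		return 0;
	}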
pmc               636 arch/powerpc/include/asm/kvm_host.h 	u32 pmc[8];
pmc                44 arch/powerpc/include/asm/perf_event_server.h 	void		(*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
pmc               542 arch/powerpc/kernel/asm-offsets.c 	OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
pmc              1662 arch/powerpc/kvm/book3s_hv.c 		*val = get_reg_val(id, vcpu->arch.pmc[i]);
pmc              1883 arch/powerpc/kvm/book3s_hv.c 		vcpu->arch.pmc[i] = set_reg_val(id, *val);
pmc                83 arch/powerpc/oprofile/op_model_pa6t.c 	int pmc;
pmc                92 arch/powerpc/oprofile/op_model_pa6t.c 	for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++)
pmc                93 arch/powerpc/oprofile/op_model_pa6t.c 		if (!ctr[pmc].enabled) {
pmc                94 arch/powerpc/oprofile/op_model_pa6t.c 			sys->mmcr0 &= ~(0x1UL << pmc);
pmc                95 arch/powerpc/oprofile/op_model_pa6t.c 			sys->mmcr0 &= ~(0x1UL << (pmc+12));
pmc                96 arch/powerpc/oprofile/op_model_pa6t.c 			pr_debug("turned off counter %u\n", pmc);
pmc               119 arch/powerpc/oprofile/op_model_pa6t.c 	for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) {
pmc               121 arch/powerpc/oprofile/op_model_pa6t.c 		reset_value[pmc] = (0x1UL << 39) - ctr[pmc].count;
pmc               123 arch/powerpc/oprofile/op_model_pa6t.c 				 pmc, reset_value[pmc]);
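Note: the pa6t oprofile lines clear two per-counter MMCR0 bits to turn a counter off (bit pmc and bit pmc+12, presumably that counter's control pair on this model) and preload each enabled counter with (0x1UL << 39) - count, so the counter reaches its 2^39 interrupt point after roughly `count` events. A standalone check of that preload arithmetic, assuming a 64-bit unsigned long as on powerpc64 (hypothetical program, not the oprofile code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long count = 100000;
		unsigned long reset_value = (0x1UL << 39) - count;	/* the pa6t preload */

		printf("preload 0x%lx, overflow after %lu events\n",
		       reset_value, (0x1UL << 39) - reset_value);
		return 0;
	}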
pmc                41 arch/powerpc/oprofile/op_model_power4.c 	int pmc, cntr_marked_events = 0;
pmc                47 arch/powerpc/oprofile/op_model_power4.c 	for (pmc = 0; pmc < 4; pmc++) {
pmc                49 arch/powerpc/oprofile/op_model_power4.c 				<< (OPROFILE_MAX_PMC_NUM - pmc)
pmc                51 arch/powerpc/oprofile/op_model_power4.c 		psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc)
pmc                55 arch/powerpc/oprofile/op_model_power4.c 				    - (pmc * OPROFILE_PMSEL_FIELD_WIDTH )));
pmc                57 arch/powerpc/oprofile/op_model_power4.c 				- (pmc * OPROFILE_PMSEL_FIELD_WIDTH));
pmc                61 arch/powerpc/oprofile/op_model_power4.c 			cntr_marked_events |= (pmc == 1 || pmc == 3) << pmc;
pmc                65 arch/powerpc/oprofile/op_model_power4.c 				cntr_marked_events |= (pmc == 0) << pmc;
pmc                70 arch/powerpc/oprofile/op_model_power4.c 				cntr_marked_events |= (pmc != 1) << pmc;
pmc                74 arch/powerpc/oprofile/op_model_power4.c 			cntr_marked_events |= 1 << pmc;
pmc                78 arch/powerpc/oprofile/op_model_power4.c 			cntr_marked_events |= (unit == 0xd) << pmc;
pmc                82 arch/powerpc/oprofile/op_model_power4.c 				cntr_marked_events |= (pmc >= 2) << pmc;
pmc                85 arch/powerpc/oprofile/op_model_power4.c 			cntr_marked_events |= (unit == 0xd) << pmc;
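Note: op_model_power4.c builds cntr_marked_events by shifting a 0/1 condition into each counter's bit, ending up with a bitmap of which PMCs are counting marked events. The idiom in isolation, reusing one of the conditions quoted above (illustrative, not the POWER4 event tables):

	#include <stdio.h>

	int main(void)
	{
		unsigned int cntr_marked_events = 0;
		int pmc;

		for (pmc = 0; pmc < 4; pmc++)
			cntr_marked_events |= (pmc == 1 || pmc == 3) << pmc;

		printf("marked-event PMC bitmap: 0x%x\n", cntr_marked_events);	/* 0xa */
		return 0;
	}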
pmc                69 arch/powerpc/perf/generic-compat-pmu.c PMU_FORMAT_ATTR(pmc,		"config:16-19");
pmc                16 arch/powerpc/perf/isa207-common.c PMU_FORMAT_ATTR(pmc,		"config:16-19");
pmc               117 arch/powerpc/perf/isa207-common.c static unsigned long combine_shift(unsigned long pmc)
pmc               120 arch/powerpc/perf/isa207-common.c 		return p9_MMCR1_COMBINE_SHIFT(pmc);
pmc               122 arch/powerpc/perf/isa207-common.c 	return MMCR1_COMBINE_SHIFT(pmc);
pmc               244 arch/powerpc/perf/isa207-common.c 	unsigned int unit, pmc, cache, ebb;
pmc               252 arch/powerpc/perf/isa207-common.c 	pmc   = (event >> EVENT_PMC_SHIFT)        & EVENT_PMC_MASK;
pmc               257 arch/powerpc/perf/isa207-common.c 	if (pmc) {
pmc               260 arch/powerpc/perf/isa207-common.c 		if (pmc > 6)
pmc               266 arch/powerpc/perf/isa207-common.c 		if (pmc >= 5 && base_event != 0x500fa &&
pmc               270 arch/powerpc/perf/isa207-common.c 		mask  |= CNST_PMC_MASK(pmc);
pmc               271 arch/powerpc/perf/isa207-common.c 		value |= CNST_PMC_VAL(pmc);
pmc               274 arch/powerpc/perf/isa207-common.c 	if (pmc <= 4) {
pmc               291 arch/powerpc/perf/isa207-common.c 			if (pmc == 4)
pmc               338 arch/powerpc/perf/isa207-common.c 	if (!pmc && ebb)
pmc               370 arch/powerpc/perf/isa207-common.c 	unsigned int pmc, pmc_inuse;
pmc               377 arch/powerpc/perf/isa207-common.c 		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
pmc               378 arch/powerpc/perf/isa207-common.c 		if (pmc)
pmc               379 arch/powerpc/perf/isa207-common.c 			pmc_inuse |= 1 << pmc;
pmc               386 arch/powerpc/perf/isa207-common.c 		pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
pmc               391 arch/powerpc/perf/isa207-common.c 		if (!pmc) {
pmc               392 arch/powerpc/perf/isa207-common.c 			for (pmc = 1; pmc <= 4; ++pmc) {
pmc               393 arch/powerpc/perf/isa207-common.c 				if (!(pmc_inuse & (1 << pmc)))
pmc               397 arch/powerpc/perf/isa207-common.c 			pmc_inuse |= 1 << pmc;
pmc               400 arch/powerpc/perf/isa207-common.c 		if (pmc <= 4) {
pmc               401 arch/powerpc/perf/isa207-common.c 			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
pmc               402 arch/powerpc/perf/isa207-common.c 			mmcr1 |= combine << combine_shift(pmc);
pmc               403 arch/powerpc/perf/isa207-common.c 			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
pmc               451 arch/powerpc/perf/isa207-common.c 			mmcr2 |= MMCR2_FCP(pmc);
pmc               454 arch/powerpc/perf/isa207-common.c 			mmcr2 |= MMCR2_FCH(pmc);
pmc               458 arch/powerpc/perf/isa207-common.c 				mmcr2 |= MMCR2_FCH(pmc);
pmc               460 arch/powerpc/perf/isa207-common.c 				mmcr2 |= MMCR2_FCS(pmc);
pmc               463 arch/powerpc/perf/isa207-common.c 		hwc[i] = pmc - 1;
pmc               487 arch/powerpc/perf/isa207-common.c void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[])
pmc               489 arch/powerpc/perf/isa207-common.c 	if (pmc <= 3)
pmc               490 arch/powerpc/perf/isa207-common.c 		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
pmc               153 arch/powerpc/perf/isa207-common.h #define CNST_PMC_SHIFT(pmc)	((pmc - 1) * 2)
pmc               154 arch/powerpc/perf/isa207-common.h #define CNST_PMC_VAL(pmc)	(1 << CNST_PMC_SHIFT(pmc))
pmc               155 arch/powerpc/perf/isa207-common.h #define CNST_PMC_MASK(pmc)	(2 << CNST_PMC_SHIFT(pmc))
pmc               163 arch/powerpc/perf/isa207-common.h #define MMCR1_UNIT_SHIFT(pmc)		(60 - (4 * ((pmc) - 1)))
pmc               164 arch/powerpc/perf/isa207-common.h #define MMCR1_COMBINE_SHIFT(pmc)	(35 - ((pmc) - 1))
pmc               165 arch/powerpc/perf/isa207-common.h #define MMCR1_PMCSEL_SHIFT(pmc)		(24 - (((pmc) - 1)) * 8)
pmc               171 arch/powerpc/perf/isa207-common.h #define p9_MMCR1_COMBINE_SHIFT(pmc)	(38 - ((pmc - 1) * 2))
pmc               198 arch/powerpc/perf/isa207-common.h #define MMCR2_FCS(pmc)			(1ull << (63 - (((pmc) - 1) * 9)))
pmc               199 arch/powerpc/perf/isa207-common.h #define MMCR2_FCP(pmc)			(1ull << (62 - (((pmc) - 1) * 9)))
pmc               200 arch/powerpc/perf/isa207-common.h #define MMCR2_FCH(pmc)			(1ull << (57 - (((pmc) - 1) * 9)))
pmc               222 arch/powerpc/perf/isa207-common.h void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]);
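Note: the isa207-common.h macros above place each event's unit, combine and PMCSEL fields at a PMC-dependent offset inside MMCR1, and the per-PMC freeze bits at 9-bit strides inside MMCR2, while the PMU_FORMAT_ATTR lines expose the PMC number itself in config bits 16-19. A standalone printout of the positions those formulas produce; the field formulas are copied from the header lines above, the *_BIT names are derived here for illustration only:

	#include <stdio.h>

	#define MMCR1_UNIT_SHIFT(pmc)		(60 - (4 * ((pmc) - 1)))
	#define MMCR1_COMBINE_SHIFT(pmc)	(35 - ((pmc) - 1))
	#define MMCR1_PMCSEL_SHIFT(pmc)		(24 - (((pmc) - 1)) * 8)
	#define MMCR2_FCS_BIT(pmc)		(63 - (((pmc) - 1) * 9))
	#define MMCR2_FCP_BIT(pmc)		(62 - (((pmc) - 1) * 9))
	#define MMCR2_FCH_BIT(pmc)		(57 - (((pmc) - 1) * 9))

	int main(void)
	{
		for (int pmc = 1; pmc <= 4; pmc++)
			printf("PMC%d: unit<<%d combine<<%d pmcsel<<%d fcs=%d fcp=%d fch=%d\n",
			       pmc, MMCR1_UNIT_SHIFT(pmc), MMCR1_COMBINE_SHIFT(pmc),
			       MMCR1_PMCSEL_SHIFT(pmc), MMCR2_FCS_BIT(pmc),
			       MMCR2_FCP_BIT(pmc), MMCR2_FCH_BIT(pmc));
		return 0;
	}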
pmc                39 arch/powerpc/perf/mpc7450-pmu.c 	int pmc;
pmc                41 arch/powerpc/perf/mpc7450-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc                42 arch/powerpc/perf/mpc7450-pmu.c 	if (pmc) {
pmc                43 arch/powerpc/perf/mpc7450-pmu.c 		if (pmc > N_COUNTER)
pmc                79 arch/powerpc/perf/mpc7450-pmu.c 	int pmc, sel;
pmc                81 arch/powerpc/perf/mpc7450-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc                83 arch/powerpc/perf/mpc7450-pmu.c 	switch (pmc) {
pmc               153 arch/powerpc/perf/mpc7450-pmu.c 	int pmc, class;
pmc               161 arch/powerpc/perf/mpc7450-pmu.c 		pmc = ((unsigned int)event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               162 arch/powerpc/perf/mpc7450-pmu.c 		mask  = pmcbits[pmc - 1][0];
pmc               163 arch/powerpc/perf/mpc7450-pmu.c 		value = pmcbits[pmc - 1][1];
pmc               268 arch/powerpc/perf/mpc7450-pmu.c 	u32 ev, pmc, thresh;
pmc               289 arch/powerpc/perf/mpc7450-pmu.c 				pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
pmc               290 arch/powerpc/perf/mpc7450-pmu.c 				if (pmc_inuse & (1 << (pmc - 1)))
pmc               297 arch/powerpc/perf/mpc7450-pmu.c 				pmc = ffs(pmc_avail);
pmc               299 arch/powerpc/perf/mpc7450-pmu.c 			pmc_inuse |= 1 << (pmc - 1);
pmc               308 arch/powerpc/perf/mpc7450-pmu.c 			ev &= pmcsel_mask[pmc - 1];
pmc               309 arch/powerpc/perf/mpc7450-pmu.c 			ev <<= pmcsel_shift[pmc - 1];
pmc               310 arch/powerpc/perf/mpc7450-pmu.c 			if (pmc <= 2)
pmc               314 arch/powerpc/perf/mpc7450-pmu.c 			hwc[event_index[class][i]] = pmc - 1;
pmc               334 arch/powerpc/perf/mpc7450-pmu.c static void mpc7450_disable_pmc(unsigned int pmc, unsigned long mmcr[])
pmc               336 arch/powerpc/perf/mpc7450-pmu.c 	if (pmc <= 1)
pmc               337 arch/powerpc/perf/mpc7450-pmu.c 		mmcr[0] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
pmc               339 arch/powerpc/perf/mpc7450-pmu.c 		mmcr[1] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
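Note: across the mpc7450 and the later compute_mmcr() hits, an event without a fixed PMC gets assigned whatever counter is still free, either via ffs() on an availability mask (mpc7450) or by looping over pmc_inuse. The idiom on its own, with an illustrative N_COUNTER value and in-use mask (hypothetical program):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define N_COUNTER 6	/* illustrative; the real value is per-CPU */

	int main(void)
	{
		unsigned int pmc_inuse = 0x5;	/* counters 1 and 3 taken (bit pmc-1) */
		unsigned int pmc_avail = ((1 << N_COUNTER) - 1) & ~pmc_inuse;
		int pmc = ffs(pmc_avail);	/* 1-based index of first free PMC */

		if (!pmc)
			return 1;		/* nothing free: constraint failure */
		pmc_inuse |= 1 << (pmc - 1);
		printf("assigned PMC%d, in-use mask now 0x%x\n", pmc, pmc_inuse);
		return 0;
	}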
pmc               135 arch/powerpc/perf/power5+-pmu.c 	int pmc, byte, unit, sh;
pmc               139 arch/powerpc/perf/power5+-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               140 arch/powerpc/perf/power5+-pmu.c 	if (pmc) {
pmc               141 arch/powerpc/perf/power5+-pmu.c 		if (pmc > 6)
pmc               143 arch/powerpc/perf/power5+-pmu.c 		sh = (pmc - 1) * 2;
pmc               146 arch/powerpc/perf/power5+-pmu.c 		if (pmc >= 5 && !(event == 0x500009 || event == 0x600005))
pmc               177 arch/powerpc/perf/power5+-pmu.c 	if (pmc < 5) {
pmc               189 arch/powerpc/perf/power5+-pmu.c 	int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               191 arch/powerpc/perf/power5+-pmu.c 	return pmc == 5 || pmc == 6;
pmc               243 arch/powerpc/perf/power5+-pmu.c 	int pmc, altpmc, pp, j;
pmc               245 arch/powerpc/perf/power5+-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               246 arch/powerpc/perf/power5+-pmu.c 	if (pmc == 0 || pmc > 4)
pmc               248 arch/powerpc/perf/power5+-pmu.c 	altpmc = 5 - pmc;	/* 1 <-> 4, 2 <-> 3 */
pmc               251 arch/powerpc/perf/power5+-pmu.c 		if (bytedecode_alternatives[pmc - 1][j] == pp) {
pmc               259 arch/powerpc/perf/power5+-pmu.c 	if (pmc == 1 && (pp == 0x0d || pp == 0x0e))
pmc               261 arch/powerpc/perf/power5+-pmu.c 	if (pmc == 3 && (pp == 0x2e || pp == 0x2f))
pmc               403 arch/powerpc/perf/power5+-pmu.c 	int pmc, psel;
pmc               407 arch/powerpc/perf/power5+-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               409 arch/powerpc/perf/power5+-pmu.c 	if (pmc >= 5)
pmc               414 arch/powerpc/perf/power5+-pmu.c 		if (direct_event_is_marked[psel] & (1 << pmc))
pmc               419 arch/powerpc/perf/power5+-pmu.c 			bit = pmc - 1;
pmc               421 arch/powerpc/perf/power5+-pmu.c 			bit = 4 - pmc;
pmc               422 arch/powerpc/perf/power5+-pmu.c 		else if (psel == 0x1b && (pmc == 1 || pmc == 3))
pmc               427 arch/powerpc/perf/power5+-pmu.c 		bit = pmc - 1;
pmc               428 arch/powerpc/perf/power5+-pmu.c 	} else if (pmc == 3 && (psel == 0x2e || psel == 0x2f)) {
pmc               455 arch/powerpc/perf/power5+-pmu.c 	unsigned int pmc, unit, byte, psel;
pmc               470 arch/powerpc/perf/power5+-pmu.c 		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
pmc               471 arch/powerpc/perf/power5+-pmu.c 		if (pmc) {
pmc               472 arch/powerpc/perf/power5+-pmu.c 			if (pmc > 6)
pmc               474 arch/powerpc/perf/power5+-pmu.c 			if (pmc_inuse & (1 << (pmc - 1)))
pmc               476 arch/powerpc/perf/power5+-pmu.c 			pmc_inuse |= 1 << (pmc - 1);
pmc               548 arch/powerpc/perf/power5+-pmu.c 		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
pmc               553 arch/powerpc/perf/power5+-pmu.c 		if (!pmc) {
pmc               555 arch/powerpc/perf/power5+-pmu.c 			for (pmc = 0; pmc < 4; ++pmc) {
pmc               556 arch/powerpc/perf/power5+-pmu.c 				if (!(pmc_inuse & (1 << pmc)))
pmc               559 arch/powerpc/perf/power5+-pmu.c 			if (pmc >= 4)
pmc               561 arch/powerpc/perf/power5+-pmu.c 			pmc_inuse |= 1 << pmc;
pmc               562 arch/powerpc/perf/power5+-pmu.c 		} else if (pmc <= 4) {
pmc               564 arch/powerpc/perf/power5+-pmu.c 			--pmc;
pmc               568 arch/powerpc/perf/power5+-pmu.c 				mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
pmc               571 arch/powerpc/perf/power5+-pmu.c 			--pmc;
pmc               580 arch/powerpc/perf/power5+-pmu.c 		if ((psel & 0x58) == 0x40 && (byte & 1) != ((pmc >> 1) & 1))
pmc               583 arch/powerpc/perf/power5+-pmu.c 		if (pmc <= 3)
pmc               584 arch/powerpc/perf/power5+-pmu.c 			mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
pmc               585 arch/powerpc/perf/power5+-pmu.c 		hwc[i] = pmc;
pmc               599 arch/powerpc/perf/power5+-pmu.c static void power5p_disable_pmc(unsigned int pmc, unsigned long mmcr[])
pmc               601 arch/powerpc/perf/power5+-pmu.c 	if (pmc <= 3)
pmc               602 arch/powerpc/perf/power5+-pmu.c 		mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
pmc               139 arch/powerpc/perf/power5-pmu.c 	int pmc, byte, unit, sh;
pmc               144 arch/powerpc/perf/power5-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               145 arch/powerpc/perf/power5-pmu.c 	if (pmc) {
pmc               146 arch/powerpc/perf/power5-pmu.c 		if (pmc > 6)
pmc               148 arch/powerpc/perf/power5-pmu.c 		sh = (pmc - 1) * 2;
pmc               151 arch/powerpc/perf/power5-pmu.c 		if (pmc <= 4)
pmc               152 arch/powerpc/perf/power5-pmu.c 			grp = (pmc - 1) >> 1;
pmc               184 arch/powerpc/perf/power5-pmu.c 		if (!pmc)
pmc               199 arch/powerpc/perf/power5-pmu.c 	if (pmc < 5) {
pmc               251 arch/powerpc/perf/power5-pmu.c 	int pmc, altpmc, pp, j;
pmc               253 arch/powerpc/perf/power5-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               254 arch/powerpc/perf/power5-pmu.c 	if (pmc == 0 || pmc > 4)
pmc               256 arch/powerpc/perf/power5-pmu.c 	altpmc = 5 - pmc;	/* 1 <-> 4, 2 <-> 3 */
pmc               259 arch/powerpc/perf/power5-pmu.c 		if (bytedecode_alternatives[pmc - 1][j] == pp) {
pmc               339 arch/powerpc/perf/power5-pmu.c 	int pmc, psel;
pmc               343 arch/powerpc/perf/power5-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               345 arch/powerpc/perf/power5-pmu.c 	if (pmc >= 5)
pmc               350 arch/powerpc/perf/power5-pmu.c 		if (direct_event_is_marked[psel] & (1 << pmc))
pmc               355 arch/powerpc/perf/power5-pmu.c 			bit = pmc - 1;
pmc               357 arch/powerpc/perf/power5-pmu.c 			bit = 4 - pmc;
pmc               358 arch/powerpc/perf/power5-pmu.c 		else if (psel == 0x1b && (pmc == 1 || pmc == 3))
pmc               386 arch/powerpc/perf/power5-pmu.c 	unsigned int pmc, unit, byte, psel;
pmc               403 arch/powerpc/perf/power5-pmu.c 		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
pmc               404 arch/powerpc/perf/power5-pmu.c 		if (pmc) {
pmc               405 arch/powerpc/perf/power5-pmu.c 			if (pmc > 6)
pmc               407 arch/powerpc/perf/power5-pmu.c 			if (pmc_inuse & (1 << (pmc - 1)))
pmc               409 arch/powerpc/perf/power5-pmu.c 			pmc_inuse |= 1 << (pmc - 1);
pmc               411 arch/powerpc/perf/power5-pmu.c 			if (pmc <= 4)
pmc               412 arch/powerpc/perf/power5-pmu.c 				++pmc_grp_use[(pmc - 1) >> 1];
pmc               427 arch/powerpc/perf/power5-pmu.c 			if (!pmc)
pmc               488 arch/powerpc/perf/power5-pmu.c 		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
pmc               493 arch/powerpc/perf/power5-pmu.c 		if (!pmc) {
pmc               495 arch/powerpc/perf/power5-pmu.c 			for (pmc = 0; pmc < 4; ++pmc) {
pmc               496 arch/powerpc/perf/power5-pmu.c 				if (pmc_inuse & (1 << pmc))
pmc               498 arch/powerpc/perf/power5-pmu.c 				grp = (pmc >> 1) & 1;
pmc               507 arch/powerpc/perf/power5-pmu.c 			pmc_inuse |= 1 << pmc;
pmc               508 arch/powerpc/perf/power5-pmu.c 		} else if (pmc <= 4) {
pmc               510 arch/powerpc/perf/power5-pmu.c 			--pmc;
pmc               513 arch/powerpc/perf/power5-pmu.c 				mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
pmc               516 arch/powerpc/perf/power5-pmu.c 			--pmc;
pmc               525 arch/powerpc/perf/power5-pmu.c 		if (pmc <= 3)
pmc               526 arch/powerpc/perf/power5-pmu.c 			mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
pmc               527 arch/powerpc/perf/power5-pmu.c 		hwc[i] = pmc;
pmc               541 arch/powerpc/perf/power5-pmu.c static void power5_disable_pmc(unsigned int pmc, unsigned long mmcr[])
pmc               543 arch/powerpc/perf/power5-pmu.c 	if (pmc <= 3)
pmc               544 arch/powerpc/perf/power5-pmu.c 		mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
pmc               137 arch/powerpc/perf/power6-pmu.c 	int pmc, psel, ptype;
pmc               141 arch/powerpc/perf/power6-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               143 arch/powerpc/perf/power6-pmu.c 	if (pmc >= 5)
pmc               149 arch/powerpc/perf/power6-pmu.c 		if (pmc == 0 || !(ptype & (1 << (pmc - 1))))
pmc               157 arch/powerpc/perf/power6-pmu.c 			bit = ptype ^ (pmc - 1);
pmc               179 arch/powerpc/perf/power6-pmu.c 	unsigned int pmc, ev, b, u, s, psel;
pmc               186 arch/powerpc/perf/power6-pmu.c 		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
pmc               187 arch/powerpc/perf/power6-pmu.c 		if (pmc) {
pmc               188 arch/powerpc/perf/power6-pmu.c 			if (pmc_inuse & (1 << (pmc - 1)))
pmc               190 arch/powerpc/perf/power6-pmu.c 			pmc_inuse |= 1 << (pmc - 1);
pmc               195 arch/powerpc/perf/power6-pmu.c 		pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
pmc               196 arch/powerpc/perf/power6-pmu.c 		if (pmc) {
pmc               197 arch/powerpc/perf/power6-pmu.c 			--pmc;
pmc               200 arch/powerpc/perf/power6-pmu.c 			for (pmc = 0; pmc < 4; ++pmc)
pmc               201 arch/powerpc/perf/power6-pmu.c 				if (!(pmc_inuse & (1 << pmc)))
pmc               203 arch/powerpc/perf/power6-pmu.c 			if (pmc >= 4)
pmc               205 arch/powerpc/perf/power6-pmu.c 			pmc_inuse |= 1 << pmc;
pmc               207 arch/powerpc/perf/power6-pmu.c 		hwc[i] = pmc;
pmc               230 arch/powerpc/perf/power6-pmu.c 					mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc;
pmc               233 arch/powerpc/perf/power6-pmu.c 			if (pmc >= 2 && (psel & 0x90) == 0x80)
pmc               237 arch/powerpc/perf/power6-pmu.c 			mmcr1 |= MMCR1_PMC1_LLA >> pmc;
pmc               239 arch/powerpc/perf/power6-pmu.c 				mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc;
pmc               243 arch/powerpc/perf/power6-pmu.c 		if (pmc < 4)
pmc               244 arch/powerpc/perf/power6-pmu.c 			mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc);
pmc               269 arch/powerpc/perf/power6-pmu.c 	int pmc, byte, sh, subunit;
pmc               272 arch/powerpc/perf/power6-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               273 arch/powerpc/perf/power6-pmu.c 	if (pmc) {
pmc               274 arch/powerpc/perf/power6-pmu.c 		if (pmc > 4 && !(event == 0x500009 || event == 0x600005))
pmc               276 arch/powerpc/perf/power6-pmu.c 		sh = (pmc - 1) * 2;
pmc               291 arch/powerpc/perf/power6-pmu.c 	if (pmc <= 4) {
pmc               302 arch/powerpc/perf/power6-pmu.c 	int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               304 arch/powerpc/perf/power6-pmu.c 	return pmc == 5 || pmc == 6;
pmc               361 arch/powerpc/perf/power6-pmu.c 	unsigned int psel, pmc;
pmc               385 arch/powerpc/perf/power6-pmu.c 		pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               386 arch/powerpc/perf/power6-pmu.c 		if (pmc && (psel == 0x32 || psel == 0x34))
pmc               388 arch/powerpc/perf/power6-pmu.c 				((5 - pmc) << PM_PMC_SH);
pmc               391 arch/powerpc/perf/power6-pmu.c 		if (pmc && (psel == 0x38 || psel == 0x3a))
pmc               393 arch/powerpc/perf/power6-pmu.c 				((pmc > 2? pmc - 2: pmc + 2) << PM_PMC_SH);
pmc               460 arch/powerpc/perf/power6-pmu.c static void p6_disable_pmc(unsigned int pmc, unsigned long mmcr[])
pmc               463 arch/powerpc/perf/power6-pmu.c 	if (pmc <= 3)
pmc               464 arch/powerpc/perf/power6-pmu.c 		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
pmc                84 arch/powerpc/perf/power7-pmu.c 	int pmc, sh, unit;
pmc                87 arch/powerpc/perf/power7-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc                88 arch/powerpc/perf/power7-pmu.c 	if (pmc) {
pmc                89 arch/powerpc/perf/power7-pmu.c 		if (pmc > 6)
pmc                91 arch/powerpc/perf/power7-pmu.c 		sh = (pmc - 1) * 2;
pmc                94 arch/powerpc/perf/power7-pmu.c 		if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4))
pmc                97 arch/powerpc/perf/power7-pmu.c 	if (pmc < 5) {
pmc               144 arch/powerpc/perf/power7-pmu.c 	int pmc, psel;
pmc               147 arch/powerpc/perf/power7-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               149 arch/powerpc/perf/power7-pmu.c 	if ((pmc == 2 || pmc == 4) && (psel & ~7) == 0x40)
pmc               151 arch/powerpc/perf/power7-pmu.c 	if ((pmc == 1 || pmc == 3) && (psel & ~7) == 0x48)
pmc               213 arch/powerpc/perf/power7-pmu.c 	int pmc, psel;
pmc               216 arch/powerpc/perf/power7-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               219 arch/powerpc/perf/power7-pmu.c 	if (pmc >= 5)
pmc               224 arch/powerpc/perf/power7-pmu.c 		return pmc == 2 || pmc == 4;
pmc               227 arch/powerpc/perf/power7-pmu.c 			return pmc == 1;
pmc               229 arch/powerpc/perf/power7-pmu.c 			return pmc != 2;
pmc               236 arch/powerpc/perf/power7-pmu.c 			return pmc >= 3;
pmc               249 arch/powerpc/perf/power7-pmu.c 	unsigned int pmc, unit, combine, l2sel, psel;
pmc               255 arch/powerpc/perf/power7-pmu.c 		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
pmc               256 arch/powerpc/perf/power7-pmu.c 		if (pmc) {
pmc               257 arch/powerpc/perf/power7-pmu.c 			if (pmc > 6)
pmc               259 arch/powerpc/perf/power7-pmu.c 			if (pmc_inuse & (1 << (pmc - 1)))
pmc               261 arch/powerpc/perf/power7-pmu.c 			pmc_inuse |= 1 << (pmc - 1);
pmc               267 arch/powerpc/perf/power7-pmu.c 		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
pmc               272 arch/powerpc/perf/power7-pmu.c 		if (!pmc) {
pmc               274 arch/powerpc/perf/power7-pmu.c 			for (pmc = 0; pmc < 4; ++pmc) {
pmc               275 arch/powerpc/perf/power7-pmu.c 				if (!(pmc_inuse & (1 << pmc)))
pmc               278 arch/powerpc/perf/power7-pmu.c 			if (pmc >= 4)
pmc               280 arch/powerpc/perf/power7-pmu.c 			pmc_inuse |= 1 << pmc;
pmc               283 arch/powerpc/perf/power7-pmu.c 			--pmc;
pmc               285 arch/powerpc/perf/power7-pmu.c 		if (pmc <= 3) {
pmc               287 arch/powerpc/perf/power7-pmu.c 				<< (MMCR1_TTM0SEL_SH - 4 * pmc);
pmc               289 arch/powerpc/perf/power7-pmu.c 				<< (MMCR1_PMC1_COMBINE_SH - pmc);
pmc               290 arch/powerpc/perf/power7-pmu.c 			mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
pmc               297 arch/powerpc/perf/power7-pmu.c 		hwc[i] = pmc;
pmc               311 arch/powerpc/perf/power7-pmu.c static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
pmc               313 arch/powerpc/perf/power7-pmu.c 	if (pmc <= 3)
pmc               314 arch/powerpc/perf/power7-pmu.c 		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
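Note: every *_disable_pmc() hit above does the same thing: if the counter is one whose select field lives in an MMCR word (PMC1-4 on most of these CPUs, PMC1-2 in MMCR0 on mpc7450/ppc970), clear its PMCSEL field so the counter stops counting; only the field width differs (0x7f on power5/5+, 0xff on power6/7 and isa207). A self-contained sketch of the hook, with a stand-in shift macro rather than any one CPU's real layout:

	/* Stand-in field position; each CPU file above has its own macro and width. */
	#define EXAMPLE_PMCSEL_SH(pmc)	(24 - (pmc) * 8)

	/* pmc is 0-based here, as in the power5/6/7 callers above. */
	static void example_disable_pmc(unsigned int pmc, unsigned long mmcr[])
	{
		if (pmc <= 3)
			mmcr[1] &= ~(0xffUL << EXAMPLE_PMCSEL_SH(pmc));
	}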
pmc               216 arch/powerpc/perf/power9-pmu.c PMU_FORMAT_ATTR(pmc,		"config:16-19");
pmc               145 arch/powerpc/perf/ppc970-pmu.c 	int pmc, psel, unit, byte, bit;
pmc               148 arch/powerpc/perf/ppc970-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               150 arch/powerpc/perf/ppc970-pmu.c 	if (pmc) {
pmc               151 arch/powerpc/perf/ppc970-pmu.c 		if (direct_marked_event[pmc - 1] & (1 << psel))
pmc               154 arch/powerpc/perf/ppc970-pmu.c 			bit = (pmc <= 4)? pmc - 1: 8 - pmc;
pmc               193 arch/powerpc/perf/ppc970-pmu.c 	int pmc, byte, unit, sh, spcsel;
pmc               197 arch/powerpc/perf/ppc970-pmu.c 	pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
pmc               198 arch/powerpc/perf/ppc970-pmu.c 	if (pmc) {
pmc               199 arch/powerpc/perf/ppc970-pmu.c 		if (pmc > 8)
pmc               201 arch/powerpc/perf/ppc970-pmu.c 		sh = (pmc - 1) * 2;
pmc               204 arch/powerpc/perf/ppc970-pmu.c 		grp = ((pmc - 1) >> 1) & 1;
pmc               217 arch/powerpc/perf/ppc970-pmu.c 		if (!pmc)
pmc               259 arch/powerpc/perf/ppc970-pmu.c 	unsigned int pmc, unit, byte, psel;
pmc               279 arch/powerpc/perf/ppc970-pmu.c 		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
pmc               280 arch/powerpc/perf/ppc970-pmu.c 		if (pmc) {
pmc               281 arch/powerpc/perf/ppc970-pmu.c 			if (pmc_inuse & (1 << (pmc - 1)))
pmc               283 arch/powerpc/perf/ppc970-pmu.c 			pmc_inuse |= 1 << (pmc - 1);
pmc               285 arch/powerpc/perf/ppc970-pmu.c 			++pmc_grp_use[((pmc - 1) >> 1) & 1];
pmc               292 arch/powerpc/perf/ppc970-pmu.c 			if (!pmc)
pmc               346 arch/powerpc/perf/ppc970-pmu.c 		pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
pmc               350 arch/powerpc/perf/ppc970-pmu.c 		if (!pmc) {
pmc               356 arch/powerpc/perf/ppc970-pmu.c 			for (pmc = 0; pmc < 8; ++pmc) {
pmc               357 arch/powerpc/perf/ppc970-pmu.c 				if (pmc_inuse & (1 << pmc))
pmc               359 arch/powerpc/perf/ppc970-pmu.c 				grp = (pmc >> 1) & 1;
pmc               368 arch/powerpc/perf/ppc970-pmu.c 			pmc_inuse |= 1 << pmc;
pmc               371 arch/powerpc/perf/ppc970-pmu.c 			--pmc;
pmc               374 arch/powerpc/perf/ppc970-pmu.c 				mmcr1 |= 1ull << mmcr1_adder_bits[pmc];
pmc               376 arch/powerpc/perf/ppc970-pmu.c 		pmcsel[pmc] = psel;
pmc               377 arch/powerpc/perf/ppc970-pmu.c 		hwc[i] = pmc;
pmc               383 arch/powerpc/perf/ppc970-pmu.c 	for (pmc = 0; pmc < 2; ++pmc)
pmc               384 arch/powerpc/perf/ppc970-pmu.c 		mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc);
pmc               385 arch/powerpc/perf/ppc970-pmu.c 	for (; pmc < 8; ++pmc)
pmc               386 arch/powerpc/perf/ppc970-pmu.c 		mmcr1 |= (unsigned long)pmcsel[pmc]
pmc               387 arch/powerpc/perf/ppc970-pmu.c 			<< (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
pmc               402 arch/powerpc/perf/ppc970-pmu.c static void p970_disable_pmc(unsigned int pmc, unsigned long mmcr[])
pmc               406 arch/powerpc/perf/ppc970-pmu.c 	if (pmc <= 1) {
pmc               407 arch/powerpc/perf/ppc970-pmu.c 		shift = MMCR0_PMC1SEL_SH - 7 * pmc;
pmc               410 arch/powerpc/perf/ppc970-pmu.c 		shift = MMCR1_PMC3SEL_SH - 5 * (pmc - 2);
pmc               214 arch/x86/include/asm/kvm_emulate.h 	int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
pmc               215 arch/x86/include/asm/kvm_emulate.h 	int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
pmc              3637 arch/x86/kvm/emulate.c 	u64 pmc;
pmc              3639 arch/x86/kvm/emulate.c 	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
pmc              3641 arch/x86/kvm/emulate.c 	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
pmc              3642 arch/x86/kvm/emulate.c 	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
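Note: the emulate.c hit implements RDPMC's register convention: the counter selected by ECX comes back as a 64-bit value and is split into EDX:EAX. The split in isolation (hypothetical counter value):

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t pmc = 0x123456789abcdef0ULL;	/* value read from the counter */
		uint32_t eax = (uint32_t)pmc;		/* low 32 bits */
		uint32_t edx = pmc >> 32;		/* high 32 bits */

		printf("EAX=0x%08" PRIx32 " EDX=0x%08" PRIx32 "\n", eax, edx);
		return 0;
	}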
pmc                62 arch/x86/kvm/pmu.c 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
pmc                63 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
pmc                65 arch/x86/kvm/pmu.c 	if (!test_and_set_bit(pmc->idx,
pmc                67 arch/x86/kvm/pmu.c 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
pmc                68 arch/x86/kvm/pmu.c 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
pmc                76 arch/x86/kvm/pmu.c 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
pmc                77 arch/x86/kvm/pmu.c 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
pmc                79 arch/x86/kvm/pmu.c 	if (!test_and_set_bit(pmc->idx,
pmc                81 arch/x86/kvm/pmu.c 		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
pmc                82 arch/x86/kvm/pmu.c 		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
pmc                93 arch/x86/kvm/pmu.c 			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
pmc                95 arch/x86/kvm/pmu.c 			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
pmc                99 arch/x86/kvm/pmu.c static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
pmc               116 arch/x86/kvm/pmu.c 	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
pmc               132 arch/x86/kvm/pmu.c 						 kvm_perf_overflow, pmc);
pmc               135 arch/x86/kvm/pmu.c 			    PTR_ERR(event), pmc->idx);
pmc               139 arch/x86/kvm/pmu.c 	pmc->perf_event = event;
pmc               140 arch/x86/kvm/pmu.c 	clear_bit(pmc->idx, (unsigned long*)&pmc_to_pmu(pmc)->reprogram_pmi);
pmc               143 arch/x86/kvm/pmu.c void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
pmc               147 arch/x86/kvm/pmu.c 	struct kvm *kvm = pmc->vcpu->kvm;
pmc               155 arch/x86/kvm/pmu.c 	pmc->eventsel = eventsel;
pmc               157 arch/x86/kvm/pmu.c 	pmc_stop_counter(pmc);
pmc               159 arch/x86/kvm/pmu.c 	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
pmc               186 arch/x86/kvm/pmu.c 		config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
pmc               196 arch/x86/kvm/pmu.c 	pmc_reprogram_counter(pmc, type, config,
pmc               205 arch/x86/kvm/pmu.c void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
pmc               210 arch/x86/kvm/pmu.c 	struct kvm *kvm = pmc->vcpu->kvm;
pmc               212 arch/x86/kvm/pmu.c 	pmc_stop_counter(pmc);
pmc               214 arch/x86/kvm/pmu.c 	if (!en_field || !pmc_is_enabled(pmc))
pmc               227 arch/x86/kvm/pmu.c 	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
pmc               237 arch/x86/kvm/pmu.c 	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);
pmc               239 arch/x86/kvm/pmu.c 	if (!pmc)
pmc               242 arch/x86/kvm/pmu.c 	if (pmc_is_gp(pmc))
pmc               243 arch/x86/kvm/pmu.c 		reprogram_gp_counter(pmc, pmc->eventsel);
pmc               248 arch/x86/kvm/pmu.c 		reprogram_fixed_counter(pmc, ctrl, idx);
pmc               262 arch/x86/kvm/pmu.c 		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);
pmc               264 arch/x86/kvm/pmu.c 		if (unlikely(!pmc || !pmc->perf_event)) {
pmc               317 arch/x86/kvm/pmu.c 	struct kvm_pmc *pmc;
pmc               326 arch/x86/kvm/pmu.c 	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
pmc               327 arch/x86/kvm/pmu.c 	if (!pmc)
pmc               330 arch/x86/kvm/pmu.c 	*data = pmc_read_counter(pmc) & mask;
pmc                 9 arch/x86/kvm/pmu.h #define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)
pmc                28 arch/x86/kvm/pmu.h 	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
pmc                41 arch/x86/kvm/pmu.h static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
pmc                43 arch/x86/kvm/pmu.h 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
pmc                45 arch/x86/kvm/pmu.h 	return pmu->counter_bitmask[pmc->type];
pmc                48 arch/x86/kvm/pmu.h static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
pmc                52 arch/x86/kvm/pmu.h 	counter = pmc->counter;
pmc                53 arch/x86/kvm/pmu.h 	if (pmc->perf_event)
pmc                54 arch/x86/kvm/pmu.h 		counter += perf_event_read_value(pmc->perf_event,
pmc                57 arch/x86/kvm/pmu.h 	return counter & pmc_bitmask(pmc);
pmc                60 arch/x86/kvm/pmu.h static inline void pmc_stop_counter(struct kvm_pmc *pmc)
pmc                62 arch/x86/kvm/pmu.h 	if (pmc->perf_event) {
pmc                63 arch/x86/kvm/pmu.h 		pmc->counter = pmc_read_counter(pmc);
pmc                64 arch/x86/kvm/pmu.h 		perf_event_release_kernel(pmc->perf_event);
pmc                65 arch/x86/kvm/pmu.h 		pmc->perf_event = NULL;
pmc                69 arch/x86/kvm/pmu.h static inline bool pmc_is_gp(struct kvm_pmc *pmc)
pmc                71 arch/x86/kvm/pmu.h 	return pmc->type == KVM_PMC_GP;
pmc                74 arch/x86/kvm/pmu.h static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
pmc                76 arch/x86/kvm/pmu.h 	return pmc->type == KVM_PMC_FIXED;
pmc                79 arch/x86/kvm/pmu.h static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
pmc                81 arch/x86/kvm/pmu.h 	return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
pmc               116 arch/x86/kvm/pmu.h void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
pmc               117 arch/x86/kvm/pmu.h void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
pmc               122 arch/x86/kvm/pmu.h int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
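Note: the pmu.h helpers above show how KVM materializes the guest-visible counter: the saved pmc->counter plus whatever the backing host perf_event has accumulated since (perf_event_read_value), masked to the width advertised for that counter type (pmu->counter_bitmask[pmc->type]). The arithmetic alone, with an assumed 48-bit counter width (hypothetical values):

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t bitmask = (1ULL << 48) - 1;	/* assumed counter width */
		uint64_t saved = 0xffffffffff00ULL;	/* pmc->counter at last stop */
		uint64_t perf_delta = 0x200;		/* accumulated by the host event */

		/* Equivalent of pmc_read_counter(): add, then wrap at the mask. */
		printf("guest view: 0x%" PRIx64 "\n", (saved + perf_delta) & bitmask);
		return 0;
	}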
pmc               155 arch/x86/kvm/pmu_amd.c static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
pmc               214 arch/x86/kvm/pmu_amd.c 	struct kvm_pmc *pmc;
pmc               217 arch/x86/kvm/pmu_amd.c 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
pmc               218 arch/x86/kvm/pmu_amd.c 	if (pmc) {
pmc               219 arch/x86/kvm/pmu_amd.c 		*data = pmc_read_counter(pmc);
pmc               223 arch/x86/kvm/pmu_amd.c 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
pmc               224 arch/x86/kvm/pmu_amd.c 	if (pmc) {
pmc               225 arch/x86/kvm/pmu_amd.c 		*data = pmc->eventsel;
pmc               235 arch/x86/kvm/pmu_amd.c 	struct kvm_pmc *pmc;
pmc               240 arch/x86/kvm/pmu_amd.c 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
pmc               241 arch/x86/kvm/pmu_amd.c 	if (pmc) {
pmc               242 arch/x86/kvm/pmu_amd.c 		pmc->counter += data - pmc_read_counter(pmc);
pmc               246 arch/x86/kvm/pmu_amd.c 	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
pmc               247 arch/x86/kvm/pmu_amd.c 	if (pmc) {
pmc               248 arch/x86/kvm/pmu_amd.c 		if (data == pmc->eventsel)
pmc               251 arch/x86/kvm/pmu_amd.c 			reprogram_gp_counter(pmc, data);
pmc               297 arch/x86/kvm/pmu_amd.c 		struct kvm_pmc *pmc = &pmu->gp_counters[i];
pmc               299 arch/x86/kvm/pmu_amd.c 		pmc_stop_counter(pmc);
pmc               300 arch/x86/kvm/pmu_amd.c 		pmc->counter = pmc->eventsel = 0;
pmc                42 arch/x86/kvm/vmx/pmu_intel.c 		struct kvm_pmc *pmc;
pmc                44 arch/x86/kvm/vmx/pmu_intel.c 		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
pmc                49 arch/x86/kvm/vmx/pmu_intel.c 		reprogram_fixed_counter(pmc, new_ctrl, i);
pmc                98 arch/x86/kvm/vmx/pmu_intel.c static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
pmc               100 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
pmc               102 arch/x86/kvm/vmx/pmu_intel.c 	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
pmc               176 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmc *pmc;
pmc               192 arch/x86/kvm/vmx/pmu_intel.c 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
pmc               193 arch/x86/kvm/vmx/pmu_intel.c 			u64 val = pmc_read_counter(pmc);
pmc               196 arch/x86/kvm/vmx/pmu_intel.c 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
pmc               197 arch/x86/kvm/vmx/pmu_intel.c 			u64 val = pmc_read_counter(pmc);
pmc               200 arch/x86/kvm/vmx/pmu_intel.c 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
pmc               201 arch/x86/kvm/vmx/pmu_intel.c 			*data = pmc->eventsel;
pmc               212 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmc *pmc;
pmc               248 arch/x86/kvm/vmx/pmu_intel.c 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
pmc               250 arch/x86/kvm/vmx/pmu_intel.c 				pmc->counter = data;
pmc               252 arch/x86/kvm/vmx/pmu_intel.c 				pmc->counter = (s32)data;
pmc               254 arch/x86/kvm/vmx/pmu_intel.c 		} else if ((pmc = get_fixed_pmc(pmu, msr))) {
pmc               255 arch/x86/kvm/vmx/pmu_intel.c 			pmc->counter = data;
pmc               257 arch/x86/kvm/vmx/pmu_intel.c 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
pmc               258 arch/x86/kvm/vmx/pmu_intel.c 			if (data == pmc->eventsel)
pmc               261 arch/x86/kvm/vmx/pmu_intel.c 				reprogram_gp_counter(pmc, data);
pmc               351 arch/x86/kvm/vmx/pmu_intel.c 	struct kvm_pmc *pmc = NULL;
pmc               355 arch/x86/kvm/vmx/pmu_intel.c 		pmc = &pmu->gp_counters[i];
pmc               357 arch/x86/kvm/vmx/pmu_intel.c 		pmc_stop_counter(pmc);
pmc               358 arch/x86/kvm/vmx/pmu_intel.c 		pmc->counter = pmc->eventsel = 0;
pmc               362 arch/x86/kvm/vmx/pmu_intel.c 		pmc = &pmu->fixed_counters[i];
pmc               364 arch/x86/kvm/vmx/pmu_intel.c 		pmc_stop_counter(pmc);
pmc               365 arch/x86/kvm/vmx/pmu_intel.c 		pmc->counter = 0;
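Note: both vendor reset paths above follow the same order: pmc_stop_counter() first (which, per the pmu.h lines, folds the perf_event's count into pmc->counter and releases the event), then the counter and event-select values are zeroed. A condensed sketch of that loop; struct kvm_pmu/kvm_pmc and the nr_arch_gp_counters field are assumed from pmu.h and kvm_host.h, not shown here:

	/* Sketch of the reset loop seen in pmu_amd.c and vmx/pmu_intel.c above. */
	static void example_pmu_reset_gp(struct kvm_pmu *pmu)
	{
		int i;

		for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
			struct kvm_pmc *pmc = &pmu->gp_counters[i];

			pmc_stop_counter(pmc);		/* fold perf count, drop the event */
			pmc->counter = pmc->eventsel = 0;
		}
	}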
pmc              6212 arch/x86/kvm/x86.c 			      u32 pmc)
pmc              6214 arch/x86/kvm/x86.c 	return kvm_pmu_is_valid_msr_idx(emul_to_vcpu(ctxt), pmc);
pmc              6218 arch/x86/kvm/x86.c 			     u32 pmc, u64 *pdata)
pmc              6220 arch/x86/kvm/x86.c 	return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
pmc               233 drivers/clk/tegra/clk-pll.c #define pll_override_readl(offset, p) readl_relaxed(p->pmc + offset)
pmc               240 drivers/clk/tegra/clk-pll.c #define pll_override_writel(val, offset, p) writel(val, p->pmc + offset)
pmc               336 drivers/clk/tegra/clk-pll.c 		val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
pmc               373 drivers/clk/tegra/clk-pll.c 		val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
pmc               375 drivers/clk/tegra/clk-pll.c 		writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
pmc               391 drivers/clk/tegra/clk-pll.c 		val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
pmc               393 drivers/clk/tegra/clk-pll.c 		writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
pmc               896 drivers/clk/tegra/clk-pll.c 	if (!pll->pmc)
pmc               903 drivers/clk/tegra/clk-pll.c 	val = readl(pll->pmc + PMC_SATA_PWRGT);
pmc               905 drivers/clk/tegra/clk-pll.c 	writel(val, pll->pmc + PMC_SATA_PWRGT);
pmc               907 drivers/clk/tegra/clk-pll.c 	val = readl(pll->pmc + PMC_SATA_PWRGT);
pmc               909 drivers/clk/tegra/clk-pll.c 	writel(val, pll->pmc + PMC_SATA_PWRGT);
pmc               911 drivers/clk/tegra/clk-pll.c 	val = readl(pll->pmc + PMC_SATA_PWRGT);
pmc               913 drivers/clk/tegra/clk-pll.c 	writel(val, pll->pmc + PMC_SATA_PWRGT);
pmc              1808 drivers/clk/tegra/clk-pll.c 		void __iomem *pmc, struct tegra_clk_pll_params *pll_params,
pmc              1818 drivers/clk/tegra/clk-pll.c 	pll->pmc = pmc;
pmc              1859 drivers/clk/tegra/clk-pll.c 		void __iomem *clk_base, void __iomem *pmc,
pmc              1868 drivers/clk/tegra/clk-pll.c 	pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
pmc              1890 drivers/clk/tegra/clk-pll.c 		void __iomem *clk_base, void __iomem *pmc,
pmc              1902 drivers/clk/tegra/clk-pll.c 	pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
pmc              1981 drivers/clk/tegra/clk-pll.c 			  void __iomem *clk_base, void __iomem *pmc,
pmc              2032 drivers/clk/tegra/clk-pll.c 	pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
pmc              2045 drivers/clk/tegra/clk-pll.c 			  void __iomem *clk_base, void __iomem *pmc,
pmc              2060 drivers/clk/tegra/clk-pll.c 	pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
pmc              2094 drivers/clk/tegra/clk-pll.c 			  void __iomem *clk_base, void __iomem *pmc,
pmc              2123 drivers/clk/tegra/clk-pll.c 	pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
pmc              2136 drivers/clk/tegra/clk-pll.c 			  void __iomem *clk_base, void __iomem *pmc,
pmc              2162 drivers/clk/tegra/clk-pll.c 	pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
pmc              2365 drivers/clk/tegra/clk-pll.c 			  void __iomem *pmc, unsigned long flags,
pmc              2378 drivers/clk/tegra/clk-pll.c 	pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
pmc              2570 drivers/clk/tegra/clk-pll.c 			void __iomem *pmc, unsigned long flags,
pmc              2598 drivers/clk/tegra/clk-pll.c 	pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
pmc              2660 drivers/clk/tegra/clk-pll.c 			  void __iomem *clk_base, void __iomem *pmc,
pmc              2689 drivers/clk/tegra/clk-pll.c 	pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
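Note: in the Tegra clock driver the PLL code carries a second iomem base, pll->pmc, because some PLL controls live in the Power Management Controller rather than the clock controller: PLLP override bits in PMC_PLLP_WB0_OVERRIDE and the SATA PLL power-gate sequencing in PMC_SATA_PWRGT, both driven as plain read-modify-write. The shape of those accesses, as a sketch; PMC_PLLP_WB0_OVERRIDE is the driver's own offset macro and example_enable_bit is a stand-in for whichever override bit is being toggled:

	#include <linux/io.h>

	static void example_pmc_override_set(void __iomem *pmc, u32 example_enable_bit)
	{
		u32 val;

		val = readl_relaxed(pmc + PMC_PLLP_WB0_OVERRIDE);
		val |= example_enable_bit;
		writel_relaxed(val, pmc + PMC_PLLP_WB0_OVERRIDE);
	}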
pmc               917 drivers/clk/tegra/clk-tegra114.c 				     void __iomem *pmc)
pmc               923 drivers/clk/tegra/clk-tegra114.c 			pmc, 0, &pll_c_params, NULL);
pmc               936 drivers/clk/tegra/clk-tegra114.c 	clk = tegra_clk_register_pllc("pll_c2", "pll_ref", clk_base, pmc, 0,
pmc               941 drivers/clk/tegra/clk-tegra114.c 	clk = tegra_clk_register_pllc("pll_c3", "pll_ref", clk_base, pmc, 0,
pmc               946 drivers/clk/tegra/clk-tegra114.c 	clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
pmc               990 drivers/clk/tegra/clk-tegra114.c 	clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc, 0,
pmc              1000 drivers/clk/tegra/clk-tegra114.c 	clk = tegra_clk_register_pll("pll_d2", "pll_ref", clk_base, pmc, 0,
pmc              1010 drivers/clk/tegra/clk-tegra114.c 	clk = tegra_clk_register_pllre("pll_re_vco", "pll_ref", clk_base, pmc,
pmc              1040 drivers/clk/tegra/clk-tegra124.c 				     void __iomem *pmc)
pmc              1046 drivers/clk/tegra/clk-tegra124.c 			pmc, 0, &pll_c_params, NULL);
pmc              1067 drivers/clk/tegra/clk-tegra124.c 	clk = tegra_clk_register_pllc("pll_c2", "pll_ref", clk_base, pmc, 0,
pmc              1073 drivers/clk/tegra/clk-tegra124.c 	clk = tegra_clk_register_pllc("pll_c3", "pll_ref", clk_base, pmc, 0,
pmc              1079 drivers/clk/tegra/clk-tegra124.c 	clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
pmc              1132 drivers/clk/tegra/clk-tegra124.c 	clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc, 0,
pmc              1144 drivers/clk/tegra/clk-tegra124.c 	clk = tegra_clk_register_pllre("pll_re_vco", "pll_ref", clk_base, pmc,
pmc              3056 drivers/clk/tegra/clk-tegra210.c 				     void __iomem *pmc)
pmc              3062 drivers/clk/tegra/clk-tegra210.c 			pmc, 0, &pll_c_params, NULL);
pmc              3085 drivers/clk/tegra/clk-tegra210.c 			     pmc, 0, &pll_c2_params, NULL);
pmc              3091 drivers/clk/tegra/clk-tegra210.c 			     pmc, 0, &pll_c3_params, NULL);
pmc              3096 drivers/clk/tegra/clk-tegra210.c 	clk = tegra_clk_register_pllm("pll_m", "osc", clk_base, pmc,
pmc              3102 drivers/clk/tegra/clk-tegra210.c 	clk = tegra_clk_register_pllmb("pll_mb", "osc", clk_base, pmc,
pmc              3172 drivers/clk/tegra/clk-tegra210.c 	clk = tegra_clk_register_pll("pll_d", "pll_ref", clk_base, pmc, 0,
pmc              3185 drivers/clk/tegra/clk-tegra210.c 						clk_base, pmc, 0,
pmc              3213 drivers/clk/tegra/clk-tegra210.c 	clk = tegra_clk_register_pllre("pll_c4_vco", "pll_ref", clk_base, pmc,
pmc               315 drivers/clk/tegra/clk.h 	void __iomem	*pmc;
pmc               340 drivers/clk/tegra/clk.h 		void __iomem *clk_base, void __iomem *pmc,
pmc               345 drivers/clk/tegra/clk.h 		void __iomem *clk_base, void __iomem *pmc,
pmc               350 drivers/clk/tegra/clk.h 			    void __iomem *clk_base, void __iomem *pmc,
pmc               356 drivers/clk/tegra/clk.h 			   void __iomem *clk_base, void __iomem *pmc,
pmc               362 drivers/clk/tegra/clk.h 			   void __iomem *clk_base, void __iomem *pmc,
pmc               368 drivers/clk/tegra/clk.h 			   void __iomem *clk_base, void __iomem *pmc,
pmc               375 drivers/clk/tegra/clk.h 			   void __iomem *pmc, unsigned long flags,
pmc               393 drivers/clk/tegra/clk.h 				void __iomem *pmc, unsigned long flags,
pmc               409 drivers/clk/tegra/clk.h 			   void __iomem *clk_base, void __iomem *pmc,
pmc               553 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	u32 pmc, ppc;
pmc               566 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
pmc               572 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
pmc               582 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
pmc               585 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		pmc |= BIT(15); /* Unknown bit */
pmc               590 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 		pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
pmc               592 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
pmc               593 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
pmc               595 drivers/gpu/drm/etnaviv/etnaviv_gpu.c 	gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
pmc               217 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c 	     int index, struct nvkm_mc **pmc)
pmc               220 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c 	if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
pmc               222 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c 	nvkm_mc_ctor(func, device, index, *pmc);
pmc                65 drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c g84_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc                67 drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c 	return nvkm_mc_new_(&g84_mc, device, index, pmc);
pmc                65 drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c g98_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc                67 drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c 	return nvkm_mc_new_(&g98_mc, device, index, pmc);
pmc               115 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c gf100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc               117 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c 	return nvkm_mc_new_(&gf100_mc, device, index, pmc);
pmc                63 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c gk104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc                65 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c 	return nvkm_mc_new_(&gk104_mc, device, index, pmc);
pmc                38 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c gk20a_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc                40 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c 	return nvkm_mc_new_(&gk20a_mc, device, index, pmc);
pmc               109 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c 	      int index, struct nvkm_mc **pmc)
pmc               116 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c 	*pmc = &mc->base;
pmc               125 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc               127 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c 	return gp100_mc_new_(&gp100_mc, device, index, pmc);
pmc                46 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c gp10b_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc                48 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c 	return gp100_mc_new_(&gp10b_mc, device, index, pmc);
pmc                74 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c gt215_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc                76 drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c 	return nvkm_mc_new_(&gt215_mc, device, index, pmc);
pmc                83 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c nv04_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc                85 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c 	return nvkm_mc_new_(&nv04_mc, device, index, pmc);
pmc                47 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c nv11_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc                49 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c 	return nvkm_mc_new_(&nv11_mc, device, index, pmc);
pmc                56 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c nv17_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc                58 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c 	return nvkm_mc_new_(&nv17_mc, device, index, pmc);
pmc                51 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c nv44_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc                53 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c 	return nvkm_mc_new_(&nv44_mc, device, index, pmc);
pmc                58 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c nv50_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc                60 drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c 	return nvkm_mc_new_(&nv50_mc, device, index, pmc);
pmc                52 drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c tu102_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
pmc                54 drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu102.c 	return gp100_mc_new_(&tu102_mc, device, index, pmc);
pmc                70 drivers/memory/samsung/exynos-srom.c 	u32 bank, width, pmc = 0;
pmc                79 drivers/memory/samsung/exynos-srom.c 		pmc = 1 << EXYNOS_SROM_BCX__PMC__SHIFT;
pmc                94 drivers/memory/samsung/exynos-srom.c 	writel_relaxed(pmc | (timing[0] << EXYNOS_SROM_BCX__TACP__SHIFT) |
pmc               138 drivers/mfd/sm501.c 	unsigned long pmc = smc501_readl(sm->regs + SM501_POWER_MODE_CONTROL);
pmc               174 drivers/mfd/sm501.c 		 (pmc & 3 ) == 0 ? '*' : '-',
pmc               183 drivers/mfd/sm501.c 		(pmc & 3 ) == 1 ? '*' : '-',
pmc              9142 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 		u16 pmc;
pmc              9157 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 		pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
pmc              9158 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 		pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
pmc              9159 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 		pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
pmc              11005 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	u16 pmc;
pmc              11173 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
pmc              11174 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 	bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
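Note: the bnx2x hits touch two different registers of the PCI power-management capability: PCI_PM_CTRL is read-modify-written to arm PME# (writing PCI_PM_CTRL_PME_STATUS back also clears the sticky status bit, since it is write-1-to-clear), while PCI_PM_PMC is only read, to learn whether the function can signal PME from D3cold and therefore supports wake-on-LAN. A condensed sketch of the arming sequence, using only the standard PCI accessors and pci_regs.h constants quoted above:

	#include <linux/pci.h>

	static void example_enable_pme(struct pci_dev *pdev)
	{
		u16 pmc;

		pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
		pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
		pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
	}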
pmc                32 drivers/net/wireless/ath/wil6210/pmc.c static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
pmc                34 drivers/net/wireless/ath/wil6210/pmc.c 	return !!pmc->pring_va;
pmc                39 drivers/net/wireless/ath/wil6210/pmc.c 	memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
pmc                40 drivers/net/wireless/ath/wil6210/pmc.c 	mutex_init(&wil->pmc.lock);
pmc                56 drivers/net/wireless/ath/wil6210/pmc.c 	struct pmc_ctx *pmc = &wil->pmc;
pmc                62 drivers/net/wireless/ath/wil6210/pmc.c 	mutex_lock(&pmc->lock);
pmc                64 drivers/net/wireless/ath/wil6210/pmc.c 	if (wil_is_pmc_allocated(pmc)) {
pmc                93 drivers/net/wireless/ath/wil6210/pmc.c 	pmc->num_descriptors = num_descriptors;
pmc                94 drivers/net/wireless/ath/wil6210/pmc.c 	pmc->descriptor_size = descriptor_size;
pmc               100 drivers/net/wireless/ath/wil6210/pmc.c 	pmc->descriptors = kcalloc(num_descriptors,
pmc               103 drivers/net/wireless/ath/wil6210/pmc.c 	if (!pmc->descriptors) {
pmc               109 drivers/net/wireless/ath/wil6210/pmc.c 		     pmc->descriptors);
pmc               127 drivers/net/wireless/ath/wil6210/pmc.c 	pmc->pring_va = dma_alloc_coherent(dev,
pmc               129 drivers/net/wireless/ath/wil6210/pmc.c 			&pmc->pring_pa,
pmc               138 drivers/net/wireless/ath/wil6210/pmc.c 		     pmc->pring_va, &pmc->pring_pa,
pmc               143 drivers/net/wireless/ath/wil6210/pmc.c 	if (!pmc->pring_va) {
pmc               153 drivers/net/wireless/ath/wil6210/pmc.c 		struct vring_tx_desc *_d = &pmc->pring_va[i];
pmc               157 drivers/net/wireless/ath/wil6210/pmc.c 		pmc->descriptors[i].va = dma_alloc_coherent(dev,
pmc               159 drivers/net/wireless/ath/wil6210/pmc.c 			&pmc->descriptors[i].pa,
pmc               162 drivers/net/wireless/ath/wil6210/pmc.c 		if (unlikely(!pmc->descriptors[i].va)) {
pmc               168 drivers/net/wireless/ath/wil6210/pmc.c 			u32 *p = (u32 *)pmc->descriptors[i].va + j;
pmc               174 drivers/net/wireless/ath/wil6210/pmc.c 			cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
pmc               176 drivers/net/wireless/ath/wil6210/pmc.c 			cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
pmc               186 drivers/net/wireless/ath/wil6210/pmc.c 	pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
pmc               187 drivers/net/wireless/ath/wil6210/pmc.c 	pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
pmc               190 drivers/net/wireless/ath/wil6210/pmc.c 	pmc->last_cmd_status = wmi_send(wil,
pmc               195 drivers/net/wireless/ath/wil6210/pmc.c 	if (pmc->last_cmd_status) {
pmc               198 drivers/net/wireless/ath/wil6210/pmc.c 			pmc->last_cmd_status);
pmc               202 drivers/net/wireless/ath/wil6210/pmc.c 	mutex_unlock(&pmc->lock);
pmc               208 drivers/net/wireless/ath/wil6210/pmc.c 	for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
pmc               211 drivers/net/wireless/ath/wil6210/pmc.c 				  pmc->descriptors[i].va,
pmc               212 drivers/net/wireless/ath/wil6210/pmc.c 				  pmc->descriptors[i].pa);
pmc               214 drivers/net/wireless/ath/wil6210/pmc.c 		pmc->descriptors[i].va = NULL;
pmc               220 drivers/net/wireless/ath/wil6210/pmc.c 			  pmc->pring_va,
pmc               221 drivers/net/wireless/ath/wil6210/pmc.c 			  pmc->pring_pa);
pmc               223 drivers/net/wireless/ath/wil6210/pmc.c 	pmc->pring_va = NULL;
pmc               227 drivers/net/wireless/ath/wil6210/pmc.c 	kfree(pmc->descriptors);
pmc               228 drivers/net/wireless/ath/wil6210/pmc.c 	pmc->descriptors = NULL;
pmc               231 drivers/net/wireless/ath/wil6210/pmc.c 	pmc->last_cmd_status = last_cmd_err;
pmc               232 drivers/net/wireless/ath/wil6210/pmc.c 	mutex_unlock(&pmc->lock);
pmc               241 drivers/net/wireless/ath/wil6210/pmc.c 	struct pmc_ctx *pmc = &wil->pmc;
pmc               246 drivers/net/wireless/ath/wil6210/pmc.c 	mutex_lock(&pmc->lock);
pmc               248 drivers/net/wireless/ath/wil6210/pmc.c 	pmc->last_cmd_status = 0;
pmc               250 drivers/net/wireless/ath/wil6210/pmc.c 	if (!wil_is_pmc_allocated(pmc)) {
pmc               253 drivers/net/wireless/ath/wil6210/pmc.c 		pmc->last_cmd_status = -EPERM;
pmc               254 drivers/net/wireless/ath/wil6210/pmc.c 		mutex_unlock(&pmc->lock);
pmc               261 drivers/net/wireless/ath/wil6210/pmc.c 		pmc->last_cmd_status =
pmc               264 drivers/net/wireless/ath/wil6210/pmc.c 		if (pmc->last_cmd_status) {
pmc               267 drivers/net/wireless/ath/wil6210/pmc.c 				pmc->last_cmd_status);
pmc               275 drivers/net/wireless/ath/wil6210/pmc.c 	if (pmc->pring_va) {
pmc               277 drivers/net/wireless/ath/wil6210/pmc.c 				  pmc->num_descriptors;
pmc               280 drivers/net/wireless/ath/wil6210/pmc.c 			     pmc->pring_va);
pmc               281 drivers/net/wireless/ath/wil6210/pmc.c 		dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
pmc               283 drivers/net/wireless/ath/wil6210/pmc.c 		pmc->pring_va = NULL;
pmc               285 drivers/net/wireless/ath/wil6210/pmc.c 		pmc->last_cmd_status = -ENOENT;
pmc               288 drivers/net/wireless/ath/wil6210/pmc.c 	if (pmc->descriptors) {
pmc               292 drivers/net/wireless/ath/wil6210/pmc.c 		     i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
pmc               294 drivers/net/wireless/ath/wil6210/pmc.c 					  pmc->descriptor_size,
pmc               295 drivers/net/wireless/ath/wil6210/pmc.c 					  pmc->descriptors[i].va,
pmc               296 drivers/net/wireless/ath/wil6210/pmc.c 					  pmc->descriptors[i].pa);
pmc               297 drivers/net/wireless/ath/wil6210/pmc.c 			pmc->descriptors[i].va = NULL;
pmc               300 drivers/net/wireless/ath/wil6210/pmc.c 			     pmc->num_descriptors);
pmc               303 drivers/net/wireless/ath/wil6210/pmc.c 			     pmc->descriptors);
pmc               304 drivers/net/wireless/ath/wil6210/pmc.c 		kfree(pmc->descriptors);
pmc               305 drivers/net/wireless/ath/wil6210/pmc.c 		pmc->descriptors = NULL;
pmc               307 drivers/net/wireless/ath/wil6210/pmc.c 		pmc->last_cmd_status = -ENOENT;
pmc               310 drivers/net/wireless/ath/wil6210/pmc.c 	mutex_unlock(&pmc->lock);
pmc               320 drivers/net/wireless/ath/wil6210/pmc.c 		     wil->pmc.last_cmd_status);
pmc               322 drivers/net/wireless/ath/wil6210/pmc.c 	return wil->pmc.last_cmd_status;
pmc               333 drivers/net/wireless/ath/wil6210/pmc.c 	struct pmc_ctx *pmc = &wil->pmc;
pmc               339 drivers/net/wireless/ath/wil6210/pmc.c 	mutex_lock(&pmc->lock);
pmc               341 drivers/net/wireless/ath/wil6210/pmc.c 	if (!wil_is_pmc_allocated(pmc)) {
pmc               343 drivers/net/wireless/ath/wil6210/pmc.c 		pmc->last_cmd_status = -EPERM;
pmc               344 drivers/net/wireless/ath/wil6210/pmc.c 		mutex_unlock(&pmc->lock);
pmc               348 drivers/net/wireless/ath/wil6210/pmc.c 	pmc_size = pmc->descriptor_size * pmc->num_descriptors;
pmc               354 drivers/net/wireless/ath/wil6210/pmc.c 	pmc->last_cmd_status = 0;
pmc               357 drivers/net/wireless/ath/wil6210/pmc.c 	do_div(idx, pmc->descriptor_size);
pmc               358 drivers/net/wireless/ath/wil6210/pmc.c 	offset = *f_pos - (idx * pmc->descriptor_size);
pmc               364 drivers/net/wireless/ath/wil6210/pmc.c 		pmc->last_cmd_status = -ERANGE;
pmc               376 drivers/net/wireless/ath/wil6210/pmc.c 					 pmc->descriptors[idx].va,
pmc               377 drivers/net/wireless/ath/wil6210/pmc.c 					 pmc->descriptor_size);
pmc               380 drivers/net/wireless/ath/wil6210/pmc.c 	mutex_unlock(&pmc->lock);
pmc               389 drivers/net/wireless/ath/wil6210/pmc.c 	struct pmc_ctx *pmc = &wil->pmc;
pmc               392 drivers/net/wireless/ath/wil6210/pmc.c 	mutex_lock(&pmc->lock);
pmc               394 drivers/net/wireless/ath/wil6210/pmc.c 	if (!wil_is_pmc_allocated(pmc)) {
pmc               396 drivers/net/wireless/ath/wil6210/pmc.c 		pmc->last_cmd_status = -EPERM;
pmc               397 drivers/net/wireless/ath/wil6210/pmc.c 		mutex_unlock(&pmc->lock);
pmc               401 drivers/net/wireless/ath/wil6210/pmc.c 	pmc_size = pmc->descriptor_size * pmc->num_descriptors;
pmc               431 drivers/net/wireless/ath/wil6210/pmc.c 	mutex_unlock(&pmc->lock);
pmc               439 drivers/net/wireless/ath/wil6210/pmc.c 	struct pmc_ctx *pmc = &wil->pmc;
pmc               441 drivers/net/wireless/ath/wil6210/pmc.c 		sizeof(struct vring_rx_desc) * pmc->num_descriptors;
pmc               443 drivers/net/wireless/ath/wil6210/pmc.c 	mutex_lock(&pmc->lock);
pmc               445 drivers/net/wireless/ath/wil6210/pmc.c 	if (!wil_is_pmc_allocated(pmc)) {
pmc               447 drivers/net/wireless/ath/wil6210/pmc.c 		pmc->last_cmd_status = -EPERM;
pmc               448 drivers/net/wireless/ath/wil6210/pmc.c 		mutex_unlock(&pmc->lock);
pmc               454 drivers/net/wireless/ath/wil6210/pmc.c 	seq_write(s, pmc->pring_va, pmc_ring_size);
pmc               456 drivers/net/wireless/ath/wil6210/pmc.c 	mutex_unlock(&pmc->lock);
pmc              1033 drivers/net/wireless/ath/wil6210/wil6210.h 	struct pmc_ctx pmc;
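
The wil6210 lines above cover wil_pmc_alloc()/wil_pmc_free(): the driver takes pmc->lock, allocates the descriptor array with kcalloc(), backs the ring and each descriptor with dma_alloc_coherent(), and on any failure unwinds exactly what was allocated, clears the pointers and records last_cmd_status before unlocking. A minimal userspace analog of that allocate/unwind shape, using hypothetical names (struct pmc_like_ctx, pmc_like_alloc) rather than the driver's own:

/*
 * Illustrative analog (not the driver itself) of the allocate-then-unwind
 * pattern in wil_pmc_alloc(): take the lock, allocate the descriptor array
 * and each payload buffer, and on any failure free exactly what was
 * allocated, reset the pointers and record an error status before unlocking.
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct pmc_like_ctx {
	pthread_mutex_t lock;
	void **descriptors;		/* per-descriptor payload buffers */
	size_t num_descriptors;
	size_t descriptor_size;
	int last_cmd_status;
};

static int pmc_like_alloc(struct pmc_like_ctx *ctx, size_t num, size_t size)
{
	size_t i;

	pthread_mutex_lock(&ctx->lock);

	if (ctx->descriptors) {		/* already allocated */
		ctx->last_cmd_status = -1;
		pthread_mutex_unlock(&ctx->lock);
		return -1;
	}

	ctx->descriptors = calloc(num, sizeof(*ctx->descriptors));
	if (!ctx->descriptors)
		goto err;

	for (i = 0; i < num; i++) {
		ctx->descriptors[i] = malloc(size);
		if (!ctx->descriptors[i])
			goto err_unwind;
		memset(ctx->descriptors[i], 0, size);
	}

	ctx->num_descriptors = num;
	ctx->descriptor_size = size;
	ctx->last_cmd_status = 0;
	pthread_mutex_unlock(&ctx->lock);
	return 0;

err_unwind:
	/* free only what was successfully allocated, in order */
	for (i = 0; i < num && ctx->descriptors[i]; i++) {
		free(ctx->descriptors[i]);
		ctx->descriptors[i] = NULL;
	}
	free(ctx->descriptors);
	ctx->descriptors = NULL;
err:
	ctx->last_cmd_status = -1;
	pthread_mutex_unlock(&ctx->lock);
	return -1;
}
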
pmc               321 drivers/net/wireless/intel/iwlegacy/3945.h 	u16 pmc;		/* abs.ofs: 20 */
pmc                38 drivers/net/wireless/realtek/rtw88/rtw8822b.h 	u8 pmc[2];
pmc                25 drivers/net/wireless/realtek/rtw88/rtw8822c.h 	u8 pmc[2];
pmc              2786 drivers/pci/pci.c 	u16 pmc;
pmc              2802 drivers/pci/pci.c 	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
pmc              2804 drivers/pci/pci.c 	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
pmc              2806 drivers/pci/pci.c 			pmc & PCI_PM_CAP_VER_MASK);
pmc              2819 drivers/pci/pci.c 		if (pmc & PCI_PM_CAP_D1)
pmc              2821 drivers/pci/pci.c 		if (pmc & PCI_PM_CAP_D2)
pmc              2830 drivers/pci/pci.c 	pmc &= PCI_PM_CAP_PME_MASK;
pmc              2831 drivers/pci/pci.c 	if (pmc) {
pmc              2833 drivers/pci/pci.c 			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
pmc              2834 drivers/pci/pci.c 			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
pmc              2835 drivers/pci/pci.c 			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
pmc              2836 drivers/pci/pci.c 			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
pmc              2837 drivers/pci/pci.c 			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
pmc              2838 drivers/pci/pci.c 		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
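
The pci.c lines above are pci_pm_init() decoding the 16-bit Power Management Capabilities (PMC) register: reject unknown spec versions, note D1/D2 support, then mask out and report the PME# support bits. A standalone sketch of that decode; the mask values mirror include/uapi/linux/pci_regs.h, while decode_pmc() itself is only illustrative:

#include <stdint.h>
#include <stdio.h>

#define PCI_PM_CAP_VER_MASK	0x0007	/* PM spec version */
#define PCI_PM_CAP_D1		0x0200	/* D1 power state supported */
#define PCI_PM_CAP_D2		0x0400	/* D2 power state supported */
#define PCI_PM_CAP_PME_MASK	0xF800	/* PME# supported from ... */
#define PCI_PM_CAP_PME_SHIFT	11
#define PCI_PM_CAP_PME_D0	0x0800
#define PCI_PM_CAP_PME_D1	0x1000
#define PCI_PM_CAP_PME_D2	0x2000
#define PCI_PM_CAP_PME_D3	0x4000	/* D3hot */
#define PCI_PM_CAP_PME_D3cold	0x8000

static void decode_pmc(uint16_t pmc)
{
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		printf("unsupported PM cap regs version %u\n",
		       (unsigned int)(pmc & PCI_PM_CAP_VER_MASK));
		return;
	}

	printf("D1%s supported, D2%s supported\n",
	       (pmc & PCI_PM_CAP_D1) ? "" : " not",
	       (pmc & PCI_PM_CAP_D2) ? "" : " not");

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc)
		printf("PME# from%s%s%s%s%s (support mask 0x%x)\n",
		       (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
		       (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
		       (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
		       (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
		       (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "",
		       (unsigned int)(pmc >> PCI_PM_CAP_PME_SHIFT));
}
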
pmc              2128 drivers/pinctrl/tegra/pinctrl-tegra20.c 	MUX_PG(pmc,    PWR_ON,    PWR_INTR,  RSVD3,     RSVD4,         0x14, 23, 0x98, 18, -1,   -1),
pmc                33 drivers/platform/x86/intel_pmc_core.c static struct pmc_dev pmc;
pmc               405 drivers/platform/x86/intel_pmc_core.c 	struct pmc_dev *pmcdev = &pmc;
pmc               447 drivers/platform/x86/intel_pmc_core.c 	struct pmc_dev *pmcdev = &pmc;
pmc               456 drivers/platform/x86/intel_pmc_core.c 	struct pmc_dev *pmcdev = &pmc;
pmc               567 drivers/platform/x86/intel_pmc_core.c 	struct pmc_dev *pmcdev = &pmc;
pmc               836 drivers/platform/x86/intel_pmc_core.c 	struct pmc_dev *pmcdev = &pmc;
pmc               863 drivers/platform/x86/intel_pmc_core.c 	struct pmc_dev *pmcdev = &pmc;
pmc               510 drivers/platform/x86/intel_pmc_ipc.c 	struct intel_pmc_ipc_dev *pmc = &ipcdev;
pmc               514 drivers/platform/x86/intel_pmc_ipc.c 	if (pmc->dev)
pmc               517 drivers/platform/x86/intel_pmc_ipc.c 	pmc->irq_mode = IPC_TRIGGER_MODE_IRQ;
pmc               529 drivers/platform/x86/intel_pmc_ipc.c 	init_completion(&pmc->cmd_complete);
pmc               531 drivers/platform/x86/intel_pmc_ipc.c 	pmc->ipc_base = pcim_iomap_table(pdev)[0];
pmc               534 drivers/platform/x86/intel_pmc_ipc.c 				pmc);
pmc               540 drivers/platform/x86/intel_pmc_ipc.c 	pmc->dev = &pdev->dev;
pmc               542 drivers/platform/x86/intel_pmc_ipc.c 	pci_set_drvdata(pdev, pmc);
pmc               203 drivers/platform/x86/pmc_atom.c static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset)
pmc               205 drivers/platform/x86/pmc_atom.c 	return readl(pmc->regmap + reg_offset);
pmc               208 drivers/platform/x86/pmc_atom.c static inline void pmc_reg_write(struct pmc_dev *pmc, int reg_offset, u32 val)
pmc               210 drivers/platform/x86/pmc_atom.c 	writel(val, pmc->regmap + reg_offset);
pmc               215 drivers/platform/x86/pmc_atom.c 	struct pmc_dev *pmc = &pmc_device;
pmc               217 drivers/platform/x86/pmc_atom.c 	if (!pmc->init)
pmc               220 drivers/platform/x86/pmc_atom.c 	*value = pmc_reg_read(pmc, offset);
pmc               227 drivers/platform/x86/pmc_atom.c 	struct pmc_dev *pmc = &pmc_device;
pmc               229 drivers/platform/x86/pmc_atom.c 	if (!pmc->init)
pmc               232 drivers/platform/x86/pmc_atom.c 	pmc_reg_write(pmc, offset, value);
pmc               254 drivers/platform/x86/pmc_atom.c static void pmc_hw_reg_setup(struct pmc_dev *pmc)
pmc               264 drivers/platform/x86/pmc_atom.c 	pmc_reg_write(pmc, PMC_S0IX_WAKE_EN, (u32)PMC_WAKE_EN_SETTING);
pmc               285 drivers/platform/x86/pmc_atom.c 	struct pmc_dev *pmc = s->private;
pmc               286 drivers/platform/x86/pmc_atom.c 	const struct pmc_reg_map *m = pmc->map;
pmc               290 drivers/platform/x86/pmc_atom.c 	func_dis = pmc_reg_read(pmc, PMC_FUNC_DIS);
pmc               291 drivers/platform/x86/pmc_atom.c 	func_dis_2 = pmc_reg_read(pmc, PMC_FUNC_DIS_2);
pmc               292 drivers/platform/x86/pmc_atom.c 	d3_sts_0 = pmc_reg_read(pmc, PMC_D3_STS_0);
pmc               293 drivers/platform/x86/pmc_atom.c 	d3_sts_1 = pmc_reg_read(pmc, PMC_D3_STS_1);
pmc               308 drivers/platform/x86/pmc_atom.c 	struct pmc_dev *pmc = s->private;
pmc               309 drivers/platform/x86/pmc_atom.c 	const struct pmc_bit_map *map = pmc->map->pss;
pmc               310 drivers/platform/x86/pmc_atom.c 	u32 pss = pmc_reg_read(pmc, PMC_PSS);
pmc               325 drivers/platform/x86/pmc_atom.c 	struct pmc_dev *pmc = s->private;
pmc               328 drivers/platform/x86/pmc_atom.c 	s0ir_tmr = (u64)pmc_reg_read(pmc, PMC_S0IR_TMR) << PMC_TMR_SHIFT;
pmc               329 drivers/platform/x86/pmc_atom.c 	s0i1_tmr = (u64)pmc_reg_read(pmc, PMC_S0I1_TMR) << PMC_TMR_SHIFT;
pmc               330 drivers/platform/x86/pmc_atom.c 	s0i2_tmr = (u64)pmc_reg_read(pmc, PMC_S0I2_TMR) << PMC_TMR_SHIFT;
pmc               331 drivers/platform/x86/pmc_atom.c 	s0i3_tmr = (u64)pmc_reg_read(pmc, PMC_S0I3_TMR) << PMC_TMR_SHIFT;
pmc               332 drivers/platform/x86/pmc_atom.c 	s0_tmr = (u64)pmc_reg_read(pmc, PMC_S0_TMR) << PMC_TMR_SHIFT;
pmc               344 drivers/platform/x86/pmc_atom.c static void pmc_dbgfs_register(struct pmc_dev *pmc)
pmc               350 drivers/platform/x86/pmc_atom.c 	pmc->dbgfs_dir = dir;
pmc               352 drivers/platform/x86/pmc_atom.c 	debugfs_create_file("dev_state", S_IFREG | S_IRUGO, dir, pmc,
pmc               354 drivers/platform/x86/pmc_atom.c 	debugfs_create_file("pss_state", S_IFREG | S_IRUGO, dir, pmc,
pmc               356 drivers/platform/x86/pmc_atom.c 	debugfs_create_file("sleep_state", S_IFREG | S_IRUGO, dir, pmc,
pmc               360 drivers/platform/x86/pmc_atom.c static void pmc_dbgfs_register(struct pmc_dev *pmc)
pmc               484 drivers/platform/x86/pmc_atom.c 	struct pmc_dev *pmc = &pmc_device;
pmc               497 drivers/platform/x86/pmc_atom.c 	pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr);
pmc               498 drivers/platform/x86/pmc_atom.c 	pmc->base_addr &= PMC_BASE_ADDR_MASK;
pmc               500 drivers/platform/x86/pmc_atom.c 	pmc->regmap = ioremap_nocache(pmc->base_addr, PMC_MMIO_REG_LEN);
pmc               501 drivers/platform/x86/pmc_atom.c 	if (!pmc->regmap) {
pmc               506 drivers/platform/x86/pmc_atom.c 	pmc->map = map;
pmc               509 drivers/platform/x86/pmc_atom.c 	pmc_hw_reg_setup(pmc);
pmc               511 drivers/platform/x86/pmc_atom.c 	pmc_dbgfs_register(pmc);
pmc               514 drivers/platform/x86/pmc_atom.c 	ret = pmc_setup_clks(pdev, pmc->regmap, data);
pmc               519 drivers/platform/x86/pmc_atom.c 	pmc->init = true;
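
The pmc_atom.c lines above show the usual thin MMIO accessor pair, pmc_reg_read()/pmc_reg_write(), over a single ioremap()'d base, with exported helpers refusing to touch the hardware until pmc->init is set at the end of probe. A userspace-flavoured sketch of that pattern, using hypothetical names (struct pmc_like_dev and friends):

/*
 * Sketch of the thin MMIO accessor pattern: one mapped base pointer plus
 * read/write helpers, and callers that check pmc->init before touching
 * registers.  In the driver the base comes from ioremap() and the byte
 * offset is applied directly; here a volatile word array stands in.
 */
#include <stdbool.h>
#include <stdint.h>

struct pmc_like_dev {
	volatile uint32_t *regmap;	/* would come from ioremap() in the kernel */
	bool init;
};

static inline uint32_t pmc_like_read(struct pmc_like_dev *pmc, int reg_offset)
{
	/* reg_offset is a byte offset, as in readl(pmc->regmap + reg_offset) */
	return pmc->regmap[reg_offset / 4];
}

static inline void pmc_like_write(struct pmc_like_dev *pmc, int reg_offset,
				  uint32_t val)
{
	pmc->regmap[reg_offset / 4] = val;
}

static int pmc_like_atom_read(struct pmc_like_dev *pmc, int offset,
			      uint32_t *value)
{
	if (!pmc->init)
		return -1;	/* not probed yet */

	*value = pmc_like_read(pmc, offset);
	return 0;
}
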
pmc               147 drivers/soc/tegra/pmc.c 	struct tegra_pmc *pmc;
pmc               225 drivers/soc/tegra/pmc.c 	void (*init)(struct tegra_pmc *pmc);
pmc               226 drivers/soc/tegra/pmc.c 	void (*setup_irq_polarity)(struct tegra_pmc *pmc,
pmc               349 drivers/soc/tegra/pmc.c static struct tegra_pmc *pmc = &(struct tegra_pmc) {
pmc               360 drivers/soc/tegra/pmc.c static u32 tegra_pmc_readl(struct tegra_pmc *pmc, unsigned long offset)
pmc               364 drivers/soc/tegra/pmc.c 	if (pmc->tz_only) {
pmc               368 drivers/soc/tegra/pmc.c 			if (pmc->dev)
pmc               369 drivers/soc/tegra/pmc.c 				dev_warn(pmc->dev, "%s(): SMC failed: %lu\n",
pmc               379 drivers/soc/tegra/pmc.c 	return readl(pmc->base + offset);
pmc               382 drivers/soc/tegra/pmc.c static void tegra_pmc_writel(struct tegra_pmc *pmc, u32 value,
pmc               387 drivers/soc/tegra/pmc.c 	if (pmc->tz_only) {
pmc               391 drivers/soc/tegra/pmc.c 			if (pmc->dev)
pmc               392 drivers/soc/tegra/pmc.c 				dev_warn(pmc->dev, "%s(): SMC failed: %lu\n",
pmc               399 drivers/soc/tegra/pmc.c 		writel(value, pmc->base + offset);
pmc               403 drivers/soc/tegra/pmc.c static u32 tegra_pmc_scratch_readl(struct tegra_pmc *pmc, unsigned long offset)
pmc               405 drivers/soc/tegra/pmc.c 	if (pmc->tz_only)
pmc               406 drivers/soc/tegra/pmc.c 		return tegra_pmc_readl(pmc, offset);
pmc               408 drivers/soc/tegra/pmc.c 	return readl(pmc->scratch + offset);
pmc               411 drivers/soc/tegra/pmc.c static void tegra_pmc_scratch_writel(struct tegra_pmc *pmc, u32 value,
pmc               414 drivers/soc/tegra/pmc.c 	if (pmc->tz_only)
pmc               415 drivers/soc/tegra/pmc.c 		tegra_pmc_writel(pmc, value, offset);
pmc               417 drivers/soc/tegra/pmc.c 		writel(value, pmc->scratch + offset);
pmc               427 drivers/soc/tegra/pmc.c 	if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
pmc               428 drivers/soc/tegra/pmc.c 		return (tegra_pmc_readl(pmc, GPU_RG_CNTRL) & 0x1) == 0;
pmc               430 drivers/soc/tegra/pmc.c 		return (tegra_pmc_readl(pmc, PWRGATE_STATUS) & BIT(id)) != 0;
pmc               433 drivers/soc/tegra/pmc.c static inline bool tegra_powergate_is_valid(struct tegra_pmc *pmc, int id)
pmc               435 drivers/soc/tegra/pmc.c 	return (pmc->soc && pmc->soc->powergates[id]);
pmc               438 drivers/soc/tegra/pmc.c static inline bool tegra_powergate_is_available(struct tegra_pmc *pmc, int id)
pmc               440 drivers/soc/tegra/pmc.c 	return test_bit(id, pmc->powergates_available);
pmc               443 drivers/soc/tegra/pmc.c static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
pmc               447 drivers/soc/tegra/pmc.c 	if (!pmc || !pmc->soc || !name)
pmc               450 drivers/soc/tegra/pmc.c 	for (i = 0; i < pmc->soc->num_powergates; i++) {
pmc               451 drivers/soc/tegra/pmc.c 		if (!tegra_powergate_is_valid(pmc, i))
pmc               454 drivers/soc/tegra/pmc.c 		if (!strcmp(name, pmc->soc->powergates[i]))
pmc               467 drivers/soc/tegra/pmc.c static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id,
pmc               473 drivers/soc/tegra/pmc.c 	if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
pmc               476 drivers/soc/tegra/pmc.c 	mutex_lock(&pmc->powergates_lock);
pmc               479 drivers/soc/tegra/pmc.c 		mutex_unlock(&pmc->powergates_lock);
pmc               483 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
pmc               488 drivers/soc/tegra/pmc.c 	mutex_unlock(&pmc->powergates_lock);
pmc               493 drivers/soc/tegra/pmc.c static int __tegra_powergate_remove_clamping(struct tegra_pmc *pmc,
pmc               498 drivers/soc/tegra/pmc.c 	mutex_lock(&pmc->powergates_lock);
pmc               505 drivers/soc/tegra/pmc.c 		if (pmc->soc->has_gpu_clamps) {
pmc               506 drivers/soc/tegra/pmc.c 			tegra_pmc_writel(pmc, 0, GPU_RG_CNTRL);
pmc               522 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, mask, REMOVE_CLAMPING);
pmc               525 drivers/soc/tegra/pmc.c 	mutex_unlock(&pmc->powergates_lock);
pmc               574 drivers/soc/tegra/pmc.c 	err = tegra_powergate_set(pg->pmc, pg->id, true);
pmc               586 drivers/soc/tegra/pmc.c 	err = __tegra_powergate_remove_clamping(pg->pmc, pg->id);
pmc               598 drivers/soc/tegra/pmc.c 	if (pg->pmc->soc->needs_mbist_war)
pmc               613 drivers/soc/tegra/pmc.c 	tegra_powergate_set(pg->pmc, pg->id, false);
pmc               638 drivers/soc/tegra/pmc.c 	err = tegra_powergate_set(pg->pmc, pg->id, false);
pmc               659 drivers/soc/tegra/pmc.c 	struct device *dev = pg->pmc->dev;
pmc               678 drivers/soc/tegra/pmc.c 	struct device *dev = pg->pmc->dev;
pmc               703 drivers/soc/tegra/pmc.c 	if (!tegra_powergate_is_available(pmc, id))
pmc               706 drivers/soc/tegra/pmc.c 	return tegra_powergate_set(pmc, id, true);
pmc               716 drivers/soc/tegra/pmc.c 	if (!tegra_powergate_is_available(pmc, id))
pmc               719 drivers/soc/tegra/pmc.c 	return tegra_powergate_set(pmc, id, false);
pmc               728 drivers/soc/tegra/pmc.c static int tegra_powergate_is_powered(struct tegra_pmc *pmc, unsigned int id)
pmc               730 drivers/soc/tegra/pmc.c 	if (!tegra_powergate_is_valid(pmc, id))
pmc               742 drivers/soc/tegra/pmc.c 	if (!tegra_powergate_is_available(pmc, id))
pmc               745 drivers/soc/tegra/pmc.c 	return __tegra_powergate_remove_clamping(pmc, id);
pmc               763 drivers/soc/tegra/pmc.c 	if (!tegra_powergate_is_available(pmc, id))
pmc               774 drivers/soc/tegra/pmc.c 	pg->pmc = pmc;
pmc               778 drivers/soc/tegra/pmc.c 		dev_err(pmc->dev, "failed to turn on partition %d: %d\n", id,
pmc               795 drivers/soc/tegra/pmc.c static int tegra_get_cpu_powergate_id(struct tegra_pmc *pmc,
pmc               798 drivers/soc/tegra/pmc.c 	if (pmc->soc && cpuid < pmc->soc->num_cpu_powergates)
pmc               799 drivers/soc/tegra/pmc.c 		return pmc->soc->cpu_powergates[cpuid];
pmc               812 drivers/soc/tegra/pmc.c 	id = tegra_get_cpu_powergate_id(pmc, cpuid);
pmc               816 drivers/soc/tegra/pmc.c 	return tegra_powergate_is_powered(pmc, id);
pmc               827 drivers/soc/tegra/pmc.c 	id = tegra_get_cpu_powergate_id(pmc, cpuid);
pmc               831 drivers/soc/tegra/pmc.c 	return tegra_powergate_set(pmc, id, true);
pmc               842 drivers/soc/tegra/pmc.c 	id = tegra_get_cpu_powergate_id(pmc, cpuid);
pmc               855 drivers/soc/tegra/pmc.c 	value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0);
pmc               869 drivers/soc/tegra/pmc.c 	tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0);
pmc               872 drivers/soc/tegra/pmc.c 	value = tegra_pmc_readl(pmc, PMC_CNTRL);
pmc               874 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, value, PMC_CNTRL);
pmc               892 drivers/soc/tegra/pmc.c 	for (i = 0; i < pmc->soc->num_powergates; i++) {
pmc               893 drivers/soc/tegra/pmc.c 		status = tegra_powergate_is_powered(pmc, i);
pmc               897 drivers/soc/tegra/pmc.c 		seq_printf(s, " %9s %7s\n", pmc->soc->powergates[i],
pmc               908 drivers/soc/tegra/pmc.c 	pmc->debugfs = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
pmc               910 drivers/soc/tegra/pmc.c 	if (!pmc->debugfs)
pmc               955 drivers/soc/tegra/pmc.c 	struct device *dev = pg->pmc->dev;
pmc               990 drivers/soc/tegra/pmc.c static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
pmc               992 drivers/soc/tegra/pmc.c 	struct device *dev = pmc->dev;
pmc              1001 drivers/soc/tegra/pmc.c 	id = tegra_powergate_lookup(pmc, np->name);
pmc              1012 drivers/soc/tegra/pmc.c 	clear_bit(id, pmc->powergates_available);
pmc              1018 drivers/soc/tegra/pmc.c 	pg->pmc = pmc;
pmc              1020 drivers/soc/tegra/pmc.c 	off = !tegra_powergate_is_powered(pmc, pg->id);
pmc              1072 drivers/soc/tegra/pmc.c 	set_bit(id, pmc->powergates_available);
pmc              1080 drivers/soc/tegra/pmc.c static int tegra_powergate_init(struct tegra_pmc *pmc,
pmc              1091 drivers/soc/tegra/pmc.c 		err = tegra_powergate_add(pmc, child);
pmc              1114 drivers/soc/tegra/pmc.c 	set_bit(pg->id, pmc->powergates_available);
pmc              1142 drivers/soc/tegra/pmc.c tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id)
pmc              1146 drivers/soc/tegra/pmc.c 	for (i = 0; i < pmc->soc->num_io_pads; i++)
pmc              1147 drivers/soc/tegra/pmc.c 		if (pmc->soc->io_pads[i].id == id)
pmc              1148 drivers/soc/tegra/pmc.c 			return &pmc->soc->io_pads[i];
pmc              1153 drivers/soc/tegra/pmc.c static int tegra_io_pad_get_dpd_register_bit(struct tegra_pmc *pmc,
pmc              1161 drivers/soc/tegra/pmc.c 	pad = tegra_io_pad_find(pmc, id);
pmc              1163 drivers/soc/tegra/pmc.c 		dev_err(pmc->dev, "invalid I/O pad ID %u\n", id);
pmc              1173 drivers/soc/tegra/pmc.c 		*status = pmc->soc->regs->dpd_status;
pmc              1174 drivers/soc/tegra/pmc.c 		*request = pmc->soc->regs->dpd_req;
pmc              1176 drivers/soc/tegra/pmc.c 		*status = pmc->soc->regs->dpd2_status;
pmc              1177 drivers/soc/tegra/pmc.c 		*request = pmc->soc->regs->dpd2_req;
pmc              1183 drivers/soc/tegra/pmc.c static int tegra_io_pad_prepare(struct tegra_pmc *pmc, enum tegra_io_pad id,
pmc              1190 drivers/soc/tegra/pmc.c 	err = tegra_io_pad_get_dpd_register_bit(pmc, id, request, status, mask);
pmc              1194 drivers/soc/tegra/pmc.c 	if (pmc->clk) {
pmc              1195 drivers/soc/tegra/pmc.c 		rate = clk_get_rate(pmc->clk);
pmc              1197 drivers/soc/tegra/pmc.c 			dev_err(pmc->dev, "failed to get clock rate\n");
pmc              1201 drivers/soc/tegra/pmc.c 		tegra_pmc_writel(pmc, DPD_SAMPLE_ENABLE, DPD_SAMPLE);
pmc              1206 drivers/soc/tegra/pmc.c 		tegra_pmc_writel(pmc, value, SEL_DPD_TIM);
pmc              1212 drivers/soc/tegra/pmc.c static int tegra_io_pad_poll(struct tegra_pmc *pmc, unsigned long offset,
pmc              1220 drivers/soc/tegra/pmc.c 		value = tegra_pmc_readl(pmc, offset);
pmc              1230 drivers/soc/tegra/pmc.c static void tegra_io_pad_unprepare(struct tegra_pmc *pmc)
pmc              1232 drivers/soc/tegra/pmc.c 	if (pmc->clk)
pmc              1233 drivers/soc/tegra/pmc.c 		tegra_pmc_writel(pmc, DPD_SAMPLE_DISABLE, DPD_SAMPLE);
pmc              1248 drivers/soc/tegra/pmc.c 	mutex_lock(&pmc->powergates_lock);
pmc              1250 drivers/soc/tegra/pmc.c 	err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask);
pmc              1252 drivers/soc/tegra/pmc.c 		dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err);
pmc              1256 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_OFF | mask, request);
pmc              1258 drivers/soc/tegra/pmc.c 	err = tegra_io_pad_poll(pmc, status, mask, 0, 250);
pmc              1260 drivers/soc/tegra/pmc.c 		dev_err(pmc->dev, "failed to enable I/O pad: %d\n", err);
pmc              1264 drivers/soc/tegra/pmc.c 	tegra_io_pad_unprepare(pmc);
pmc              1267 drivers/soc/tegra/pmc.c 	mutex_unlock(&pmc->powergates_lock);
pmc              1284 drivers/soc/tegra/pmc.c 	mutex_lock(&pmc->powergates_lock);
pmc              1286 drivers/soc/tegra/pmc.c 	err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask);
pmc              1288 drivers/soc/tegra/pmc.c 		dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err);
pmc              1292 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_ON | mask, request);
pmc              1294 drivers/soc/tegra/pmc.c 	err = tegra_io_pad_poll(pmc, status, mask, mask, 250);
pmc              1296 drivers/soc/tegra/pmc.c 		dev_err(pmc->dev, "failed to disable I/O pad: %d\n", err);
pmc              1300 drivers/soc/tegra/pmc.c 	tegra_io_pad_unprepare(pmc);
pmc              1303 drivers/soc/tegra/pmc.c 	mutex_unlock(&pmc->powergates_lock);
pmc              1308 drivers/soc/tegra/pmc.c static int tegra_io_pad_is_powered(struct tegra_pmc *pmc, enum tegra_io_pad id)
pmc              1314 drivers/soc/tegra/pmc.c 	err = tegra_io_pad_get_dpd_register_bit(pmc, id, &request, &status,
pmc              1319 drivers/soc/tegra/pmc.c 	value = tegra_pmc_readl(pmc, status);
pmc              1324 drivers/soc/tegra/pmc.c static int tegra_io_pad_set_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id,
pmc              1330 drivers/soc/tegra/pmc.c 	pad = tegra_io_pad_find(pmc, id);
pmc              1337 drivers/soc/tegra/pmc.c 	mutex_lock(&pmc->powergates_lock);
pmc              1339 drivers/soc/tegra/pmc.c 	if (pmc->soc->has_impl_33v_pwr) {
pmc              1340 drivers/soc/tegra/pmc.c 		value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR);
pmc              1347 drivers/soc/tegra/pmc.c 		tegra_pmc_writel(pmc, value, PMC_IMPL_E_33V_PWR);
pmc              1350 drivers/soc/tegra/pmc.c 		value = tegra_pmc_readl(pmc, PMC_PWR_DET);
pmc              1352 drivers/soc/tegra/pmc.c 		tegra_pmc_writel(pmc, value, PMC_PWR_DET);
pmc              1355 drivers/soc/tegra/pmc.c 		value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE);
pmc              1362 drivers/soc/tegra/pmc.c 		tegra_pmc_writel(pmc, value, PMC_PWR_DET_VALUE);
pmc              1365 drivers/soc/tegra/pmc.c 	mutex_unlock(&pmc->powergates_lock);
pmc              1372 drivers/soc/tegra/pmc.c static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id)
pmc              1377 drivers/soc/tegra/pmc.c 	pad = tegra_io_pad_find(pmc, id);
pmc              1384 drivers/soc/tegra/pmc.c 	if (pmc->soc->has_impl_33v_pwr)
pmc              1385 drivers/soc/tegra/pmc.c 		value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR);
pmc              1387 drivers/soc/tegra/pmc.c 		value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE);
pmc              1422 drivers/soc/tegra/pmc.c 	return pmc->suspend_mode;
pmc              1430 drivers/soc/tegra/pmc.c 	pmc->suspend_mode = mode;
pmc              1444 drivers/soc/tegra/pmc.c 		rate = clk_get_rate(pmc->clk);
pmc              1454 drivers/soc/tegra/pmc.c 	if (rate != pmc->rate) {
pmc              1457 drivers/soc/tegra/pmc.c 		ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1;
pmc              1459 drivers/soc/tegra/pmc.c 		tegra_pmc_writel(pmc, ticks, PMC_CPUPWRGOOD_TIMER);
pmc              1461 drivers/soc/tegra/pmc.c 		ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1;
pmc              1463 drivers/soc/tegra/pmc.c 		tegra_pmc_writel(pmc, ticks, PMC_CPUPWROFF_TIMER);
pmc              1467 drivers/soc/tegra/pmc.c 		pmc->rate = rate;
pmc              1470 drivers/soc/tegra/pmc.c 	value = tegra_pmc_readl(pmc, PMC_CNTRL);
pmc              1473 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, value, PMC_CNTRL);
pmc              1477 drivers/soc/tegra/pmc.c static int tegra_pmc_parse_dt(struct tegra_pmc *pmc, struct device_node *np)
pmc              1485 drivers/soc/tegra/pmc.c 			pmc->suspend_mode = TEGRA_SUSPEND_LP0;
pmc              1489 drivers/soc/tegra/pmc.c 			pmc->suspend_mode = TEGRA_SUSPEND_LP1;
pmc              1493 drivers/soc/tegra/pmc.c 			pmc->suspend_mode = TEGRA_SUSPEND_LP2;
pmc              1497 drivers/soc/tegra/pmc.c 			pmc->suspend_mode = TEGRA_SUSPEND_NONE;
pmc              1502 drivers/soc/tegra/pmc.c 	pmc->suspend_mode = tegra_pm_validate_suspend_mode(pmc->suspend_mode);
pmc              1505 drivers/soc/tegra/pmc.c 		pmc->suspend_mode = TEGRA_SUSPEND_NONE;
pmc              1507 drivers/soc/tegra/pmc.c 	pmc->cpu_good_time = value;
pmc              1510 drivers/soc/tegra/pmc.c 		pmc->suspend_mode = TEGRA_SUSPEND_NONE;
pmc              1512 drivers/soc/tegra/pmc.c 	pmc->cpu_off_time = value;
pmc              1516 drivers/soc/tegra/pmc.c 		pmc->suspend_mode = TEGRA_SUSPEND_NONE;
pmc              1518 drivers/soc/tegra/pmc.c 	pmc->core_osc_time = values[0];
pmc              1519 drivers/soc/tegra/pmc.c 	pmc->core_pmu_time = values[1];
pmc              1522 drivers/soc/tegra/pmc.c 		pmc->suspend_mode = TEGRA_SUSPEND_NONE;
pmc              1524 drivers/soc/tegra/pmc.c 	pmc->core_off_time = value;
pmc              1526 drivers/soc/tegra/pmc.c 	pmc->corereq_high = of_property_read_bool(np,
pmc              1529 drivers/soc/tegra/pmc.c 	pmc->sysclkreq_high = of_property_read_bool(np,
pmc              1532 drivers/soc/tegra/pmc.c 	pmc->combined_req = of_property_read_bool(np,
pmc              1535 drivers/soc/tegra/pmc.c 	pmc->cpu_pwr_good_en = of_property_read_bool(np,
pmc              1540 drivers/soc/tegra/pmc.c 		if (pmc->suspend_mode == TEGRA_SUSPEND_LP0)
pmc              1541 drivers/soc/tegra/pmc.c 			pmc->suspend_mode = TEGRA_SUSPEND_LP1;
pmc              1543 drivers/soc/tegra/pmc.c 	pmc->lp0_vec_phys = values[0];
pmc              1544 drivers/soc/tegra/pmc.c 	pmc->lp0_vec_size = values[1];
pmc              1549 drivers/soc/tegra/pmc.c static void tegra_pmc_init(struct tegra_pmc *pmc)
pmc              1551 drivers/soc/tegra/pmc.c 	if (pmc->soc->init)
pmc              1552 drivers/soc/tegra/pmc.c 		pmc->soc->init(pmc);
pmc              1555 drivers/soc/tegra/pmc.c static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
pmc              1559 drivers/soc/tegra/pmc.c 	struct device *dev = pmc->dev;
pmc              1563 drivers/soc/tegra/pmc.c 	if (!pmc->soc->has_tsense_reset)
pmc              1566 drivers/soc/tegra/pmc.c 	np = of_get_child_by_name(pmc->dev->of_node, "i2c-thermtrip");
pmc              1595 drivers/soc/tegra/pmc.c 	value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL);
pmc              1597 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL);
pmc              1601 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, value, PMC_SCRATCH54);
pmc              1619 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, value, PMC_SCRATCH55);
pmc              1621 drivers/soc/tegra/pmc.c 	value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL);
pmc              1623 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL);
pmc              1625 drivers/soc/tegra/pmc.c 	dev_info(pmc->dev, "emergency thermal reset enabled\n");
pmc              1633 drivers/soc/tegra/pmc.c 	struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
pmc              1635 drivers/soc/tegra/pmc.c 	return pmc->soc->num_io_pads;
pmc              1641 drivers/soc/tegra/pmc.c 	struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl);
pmc              1643 drivers/soc/tegra/pmc.c 	return pmc->soc->io_pads[group].name;
pmc              1651 drivers/soc/tegra/pmc.c 	struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
pmc              1653 drivers/soc/tegra/pmc.c 	*pins = &pmc->soc->io_pads[group].id;
pmc              1671 drivers/soc/tegra/pmc.c 	struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
pmc              1676 drivers/soc/tegra/pmc.c 	pad = tegra_io_pad_find(pmc, pin);
pmc              1682 drivers/soc/tegra/pmc.c 		ret = tegra_io_pad_get_voltage(pmc, pad->id);
pmc              1690 drivers/soc/tegra/pmc.c 		ret = tegra_io_pad_is_powered(pmc, pad->id);
pmc              1710 drivers/soc/tegra/pmc.c 	struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev);
pmc              1717 drivers/soc/tegra/pmc.c 	pad = tegra_io_pad_find(pmc, pin);
pmc              1738 drivers/soc/tegra/pmc.c 			err = tegra_io_pad_set_voltage(pmc, pad->id, arg);
pmc              1761 drivers/soc/tegra/pmc.c static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc)
pmc              1765 drivers/soc/tegra/pmc.c 	if (!pmc->soc->num_pin_descs)
pmc              1768 drivers/soc/tegra/pmc.c 	tegra_pmc_pctl_desc.name = dev_name(pmc->dev);
pmc              1769 drivers/soc/tegra/pmc.c 	tegra_pmc_pctl_desc.pins = pmc->soc->pin_descs;
pmc              1770 drivers/soc/tegra/pmc.c 	tegra_pmc_pctl_desc.npins = pmc->soc->num_pin_descs;
pmc              1772 drivers/soc/tegra/pmc.c 	pmc->pctl_dev = devm_pinctrl_register(pmc->dev, &tegra_pmc_pctl_desc,
pmc              1773 drivers/soc/tegra/pmc.c 					      pmc);
pmc              1774 drivers/soc/tegra/pmc.c 	if (IS_ERR(pmc->pctl_dev)) {
pmc              1775 drivers/soc/tegra/pmc.c 		err = PTR_ERR(pmc->pctl_dev);
pmc              1776 drivers/soc/tegra/pmc.c 		dev_err(pmc->dev, "failed to register pin controller: %d\n",
pmc              1789 drivers/soc/tegra/pmc.c 	value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
pmc              1790 drivers/soc/tegra/pmc.c 	value &= pmc->soc->regs->rst_source_mask;
pmc              1791 drivers/soc/tegra/pmc.c 	value >>= pmc->soc->regs->rst_source_shift;
pmc              1793 drivers/soc/tegra/pmc.c 	if (WARN_ON(value >= pmc->soc->num_reset_sources))
pmc              1796 drivers/soc/tegra/pmc.c 	return sprintf(buf, "%s\n", pmc->soc->reset_sources[value]);
pmc              1806 drivers/soc/tegra/pmc.c 	value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
pmc              1807 drivers/soc/tegra/pmc.c 	value &= pmc->soc->regs->rst_level_mask;
pmc              1808 drivers/soc/tegra/pmc.c 	value >>= pmc->soc->regs->rst_level_shift;
pmc              1810 drivers/soc/tegra/pmc.c 	if (WARN_ON(value >= pmc->soc->num_reset_levels))
pmc              1813 drivers/soc/tegra/pmc.c 	return sprintf(buf, "%s\n", pmc->soc->reset_levels[value]);
pmc              1818 drivers/soc/tegra/pmc.c static void tegra_pmc_reset_sysfs_init(struct tegra_pmc *pmc)
pmc              1820 drivers/soc/tegra/pmc.c 	struct device *dev = pmc->dev;
pmc              1823 drivers/soc/tegra/pmc.c 	if (pmc->soc->reset_sources) {
pmc              1831 drivers/soc/tegra/pmc.c 	if (pmc->soc->reset_levels) {
pmc              1857 drivers/soc/tegra/pmc.c 	struct tegra_pmc *pmc = domain->host_data;
pmc              1858 drivers/soc/tegra/pmc.c 	const struct tegra_pmc_soc *soc = pmc->soc;
pmc              1877 drivers/soc/tegra/pmc.c 							    &pmc->irq, pmc);
pmc              1881 drivers/soc/tegra/pmc.c 			spec.fwnode = &pmc->dev->of_node->fwnode;
pmc              1900 drivers/soc/tegra/pmc.c 							    &pmc->irq, pmc);
pmc              1927 drivers/soc/tegra/pmc.c 						    &pmc->irq, pmc);
pmc              1951 drivers/soc/tegra/pmc.c 	struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
pmc              1963 drivers/soc/tegra/pmc.c 	writel(0x1, pmc->wake + WAKE_AOWAKE_STATUS_W(data->hwirq));
pmc              1966 drivers/soc/tegra/pmc.c 	value = readl(pmc->wake + WAKE_AOWAKE_TIER2_ROUTING(offset));
pmc              1973 drivers/soc/tegra/pmc.c 	writel(value, pmc->wake + WAKE_AOWAKE_TIER2_ROUTING(offset));
pmc              1976 drivers/soc/tegra/pmc.c 	writel(!!on, pmc->wake + WAKE_AOWAKE_MASK_W(data->hwirq));
pmc              1983 drivers/soc/tegra/pmc.c 	struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
pmc              1990 drivers/soc/tegra/pmc.c 	value = readl(pmc->wake + WAKE_AOWAKE_CNTRL(data->hwirq));
pmc              2011 drivers/soc/tegra/pmc.c 	writel(value, pmc->wake + WAKE_AOWAKE_CNTRL(data->hwirq));
pmc              2016 drivers/soc/tegra/pmc.c static int tegra_pmc_irq_init(struct tegra_pmc *pmc)
pmc              2021 drivers/soc/tegra/pmc.c 	np = of_irq_find_parent(pmc->dev->of_node);
pmc              2030 drivers/soc/tegra/pmc.c 	pmc->irq.name = dev_name(pmc->dev);
pmc              2031 drivers/soc/tegra/pmc.c 	pmc->irq.irq_mask = irq_chip_mask_parent;
pmc              2032 drivers/soc/tegra/pmc.c 	pmc->irq.irq_unmask = irq_chip_unmask_parent;
pmc              2033 drivers/soc/tegra/pmc.c 	pmc->irq.irq_eoi = irq_chip_eoi_parent;
pmc              2034 drivers/soc/tegra/pmc.c 	pmc->irq.irq_set_affinity = irq_chip_set_affinity_parent;
pmc              2035 drivers/soc/tegra/pmc.c 	pmc->irq.irq_set_type = tegra_pmc_irq_set_type;
pmc              2036 drivers/soc/tegra/pmc.c 	pmc->irq.irq_set_wake = tegra_pmc_irq_set_wake;
pmc              2038 drivers/soc/tegra/pmc.c 	pmc->domain = irq_domain_add_hierarchy(parent, 0, 96, pmc->dev->of_node,
pmc              2039 drivers/soc/tegra/pmc.c 					       &tegra_pmc_irq_domain_ops, pmc);
pmc              2040 drivers/soc/tegra/pmc.c 	if (!pmc->domain) {
pmc              2041 drivers/soc/tegra/pmc.c 		dev_err(pmc->dev, "failed to allocate domain\n");
pmc              2059 drivers/soc/tegra/pmc.c 	if (WARN_ON(!pmc->base || !pmc->soc))
pmc              2062 drivers/soc/tegra/pmc.c 	err = tegra_pmc_parse_dt(pmc, pdev->dev.of_node);
pmc              2074 drivers/soc/tegra/pmc.c 		pmc->wake = devm_ioremap_resource(&pdev->dev, res);
pmc              2075 drivers/soc/tegra/pmc.c 		if (IS_ERR(pmc->wake))
pmc              2076 drivers/soc/tegra/pmc.c 			return PTR_ERR(pmc->wake);
pmc              2078 drivers/soc/tegra/pmc.c 		pmc->wake = base;
pmc              2083 drivers/soc/tegra/pmc.c 		pmc->aotag = devm_ioremap_resource(&pdev->dev, res);
pmc              2084 drivers/soc/tegra/pmc.c 		if (IS_ERR(pmc->aotag))
pmc              2085 drivers/soc/tegra/pmc.c 			return PTR_ERR(pmc->aotag);
pmc              2087 drivers/soc/tegra/pmc.c 		pmc->aotag = base;
pmc              2092 drivers/soc/tegra/pmc.c 		pmc->scratch = devm_ioremap_resource(&pdev->dev, res);
pmc              2093 drivers/soc/tegra/pmc.c 		if (IS_ERR(pmc->scratch))
pmc              2094 drivers/soc/tegra/pmc.c 			return PTR_ERR(pmc->scratch);
pmc              2096 drivers/soc/tegra/pmc.c 		pmc->scratch = base;
pmc              2099 drivers/soc/tegra/pmc.c 	pmc->clk = devm_clk_get(&pdev->dev, "pclk");
pmc              2100 drivers/soc/tegra/pmc.c 	if (IS_ERR(pmc->clk)) {
pmc              2101 drivers/soc/tegra/pmc.c 		err = PTR_ERR(pmc->clk);
pmc              2108 drivers/soc/tegra/pmc.c 		pmc->clk = NULL;
pmc              2111 drivers/soc/tegra/pmc.c 	pmc->dev = &pdev->dev;
pmc              2113 drivers/soc/tegra/pmc.c 	tegra_pmc_init(pmc);
pmc              2115 drivers/soc/tegra/pmc.c 	tegra_pmc_init_tsense_reset(pmc);
pmc              2117 drivers/soc/tegra/pmc.c 	tegra_pmc_reset_sysfs_init(pmc);
pmc              2132 drivers/soc/tegra/pmc.c 	err = tegra_pmc_pinctrl_init(pmc);
pmc              2136 drivers/soc/tegra/pmc.c 	err = tegra_powergate_init(pmc, pdev->dev.of_node);
pmc              2140 drivers/soc/tegra/pmc.c 	err = tegra_pmc_irq_init(pmc);
pmc              2144 drivers/soc/tegra/pmc.c 	mutex_lock(&pmc->powergates_lock);
pmc              2145 drivers/soc/tegra/pmc.c 	iounmap(pmc->base);
pmc              2146 drivers/soc/tegra/pmc.c 	pmc->base = base;
pmc              2147 drivers/soc/tegra/pmc.c 	mutex_unlock(&pmc->powergates_lock);
pmc              2149 drivers/soc/tegra/pmc.c 	platform_set_drvdata(pdev, pmc);
pmc              2158 drivers/soc/tegra/pmc.c 	debugfs_remove(pmc->debugfs);
pmc              2168 drivers/soc/tegra/pmc.c 	struct tegra_pmc *pmc = dev_get_drvdata(dev);
pmc              2170 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, virt_to_phys(tegra_resume), PMC_SCRATCH41);
pmc              2177 drivers/soc/tegra/pmc.c 	struct tegra_pmc *pmc = dev_get_drvdata(dev);
pmc              2179 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, 0x0, PMC_SCRATCH41);
pmc              2211 drivers/soc/tegra/pmc.c static void tegra20_pmc_init(struct tegra_pmc *pmc)
pmc              2216 drivers/soc/tegra/pmc.c 	value = tegra_pmc_readl(pmc, PMC_CNTRL);
pmc              2218 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, value, PMC_CNTRL);
pmc              2220 drivers/soc/tegra/pmc.c 	value = tegra_pmc_readl(pmc, PMC_CNTRL);
pmc              2222 drivers/soc/tegra/pmc.c 	if (pmc->sysclkreq_high)
pmc              2228 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, value, PMC_CNTRL);
pmc              2231 drivers/soc/tegra/pmc.c 	value = tegra_pmc_readl(pmc, PMC_CNTRL);
pmc              2233 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, value, PMC_CNTRL);
pmc              2236 drivers/soc/tegra/pmc.c static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
pmc              2242 drivers/soc/tegra/pmc.c 	value = tegra_pmc_readl(pmc, PMC_CNTRL);
pmc              2249 drivers/soc/tegra/pmc.c 	tegra_pmc_writel(pmc, value, PMC_CNTRL);
pmc              2652 drivers/soc/tegra/pmc.c static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
pmc              2663 drivers/soc/tegra/pmc.c 		dev_err(pmc->dev, "failed to find PMC wake registers\n");
pmc              2671 drivers/soc/tegra/pmc.c 		dev_err(pmc->dev, "failed to map PMC wake registers\n");
pmc              2816 drivers/soc/tegra/pmc.c static bool __init tegra_pmc_detect_tz_only(struct tegra_pmc *pmc)
pmc              2820 drivers/soc/tegra/pmc.c 	saved = readl(pmc->base + pmc->soc->regs->scratch0);
pmc              2827 drivers/soc/tegra/pmc.c 	writel(value, pmc->base + pmc->soc->regs->scratch0);
pmc              2828 drivers/soc/tegra/pmc.c 	value = readl(pmc->base + pmc->soc->regs->scratch0);
pmc              2837 drivers/soc/tegra/pmc.c 	writel(saved, pmc->base + pmc->soc->regs->scratch0);
pmc              2854 drivers/soc/tegra/pmc.c 	mutex_init(&pmc->powergates_lock);
pmc              2895 drivers/soc/tegra/pmc.c 	pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
pmc              2896 drivers/soc/tegra/pmc.c 	if (!pmc->base) {
pmc              2903 drivers/soc/tegra/pmc.c 		pmc->soc = match->data;
pmc              2905 drivers/soc/tegra/pmc.c 		if (pmc->soc->maybe_tz_only)
pmc              2906 drivers/soc/tegra/pmc.c 			pmc->tz_only = tegra_pmc_detect_tz_only(pmc);
pmc              2909 drivers/soc/tegra/pmc.c 		for (i = 0; i < pmc->soc->num_powergates; i++)
pmc              2910 drivers/soc/tegra/pmc.c 			if (pmc->soc->powergates[i])
pmc              2911 drivers/soc/tegra/pmc.c 				set_bit(i, pmc->powergates_available);
pmc              2919 drivers/soc/tegra/pmc.c 		pmc->soc->setup_irq_polarity(pmc, np, invert);
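
The drivers/soc/tegra/pmc.c lines above funnel every register access through tegra_pmc_readl()/tegra_pmc_writel(), which switch between direct MMIO and a secure-monitor call depending on pmc->tz_only (detected at early init by tegra_pmc_detect_tz_only()). A generic sketch of that dispatch, where smc_read/smc_write are hypothetical stand-ins for the firmware call:

/*
 * Single dispatch point for register access: when the registers are owned
 * by the secure world, route reads and writes through firmware hooks;
 * otherwise touch the MMIO mapping directly.
 */
#include <stdbool.h>
#include <stdint.h>

struct tegra_pmc_like {
	volatile uint32_t *base;	/* direct MMIO mapping */
	bool tz_only;			/* registers owned by secure world */
	/* firmware hooks used when tz_only is set */
	uint32_t (*smc_read)(unsigned long offset);
	void (*smc_write)(unsigned long offset, uint32_t value);
};

static uint32_t pmc_like_readl(struct tegra_pmc_like *pmc, unsigned long offset)
{
	if (pmc->tz_only)
		return pmc->smc_read(offset);

	return pmc->base[offset / 4];
}

static void pmc_like_writel(struct tegra_pmc_like *pmc, uint32_t value,
			    unsigned long offset)
{
	if (pmc->tz_only)
		pmc->smc_write(offset, value);
	else
		pmc->base[offset / 4] = value;
}
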
pmc              2026 drivers/usb/gadget/udc/atmel_usba_udc.c 	regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
pmc              2032 drivers/usb/gadget/udc/atmel_usba_udc.c 	regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 0);
pmc              2033 drivers/usb/gadget/udc/atmel_usba_udc.c 	regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN,
pmc              2069 drivers/usb/gadget/udc/atmel_usba_udc.c 	udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
pmc              2070 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (IS_ERR(udc->pmc))
pmc              2071 drivers/usb/gadget/udc/atmel_usba_udc.c 		udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc");
pmc              2072 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (IS_ERR(udc->pmc))
pmc              2073 drivers/usb/gadget/udc/atmel_usba_udc.c 		udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
pmc              2074 drivers/usb/gadget/udc/atmel_usba_udc.c 	if (udc->errata && IS_ERR(udc->pmc))
pmc              2075 drivers/usb/gadget/udc/atmel_usba_udc.c 		return ERR_CAST(udc->pmc);
pmc               347 drivers/usb/gadget/udc/atmel_usba_udc.h 	struct regmap *pmc;
pmc                25 include/kvm/arm_pmu.h 	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
pmc               376 net/batman-adv/multicast.c 	struct ip_mc_list *pmc;
pmc               390 net/batman-adv/multicast.c 	for (pmc = rcu_dereference(in_dev->mc_list); pmc;
pmc               391 net/batman-adv/multicast.c 	     pmc = rcu_dereference(pmc->next_rcu)) {
pmc               393 net/batman-adv/multicast.c 		    ipv4_is_local_multicast(pmc->multiaddr))
pmc               397 net/batman-adv/multicast.c 		    !ipv4_is_local_multicast(pmc->multiaddr))
pmc               400 net/batman-adv/multicast.c 		ip_eth_mc_map(pmc->multiaddr, mcast_addr);
pmc               162 net/ipv4/igmp.c static int sf_setstate(struct ip_mc_list *pmc);
pmc               163 net/ipv4/igmp.c static void sf_markstate(struct ip_mc_list *pmc);
pmc               165 net/ipv4/igmp.c static void ip_mc_clear_src(struct ip_mc_list *pmc);
pmc               177 net/ipv4/igmp.c #define for_each_pmc_rcu(in_dev, pmc)				\
pmc               178 net/ipv4/igmp.c 	for (pmc = rcu_dereference(in_dev->mc_list);		\
pmc               179 net/ipv4/igmp.c 	     pmc != NULL;					\
pmc               180 net/ipv4/igmp.c 	     pmc = rcu_dereference(pmc->next_rcu))
pmc               182 net/ipv4/igmp.c #define for_each_pmc_rtnl(in_dev, pmc)				\
pmc               183 net/ipv4/igmp.c 	for (pmc = rtnl_dereference(in_dev->mc_list);		\
pmc               184 net/ipv4/igmp.c 	     pmc != NULL;					\
pmc               185 net/ipv4/igmp.c 	     pmc = rtnl_dereference(pmc->next_rcu))
pmc               272 net/ipv4/igmp.c static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
pmc               280 net/ipv4/igmp.c 		if (!(pmc->gsquery && !psf->sf_gsresp)) {
pmc               281 net/ipv4/igmp.c 			if (pmc->sfmode == MCAST_INCLUDE)
pmc               288 net/ipv4/igmp.c 			return pmc->sfcount[MCAST_EXCLUDE] ==
pmc               299 net/ipv4/igmp.c 		if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
pmc               302 net/ipv4/igmp.c 		return pmc->sfcount[MCAST_EXCLUDE] ==
pmc               307 net/ipv4/igmp.c 		return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
pmc               309 net/ipv4/igmp.c 		if (pmc->sfmode == MCAST_INCLUDE)
pmc               317 net/ipv4/igmp.c igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
pmc               322 net/ipv4/igmp.c 	for (psf = pmc->sources; psf; psf = psf->sf_next) {
pmc               323 net/ipv4/igmp.c 		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
pmc               429 net/ipv4/igmp.c static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
pmc               431 net/ipv4/igmp.c 	return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
pmc               434 net/ipv4/igmp.c static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
pmc               437 net/ipv4/igmp.c 	struct net_device *dev = pmc->interface->dev;
pmc               450 net/ipv4/igmp.c 	pgr->grec_mca = pmc->multiaddr;
pmc               459 net/ipv4/igmp.c static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
pmc               462 net/ipv4/igmp.c 	struct net_device *dev = pmc->interface->dev;
pmc               470 net/ipv4/igmp.c 	if (pmc->multiaddr == IGMP_ALL_HOSTS)
pmc               472 net/ipv4/igmp.c 	if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
pmc               486 net/ipv4/igmp.c 	psf_list = sdeleted ? &pmc->tomb : &pmc->sources;
pmc               496 net/ipv4/igmp.c 		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
pmc               509 net/ipv4/igmp.c 		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
pmc               517 net/ipv4/igmp.c 		if (((gdeleted && pmc->sfmode == MCAST_EXCLUDE) ||
pmc               518 net/ipv4/igmp.c 		     (!gdeleted && pmc->crcount)) &&
pmc               540 net/ipv4/igmp.c 			skb = add_grhead(skb, pmc, type, &pgr, mtu);
pmc               569 net/ipv4/igmp.c 		if (pmc->crcount || isquery) {
pmc               575 net/ipv4/igmp.c 			skb = add_grhead(skb, pmc, type, &pgr, mtu);
pmc               582 net/ipv4/igmp.c 		pmc->gsquery = 0;	/* clear query state on report */
pmc               586 net/ipv4/igmp.c static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
pmc               592 net/ipv4/igmp.c 	if (!pmc) {
pmc               594 net/ipv4/igmp.c 		for_each_pmc_rcu(in_dev, pmc) {
pmc               595 net/ipv4/igmp.c 			if (pmc->multiaddr == IGMP_ALL_HOSTS)
pmc               597 net/ipv4/igmp.c 			if (ipv4_is_local_multicast(pmc->multiaddr) &&
pmc               600 net/ipv4/igmp.c 			spin_lock_bh(&pmc->lock);
pmc               601 net/ipv4/igmp.c 			if (pmc->sfcount[MCAST_EXCLUDE])
pmc               605 net/ipv4/igmp.c 			skb = add_grec(skb, pmc, type, 0, 0);
pmc               606 net/ipv4/igmp.c 			spin_unlock_bh(&pmc->lock);
pmc               610 net/ipv4/igmp.c 		spin_lock_bh(&pmc->lock);
pmc               611 net/ipv4/igmp.c 		if (pmc->sfcount[MCAST_EXCLUDE])
pmc               615 net/ipv4/igmp.c 		skb = add_grec(skb, pmc, type, 0, 0);
pmc               616 net/ipv4/igmp.c 		spin_unlock_bh(&pmc->lock);
pmc               644 net/ipv4/igmp.c static void kfree_pmc(struct ip_mc_list *pmc)
pmc               646 net/ipv4/igmp.c 	ip_sf_list_clear_all(pmc->sources);
pmc               647 net/ipv4/igmp.c 	ip_sf_list_clear_all(pmc->tomb);
pmc               648 net/ipv4/igmp.c 	kfree(pmc);
pmc               653 net/ipv4/igmp.c 	struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
pmc               662 net/ipv4/igmp.c 	for (pmc = in_dev->mc_tomb; pmc; pmc = pmc_next) {
pmc               663 net/ipv4/igmp.c 		pmc_next = pmc->next;
pmc               664 net/ipv4/igmp.c 		if (pmc->sfmode == MCAST_INCLUDE) {
pmc               667 net/ipv4/igmp.c 			skb = add_grec(skb, pmc, type, 1, 0);
pmc               668 net/ipv4/igmp.c 			skb = add_grec(skb, pmc, dtype, 1, 1);
pmc               670 net/ipv4/igmp.c 		if (pmc->crcount) {
pmc               671 net/ipv4/igmp.c 			if (pmc->sfmode == MCAST_EXCLUDE) {
pmc               673 net/ipv4/igmp.c 				skb = add_grec(skb, pmc, type, 1, 0);
pmc               675 net/ipv4/igmp.c 			pmc->crcount--;
pmc               676 net/ipv4/igmp.c 			if (pmc->crcount == 0) {
pmc               677 net/ipv4/igmp.c 				igmpv3_clear_zeros(&pmc->tomb);
pmc               678 net/ipv4/igmp.c 				igmpv3_clear_zeros(&pmc->sources);
pmc               681 net/ipv4/igmp.c 		if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
pmc               686 net/ipv4/igmp.c 			in_dev_put(pmc->interface);
pmc               687 net/ipv4/igmp.c 			kfree_pmc(pmc);
pmc               689 net/ipv4/igmp.c 			pmc_prev = pmc;
pmc               694 net/ipv4/igmp.c 	for_each_pmc_rcu(in_dev, pmc) {
pmc               695 net/ipv4/igmp.c 		spin_lock_bh(&pmc->lock);
pmc               696 net/ipv4/igmp.c 		if (pmc->sfcount[MCAST_EXCLUDE]) {
pmc               703 net/ipv4/igmp.c 		skb = add_grec(skb, pmc, type, 0, 0);
pmc               704 net/ipv4/igmp.c 		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */
pmc               707 net/ipv4/igmp.c 		if (pmc->crcount) {
pmc               708 net/ipv4/igmp.c 			if (pmc->sfmode == MCAST_EXCLUDE)
pmc               712 net/ipv4/igmp.c 			skb = add_grec(skb, pmc, type, 0, 0);
pmc               713 net/ipv4/igmp.c 			pmc->crcount--;
pmc               715 net/ipv4/igmp.c 		spin_unlock_bh(&pmc->lock);
pmc               724 net/ipv4/igmp.c static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
pmc               733 net/ipv4/igmp.c 	__be32	group = pmc ? pmc->multiaddr : 0;
pmc               739 net/ipv4/igmp.c 		return igmpv3_send_report(in_dev, pmc);
pmc               853 net/ipv4/igmp.c static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
pmc               859 net/ipv4/igmp.c 	for (psf = pmc->sources; psf; psf = psf->sf_next) {
pmc               865 net/ipv4/igmp.c 			    pmc->sfcount[MCAST_EXCLUDE] !=
pmc               874 net/ipv4/igmp.c 	pmc->gsquery = 0;
pmc               880 net/ipv4/igmp.c static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
pmc               885 net/ipv4/igmp.c 	if (pmc->sfmode == MCAST_EXCLUDE)
pmc               886 net/ipv4/igmp.c 		return igmp_xmarksources(pmc, nsrcs, srcs);
pmc               890 net/ipv4/igmp.c 	for (psf = pmc->sources; psf; psf = psf->sf_next) {
pmc               901 net/ipv4/igmp.c 		pmc->gsquery = 0;
pmc               904 net/ipv4/igmp.c 	pmc->gsquery = 1;
pmc              1167 net/ipv4/igmp.c 	struct ip_mc_list *pmc;
pmc              1176 net/ipv4/igmp.c 	pmc = kzalloc(sizeof(*pmc), gfp);
pmc              1177 net/ipv4/igmp.c 	if (!pmc)
pmc              1179 net/ipv4/igmp.c 	spin_lock_init(&pmc->lock);
pmc              1181 net/ipv4/igmp.c 	pmc->interface = im->interface;
pmc              1183 net/ipv4/igmp.c 	pmc->multiaddr = im->multiaddr;
pmc              1184 net/ipv4/igmp.c 	pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
pmc              1185 net/ipv4/igmp.c 	pmc->sfmode = im->sfmode;
pmc              1186 net/ipv4/igmp.c 	if (pmc->sfmode == MCAST_INCLUDE) {
pmc              1189 net/ipv4/igmp.c 		pmc->tomb = im->tomb;
pmc              1190 net/ipv4/igmp.c 		pmc->sources = im->sources;
pmc              1192 net/ipv4/igmp.c 		for (psf = pmc->sources; psf; psf = psf->sf_next)
pmc              1193 net/ipv4/igmp.c 			psf->sf_crcount = pmc->crcount;
pmc              1198 net/ipv4/igmp.c 	pmc->next = in_dev->mc_tomb;
pmc              1199 net/ipv4/igmp.c 	in_dev->mc_tomb = pmc;
pmc              1208 net/ipv4/igmp.c 	struct ip_mc_list *pmc, *pmc_prev;
pmc              1215 net/ipv4/igmp.c 	for (pmc = in_dev->mc_tomb; pmc; pmc = pmc->next) {
pmc              1216 net/ipv4/igmp.c 		if (pmc->multiaddr == multiaddr)
pmc              1218 net/ipv4/igmp.c 		pmc_prev = pmc;
pmc              1220 net/ipv4/igmp.c 	if (pmc) {
pmc              1222 net/ipv4/igmp.c 			pmc_prev->next = pmc->next;
pmc              1224 net/ipv4/igmp.c 			in_dev->mc_tomb = pmc->next;
pmc              1229 net/ipv4/igmp.c 	if (pmc) {
pmc              1230 net/ipv4/igmp.c 		im->interface = pmc->interface;
pmc              1232 net/ipv4/igmp.c 			swap(im->tomb, pmc->tomb);
pmc              1233 net/ipv4/igmp.c 			swap(im->sources, pmc->sources);
pmc              1239 net/ipv4/igmp.c 		in_dev_put(pmc->interface);
pmc              1240 net/ipv4/igmp.c 		kfree_pmc(pmc);
pmc              1250 net/ipv4/igmp.c 	struct ip_mc_list *pmc, *nextpmc;
pmc              1253 net/ipv4/igmp.c 	pmc = in_dev->mc_tomb;
pmc              1257 net/ipv4/igmp.c 	for (; pmc; pmc = nextpmc) {
pmc              1258 net/ipv4/igmp.c 		nextpmc = pmc->next;
pmc              1259 net/ipv4/igmp.c 		ip_mc_clear_src(pmc);
pmc              1260 net/ipv4/igmp.c 		in_dev_put(pmc->interface);
pmc              1261 net/ipv4/igmp.c 		kfree_pmc(pmc);
pmc              1265 net/ipv4/igmp.c 	for_each_pmc_rcu(in_dev, pmc) {
pmc              1268 net/ipv4/igmp.c 		spin_lock_bh(&pmc->lock);
pmc              1269 net/ipv4/igmp.c 		psf = pmc->tomb;
pmc              1270 net/ipv4/igmp.c 		pmc->tomb = NULL;
pmc              1271 net/ipv4/igmp.c 		spin_unlock_bh(&pmc->lock);
pmc              1695 net/ipv4/igmp.c 	struct ip_mc_list *pmc;
pmc              1699 net/ipv4/igmp.c 	for_each_pmc_rtnl(in_dev, pmc)
pmc              1700 net/ipv4/igmp.c 		igmp_group_dropped(pmc);
pmc              1705 net/ipv4/igmp.c 	struct ip_mc_list *pmc;
pmc              1709 net/ipv4/igmp.c 	for_each_pmc_rtnl(in_dev, pmc) {
pmc              1711 net/ipv4/igmp.c 		igmpv3_del_delrec(in_dev, pmc);
pmc              1713 net/ipv4/igmp.c 		igmp_group_added(pmc);
pmc              1721 net/ipv4/igmp.c 	struct ip_mc_list *pmc;
pmc              1725 net/ipv4/igmp.c 	for_each_pmc_rtnl(in_dev, pmc)
pmc              1726 net/ipv4/igmp.c 		igmp_group_dropped(pmc);
pmc              1772 net/ipv4/igmp.c 	struct ip_mc_list *pmc;
pmc              1779 net/ipv4/igmp.c 	for_each_pmc_rtnl(in_dev, pmc) {
pmc              1781 net/ipv4/igmp.c 		igmpv3_del_delrec(in_dev, pmc);
pmc              1783 net/ipv4/igmp.c 		igmp_group_added(pmc);
pmc              1846 net/ipv4/igmp.c static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
pmc              1853 net/ipv4/igmp.c 	for (psf = pmc->sources; psf; psf = psf->sf_next) {
pmc              1864 net/ipv4/igmp.c 		ip_rt_multicast_event(pmc->interface);
pmc              1868 net/ipv4/igmp.c 		struct in_device *in_dev = pmc->interface;
pmc              1876 net/ipv4/igmp.c 			pmc->sources = psf->sf_next;
pmc              1881 net/ipv4/igmp.c 			psf->sf_next = pmc->tomb;
pmc              1882 net/ipv4/igmp.c 			pmc->tomb = psf;
pmc              1898 net/ipv4/igmp.c 	struct ip_mc_list *pmc;
pmc              1905 net/ipv4/igmp.c 	for_each_pmc_rcu(in_dev, pmc) {
pmc              1906 net/ipv4/igmp.c 		if (*pmca == pmc->multiaddr)
pmc              1909 net/ipv4/igmp.c 	if (!pmc) {
pmc              1914 net/ipv4/igmp.c 	spin_lock_bh(&pmc->lock);
pmc              1917 net/ipv4/igmp.c 	sf_markstate(pmc);
pmc              1921 net/ipv4/igmp.c 		if (!pmc->sfcount[sfmode])
pmc              1923 net/ipv4/igmp.c 		pmc->sfcount[sfmode]--;
pmc              1927 net/ipv4/igmp.c 		int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);
pmc              1933 net/ipv4/igmp.c 	if (pmc->sfmode == MCAST_EXCLUDE &&
pmc              1934 net/ipv4/igmp.c 	    pmc->sfcount[MCAST_EXCLUDE] == 0 &&
pmc              1935 net/ipv4/igmp.c 	    pmc->sfcount[MCAST_INCLUDE]) {
pmc              1942 net/ipv4/igmp.c 		pmc->sfmode = MCAST_INCLUDE;
pmc              1944 net/ipv4/igmp.c 		pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
pmc              1945 net/ipv4/igmp.c 		in_dev->mr_ifc_count = pmc->crcount;
pmc              1946 net/ipv4/igmp.c 		for (psf = pmc->sources; psf; psf = psf->sf_next)
pmc              1948 net/ipv4/igmp.c 		igmp_ifc_event(pmc->interface);
pmc              1949 net/ipv4/igmp.c 	} else if (sf_setstate(pmc) || changerec) {
pmc              1950 net/ipv4/igmp.c 		igmp_ifc_event(pmc->interface);
pmc              1954 net/ipv4/igmp.c 	spin_unlock_bh(&pmc->lock);
pmc              1961 net/ipv4/igmp.c static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
pmc              1967 net/ipv4/igmp.c 	for (psf = pmc->sources; psf; psf = psf->sf_next) {
pmc              1980 net/ipv4/igmp.c 			pmc->sources = psf;
pmc              1984 net/ipv4/igmp.c 		ip_rt_multicast_event(pmc->interface);
pmc              1990 net/ipv4/igmp.c static void sf_markstate(struct ip_mc_list *pmc)
pmc              1993 net/ipv4/igmp.c 	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
pmc              1995 net/ipv4/igmp.c 	for (psf = pmc->sources; psf; psf = psf->sf_next)
pmc              1996 net/ipv4/igmp.c 		if (pmc->sfcount[MCAST_EXCLUDE]) {
pmc              2004 net/ipv4/igmp.c static int sf_setstate(struct ip_mc_list *pmc)
pmc              2007 net/ipv4/igmp.c 	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
pmc              2008 net/ipv4/igmp.c 	int qrv = pmc->interface->mr_qrv;
pmc              2012 net/ipv4/igmp.c 	for (psf = pmc->sources; psf; psf = psf->sf_next) {
pmc              2013 net/ipv4/igmp.c 		if (pmc->sfcount[MCAST_EXCLUDE]) {
pmc              2022 net/ipv4/igmp.c 				for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next) {
pmc              2031 net/ipv4/igmp.c 						pmc->tomb = dpsf->sf_next;
pmc              2044 net/ipv4/igmp.c 			for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next)
pmc              2053 net/ipv4/igmp.c 				dpsf->sf_next = pmc->tomb;
pmc              2054 net/ipv4/igmp.c 				pmc->tomb = dpsf;
pmc              2070 net/ipv4/igmp.c 	struct ip_mc_list *pmc;
pmc              2077 net/ipv4/igmp.c 	for_each_pmc_rcu(in_dev, pmc) {
pmc              2078 net/ipv4/igmp.c 		if (*pmca == pmc->multiaddr)
pmc              2081 net/ipv4/igmp.c 	if (!pmc) {
pmc              2086 net/ipv4/igmp.c 	spin_lock_bh(&pmc->lock);
pmc              2090 net/ipv4/igmp.c 	sf_markstate(pmc);
pmc              2092 net/ipv4/igmp.c 	isexclude = pmc->sfmode == MCAST_EXCLUDE;
pmc              2094 net/ipv4/igmp.c 		pmc->sfcount[sfmode]++;
pmc              2097 net/ipv4/igmp.c 		err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
pmc              2105 net/ipv4/igmp.c 			pmc->sfcount[sfmode]--;
pmc              2107 net/ipv4/igmp.c 			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
pmc              2108 net/ipv4/igmp.c 	} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
pmc              2111 net/ipv4/igmp.c 		struct net *net = dev_net(pmc->interface->dev);
pmc              2112 net/ipv4/igmp.c 		in_dev = pmc->interface;
pmc              2116 net/ipv4/igmp.c 		if (pmc->sfcount[MCAST_EXCLUDE])
pmc              2117 net/ipv4/igmp.c 			pmc->sfmode = MCAST_EXCLUDE;
pmc              2118 net/ipv4/igmp.c 		else if (pmc->sfcount[MCAST_INCLUDE])
pmc              2119 net/ipv4/igmp.c 			pmc->sfmode = MCAST_INCLUDE;
pmc              2123 net/ipv4/igmp.c 		pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
pmc              2124 net/ipv4/igmp.c 		in_dev->mr_ifc_count = pmc->crcount;
pmc              2125 net/ipv4/igmp.c 		for (psf = pmc->sources; psf; psf = psf->sf_next)
pmc              2128 net/ipv4/igmp.c 	} else if (sf_setstate(pmc)) {
pmc              2132 net/ipv4/igmp.c 	spin_unlock_bh(&pmc->lock);
pmc              2136 net/ipv4/igmp.c static void ip_mc_clear_src(struct ip_mc_list *pmc)
pmc              2140 net/ipv4/igmp.c 	spin_lock_bh(&pmc->lock);
pmc              2141 net/ipv4/igmp.c 	tomb = pmc->tomb;
pmc              2142 net/ipv4/igmp.c 	pmc->tomb = NULL;
pmc              2143 net/ipv4/igmp.c 	sources = pmc->sources;
pmc              2144 net/ipv4/igmp.c 	pmc->sources = NULL;
pmc              2145 net/ipv4/igmp.c 	pmc->sfmode = MCAST_EXCLUDE;
pmc              2146 net/ipv4/igmp.c 	pmc->sfcount[MCAST_INCLUDE] = 0;
pmc              2147 net/ipv4/igmp.c 	pmc->sfcount[MCAST_EXCLUDE] = 1;
pmc              2148 net/ipv4/igmp.c 	spin_unlock_bh(&pmc->lock);
pmc              2296 net/ipv4/igmp.c 	struct ip_mc_socklist *pmc;
pmc              2320 net/ipv4/igmp.c 	for_each_pmc_rtnl(inet, pmc) {
pmc              2321 net/ipv4/igmp.c 		if ((pmc->multi.imr_multiaddr.s_addr ==
pmc              2323 net/ipv4/igmp.c 		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
pmc              2326 net/ipv4/igmp.c 	if (!pmc) {		/* must have a prior join */
pmc              2331 net/ipv4/igmp.c 	if (pmc->sflist) {
pmc              2332 net/ipv4/igmp.c 		if (pmc->sfmode != omode) {
pmc              2336 net/ipv4/igmp.c 	} else if (pmc->sfmode != omode) {
pmc              2339 net/ipv4/igmp.c 		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
pmc              2341 net/ipv4/igmp.c 		pmc->sfmode = omode;
pmc              2344 net/ipv4/igmp.c 	psl = rtnl_dereference(pmc->sflist);
pmc              2400 net/ipv4/igmp.c 		rcu_assign_pointer(pmc->sflist, newpsl);
pmc              2431 net/ipv4/igmp.c 	struct ip_mc_socklist *pmc;
pmc              2462 net/ipv4/igmp.c 	for_each_pmc_rtnl(inet, pmc) {
pmc              2463 net/ipv4/igmp.c 		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
pmc              2464 net/ipv4/igmp.c 		    pmc->multi.imr_ifindex == imr.imr_ifindex)
pmc              2467 net/ipv4/igmp.c 	if (!pmc) {		/* must have a prior join */
pmc              2492 net/ipv4/igmp.c 	psl = rtnl_dereference(pmc->sflist);
pmc              2494 net/ipv4/igmp.c 		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
pmc              2500 net/ipv4/igmp.c 		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
pmc              2502 net/ipv4/igmp.c 	rcu_assign_pointer(pmc->sflist, newpsl);
pmc              2503 net/ipv4/igmp.c 	pmc->sfmode = msf->imsf_fmode;
pmc              2517 net/ipv4/igmp.c 	struct ip_mc_socklist *pmc;
pmc              2539 net/ipv4/igmp.c 	for_each_pmc_rtnl(inet, pmc) {
pmc              2540 net/ipv4/igmp.c 		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
pmc              2541 net/ipv4/igmp.c 		    pmc->multi.imr_ifindex == imr.imr_ifindex)
pmc              2544 net/ipv4/igmp.c 	if (!pmc)		/* must have a prior join */
pmc              2546 net/ipv4/igmp.c 	msf->imsf_fmode = pmc->sfmode;
pmc              2547 net/ipv4/igmp.c 	psl = rtnl_dereference(pmc->sflist);
pmc              2575 net/ipv4/igmp.c 	struct ip_mc_socklist *pmc;
pmc              2590 net/ipv4/igmp.c 	for_each_pmc_rtnl(inet, pmc) {
pmc              2591 net/ipv4/igmp.c 		if (pmc->multi.imr_multiaddr.s_addr == addr &&
pmc              2592 net/ipv4/igmp.c 		    pmc->multi.imr_ifindex == gsf->gf_interface)
pmc              2595 net/ipv4/igmp.c 	if (!pmc)		/* must have a prior join */
pmc              2597 net/ipv4/igmp.c 	gsf->gf_fmode = pmc->sfmode;
pmc              2598 net/ipv4/igmp.c 	psl = rtnl_dereference(pmc->sflist);
pmc              2628 net/ipv4/igmp.c 	struct ip_mc_socklist *pmc;
pmc              2638 net/ipv4/igmp.c 	for_each_pmc_rcu(inet, pmc) {
pmc              2639 net/ipv4/igmp.c 		if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
pmc              2640 net/ipv4/igmp.c 		    (pmc->multi.imr_ifindex == dif ||
pmc              2641 net/ipv4/igmp.c 		     (sdif && pmc->multi.imr_ifindex == sdif)))
pmc              2645 net/ipv4/igmp.c 	if (!pmc)
pmc              2647 net/ipv4/igmp.c 	psl = rcu_dereference(pmc->sflist);
pmc              2648 net/ipv4/igmp.c 	ret = (pmc->sfmode == MCAST_EXCLUDE);
pmc              2657 net/ipv4/igmp.c 	if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
pmc              2659 net/ipv4/igmp.c 	if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
pmc                79 net/ipv6/mcast.c static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
pmc                80 net/ipv6/mcast.c static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
pmc                83 net/ipv6/mcast.c static int sf_setstate(struct ifmcaddr6 *pmc);
pmc                84 net/ipv6/mcast.c static void sf_markstate(struct ifmcaddr6 *pmc);
pmc                85 net/ipv6/mcast.c static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
pmc               116 net/ipv6/mcast.c #define for_each_pmc_rcu(np, pmc)				\
pmc               117 net/ipv6/mcast.c 	for (pmc = rcu_dereference(np->ipv6_mc_list);		\
pmc               118 net/ipv6/mcast.c 	     pmc != NULL;					\
pmc               119 net/ipv6/mcast.c 	     pmc = rcu_dereference(pmc->next))
pmc               332 net/ipv6/mcast.c 	struct ipv6_mc_socklist *pmc;
pmc               357 net/ipv6/mcast.c 	for_each_pmc_rcu(inet6, pmc) {
pmc               358 net/ipv6/mcast.c 		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
pmc               360 net/ipv6/mcast.c 		if (ipv6_addr_equal(&pmc->addr, group))
pmc               363 net/ipv6/mcast.c 	if (!pmc) {		/* must have a prior join */
pmc               368 net/ipv6/mcast.c 	if (pmc->sflist) {
pmc               369 net/ipv6/mcast.c 		if (pmc->sfmode != omode) {
pmc               373 net/ipv6/mcast.c 	} else if (pmc->sfmode != omode) {
pmc               376 net/ipv6/mcast.c 		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
pmc               377 net/ipv6/mcast.c 		pmc->sfmode = omode;
pmc               380 net/ipv6/mcast.c 	write_lock(&pmc->sflock);
pmc               383 net/ipv6/mcast.c 	psl = pmc->sflist;
pmc               435 net/ipv6/mcast.c 		pmc->sflist = psl = newpsl;
pmc               452 net/ipv6/mcast.c 		write_unlock(&pmc->sflock);
pmc               463 net/ipv6/mcast.c 	struct ipv6_mc_socklist *pmc;
pmc               494 net/ipv6/mcast.c 	for_each_pmc_rcu(inet6, pmc) {
pmc               495 net/ipv6/mcast.c 		if (pmc->ifindex != gsf->gf_interface)
pmc               497 net/ipv6/mcast.c 		if (ipv6_addr_equal(&pmc->addr, group))
pmc               500 net/ipv6/mcast.c 	if (!pmc) {		/* must have a prior join */
pmc               529 net/ipv6/mcast.c 	write_lock(&pmc->sflock);
pmc               530 net/ipv6/mcast.c 	psl = pmc->sflist;
pmc               532 net/ipv6/mcast.c 		(void) ip6_mc_del_src(idev, group, pmc->sfmode,
pmc               536 net/ipv6/mcast.c 		(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
pmc               537 net/ipv6/mcast.c 	pmc->sflist = newpsl;
pmc               538 net/ipv6/mcast.c 	pmc->sfmode = gsf->gf_fmode;
pmc               539 net/ipv6/mcast.c 	write_unlock(&pmc->sflock);
pmc               554 net/ipv6/mcast.c 	struct ipv6_mc_socklist *pmc;
pmc               579 net/ipv6/mcast.c 	for_each_pmc_rcu(inet6, pmc) {
pmc               580 net/ipv6/mcast.c 		if (pmc->ifindex != gsf->gf_interface)
pmc               582 net/ipv6/mcast.c 		if (ipv6_addr_equal(group, &pmc->addr))
pmc               585 net/ipv6/mcast.c 	if (!pmc)		/* must have a prior join */
pmc               587 net/ipv6/mcast.c 	gsf->gf_fmode = pmc->sfmode;
pmc               588 net/ipv6/mcast.c 	psl = pmc->sflist;
pmc               729 net/ipv6/mcast.c 	struct ifmcaddr6 *pmc;
pmc               737 net/ipv6/mcast.c 	pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
pmc               738 net/ipv6/mcast.c 	if (!pmc)
pmc               742 net/ipv6/mcast.c 	spin_lock_init(&pmc->mca_lock);
pmc               743 net/ipv6/mcast.c 	pmc->idev = im->idev;
pmc               745 net/ipv6/mcast.c 	pmc->mca_addr = im->mca_addr;
pmc               746 net/ipv6/mcast.c 	pmc->mca_crcount = idev->mc_qrv;
pmc               747 net/ipv6/mcast.c 	pmc->mca_sfmode = im->mca_sfmode;
pmc               748 net/ipv6/mcast.c 	if (pmc->mca_sfmode == MCAST_INCLUDE) {
pmc               751 net/ipv6/mcast.c 		pmc->mca_tomb = im->mca_tomb;
pmc               752 net/ipv6/mcast.c 		pmc->mca_sources = im->mca_sources;
pmc               754 net/ipv6/mcast.c 		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
pmc               755 net/ipv6/mcast.c 			psf->sf_crcount = pmc->mca_crcount;
pmc               760 net/ipv6/mcast.c 	pmc->next = idev->mc_tomb;
pmc               761 net/ipv6/mcast.c 	idev->mc_tomb = pmc;
pmc               767 net/ipv6/mcast.c 	struct ifmcaddr6 *pmc, *pmc_prev;
pmc               773 net/ipv6/mcast.c 	for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) {
pmc               774 net/ipv6/mcast.c 		if (ipv6_addr_equal(&pmc->mca_addr, pmca))
pmc               776 net/ipv6/mcast.c 		pmc_prev = pmc;
pmc               778 net/ipv6/mcast.c 	if (pmc) {
pmc               780 net/ipv6/mcast.c 			pmc_prev->next = pmc->next;
pmc               782 net/ipv6/mcast.c 			idev->mc_tomb = pmc->next;
pmc               787 net/ipv6/mcast.c 	if (pmc) {
pmc               788 net/ipv6/mcast.c 		im->idev = pmc->idev;
pmc               790 net/ipv6/mcast.c 			swap(im->mca_tomb, pmc->mca_tomb);
pmc               791 net/ipv6/mcast.c 			swap(im->mca_sources, pmc->mca_sources);
pmc               797 net/ipv6/mcast.c 		in6_dev_put(pmc->idev);
pmc               798 net/ipv6/mcast.c 		ip6_mc_clear_src(pmc);
pmc               799 net/ipv6/mcast.c 		kfree(pmc);
pmc               806 net/ipv6/mcast.c 	struct ifmcaddr6 *pmc, *nextpmc;
pmc               809 net/ipv6/mcast.c 	pmc = idev->mc_tomb;
pmc               813 net/ipv6/mcast.c 	for (; pmc; pmc = nextpmc) {
pmc               814 net/ipv6/mcast.c 		nextpmc = pmc->next;
pmc               815 net/ipv6/mcast.c 		ip6_mc_clear_src(pmc);
pmc               816 net/ipv6/mcast.c 		in6_dev_put(pmc->idev);
pmc               817 net/ipv6/mcast.c 		kfree(pmc);
pmc               822 net/ipv6/mcast.c 	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
pmc               825 net/ipv6/mcast.c 		spin_lock_bh(&pmc->mca_lock);
pmc               826 net/ipv6/mcast.c 		psf = pmc->mca_tomb;
pmc               827 net/ipv6/mcast.c 		pmc->mca_tomb = NULL;
pmc               828 net/ipv6/mcast.c 		spin_unlock_bh(&pmc->mca_lock);
pmc              1106 net/ipv6/mcast.c static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
pmc              1113 net/ipv6/mcast.c 	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
pmc              1119 net/ipv6/mcast.c 			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
pmc              1128 net/ipv6/mcast.c 	pmc->mca_flags &= ~MAF_GSQUERY;
pmc              1134 net/ipv6/mcast.c static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
pmc              1140 net/ipv6/mcast.c 	if (pmc->mca_sfmode == MCAST_EXCLUDE)
pmc              1141 net/ipv6/mcast.c 		return mld_xmarksources(pmc, nsrcs, srcs);
pmc              1146 net/ipv6/mcast.c 	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
pmc              1158 net/ipv6/mcast.c 		pmc->mca_flags &= ~MAF_GSQUERY;
pmc              1161 net/ipv6/mcast.c 	pmc->mca_flags |= MAF_GSQUERY;
pmc              1506 net/ipv6/mcast.c static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
pmc              1514 net/ipv6/mcast.c 		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
pmc              1515 net/ipv6/mcast.c 			if (pmc->mca_sfmode == MCAST_INCLUDE)
pmc              1522 net/ipv6/mcast.c 			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
pmc              1533 net/ipv6/mcast.c 		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
pmc              1536 net/ipv6/mcast.c 		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
pmc              1541 net/ipv6/mcast.c 		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
pmc              1543 net/ipv6/mcast.c 		if (pmc->mca_sfmode == MCAST_INCLUDE)
pmc              1551 net/ipv6/mcast.c mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
pmc              1556 net/ipv6/mcast.c 	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
pmc              1557 net/ipv6/mcast.c 		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
pmc              1701 net/ipv6/mcast.c static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
pmc              1703 net/ipv6/mcast.c 	return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
pmc              1706 net/ipv6/mcast.c static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
pmc              1713 net/ipv6/mcast.c 		skb = mld_newpack(pmc->idev, mtu);
pmc              1721 net/ipv6/mcast.c 	pgr->grec_mca = pmc->mca_addr;	/* structure copy */
pmc              1730 net/ipv6/mcast.c static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
pmc              1733 net/ipv6/mcast.c 	struct inet6_dev *idev = pmc->idev;
pmc              1741 net/ipv6/mcast.c 	if (pmc->mca_flags & MAF_NOREPORT)
pmc              1755 net/ipv6/mcast.c 	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
pmc              1765 net/ipv6/mcast.c 		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
pmc              1778 net/ipv6/mcast.c 		if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
pmc              1786 net/ipv6/mcast.c 		if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
pmc              1787 net/ipv6/mcast.c 		     (!gdeleted && pmc->mca_crcount)) &&
pmc              1809 net/ipv6/mcast.c 			skb = add_grhead(skb, pmc, type, &pgr, mtu);
pmc              1838 net/ipv6/mcast.c 		if (pmc->mca_crcount || isquery || crsend) {
pmc              1844 net/ipv6/mcast.c 			skb = add_grhead(skb, pmc, type, &pgr, mtu);
pmc              1851 net/ipv6/mcast.c 		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
pmc              1855 net/ipv6/mcast.c static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
pmc              1861 net/ipv6/mcast.c 	if (!pmc) {
pmc              1862 net/ipv6/mcast.c 		for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
pmc              1863 net/ipv6/mcast.c 			if (pmc->mca_flags & MAF_NOREPORT)
pmc              1865 net/ipv6/mcast.c 			spin_lock_bh(&pmc->mca_lock);
pmc              1866 net/ipv6/mcast.c 			if (pmc->mca_sfcount[MCAST_EXCLUDE])
pmc              1870 net/ipv6/mcast.c 			skb = add_grec(skb, pmc, type, 0, 0, 0);
pmc              1871 net/ipv6/mcast.c 			spin_unlock_bh(&pmc->mca_lock);
pmc              1874 net/ipv6/mcast.c 		spin_lock_bh(&pmc->mca_lock);
pmc              1875 net/ipv6/mcast.c 		if (pmc->mca_sfcount[MCAST_EXCLUDE])
pmc              1879 net/ipv6/mcast.c 		skb = add_grec(skb, pmc, type, 0, 0, 0);
pmc              1880 net/ipv6/mcast.c 		spin_unlock_bh(&pmc->mca_lock);
pmc              1910 net/ipv6/mcast.c 	struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
pmc              1919 net/ipv6/mcast.c 	for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) {
pmc              1920 net/ipv6/mcast.c 		pmc_next = pmc->next;
pmc              1921 net/ipv6/mcast.c 		if (pmc->mca_sfmode == MCAST_INCLUDE) {
pmc              1924 net/ipv6/mcast.c 			skb = add_grec(skb, pmc, type, 1, 0, 0);
pmc              1925 net/ipv6/mcast.c 			skb = add_grec(skb, pmc, dtype, 1, 1, 0);
pmc              1927 net/ipv6/mcast.c 		if (pmc->mca_crcount) {
pmc              1928 net/ipv6/mcast.c 			if (pmc->mca_sfmode == MCAST_EXCLUDE) {
pmc              1930 net/ipv6/mcast.c 				skb = add_grec(skb, pmc, type, 1, 0, 0);
pmc              1932 net/ipv6/mcast.c 			pmc->mca_crcount--;
pmc              1933 net/ipv6/mcast.c 			if (pmc->mca_crcount == 0) {
pmc              1934 net/ipv6/mcast.c 				mld_clear_zeros(&pmc->mca_tomb);
pmc              1935 net/ipv6/mcast.c 				mld_clear_zeros(&pmc->mca_sources);
pmc              1938 net/ipv6/mcast.c 		if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
pmc              1939 net/ipv6/mcast.c 		    !pmc->mca_sources) {
pmc              1944 net/ipv6/mcast.c 			in6_dev_put(pmc->idev);
pmc              1945 net/ipv6/mcast.c 			kfree(pmc);
pmc              1947 net/ipv6/mcast.c 			pmc_prev = pmc;
pmc              1952 net/ipv6/mcast.c 	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
pmc              1953 net/ipv6/mcast.c 		spin_lock_bh(&pmc->mca_lock);
pmc              1954 net/ipv6/mcast.c 		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
pmc              1961 net/ipv6/mcast.c 		skb = add_grec(skb, pmc, type, 0, 0, 0);
pmc              1962 net/ipv6/mcast.c 		skb = add_grec(skb, pmc, dtype, 0, 1, 0);	/* deleted sources */
pmc              1965 net/ipv6/mcast.c 		if (pmc->mca_crcount) {
pmc              1966 net/ipv6/mcast.c 			if (pmc->mca_sfmode == MCAST_EXCLUDE)
pmc              1970 net/ipv6/mcast.c 			skb = add_grec(skb, pmc, type, 0, 0, 0);
pmc              1971 net/ipv6/mcast.c 			pmc->mca_crcount--;
pmc              1973 net/ipv6/mcast.c 		spin_unlock_bh(&pmc->mca_lock);
pmc              2080 net/ipv6/mcast.c 	struct ifmcaddr6 *pmc;
pmc              2088 net/ipv6/mcast.c 	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
pmc              2089 net/ipv6/mcast.c 		spin_lock_bh(&pmc->mca_lock);
pmc              2090 net/ipv6/mcast.c 		if (pmc->mca_sfcount[MCAST_EXCLUDE])
pmc              2094 net/ipv6/mcast.c 		skb = add_grec(skb, pmc, type, 0, 0, 1);
pmc              2095 net/ipv6/mcast.c 		spin_unlock_bh(&pmc->mca_lock);
pmc              2128 net/ipv6/mcast.c static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
pmc              2135 net/ipv6/mcast.c 	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
pmc              2146 net/ipv6/mcast.c 		struct inet6_dev *idev = pmc->idev;
pmc              2152 net/ipv6/mcast.c 			pmc->mca_sources = psf->sf_next;
pmc              2153 net/ipv6/mcast.c 		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
pmc              2156 net/ipv6/mcast.c 			psf->sf_next = pmc->mca_tomb;
pmc              2157 net/ipv6/mcast.c 			pmc->mca_tomb = psf;
pmc              2169 net/ipv6/mcast.c 	struct ifmcaddr6 *pmc;
pmc              2176 net/ipv6/mcast.c 	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
pmc              2177 net/ipv6/mcast.c 		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
pmc              2180 net/ipv6/mcast.c 	if (!pmc) {
pmc              2185 net/ipv6/mcast.c 	spin_lock_bh(&pmc->mca_lock);
pmc              2186 net/ipv6/mcast.c 	sf_markstate(pmc);
pmc              2188 net/ipv6/mcast.c 		if (!pmc->mca_sfcount[sfmode]) {
pmc              2189 net/ipv6/mcast.c 			spin_unlock_bh(&pmc->mca_lock);
pmc              2193 net/ipv6/mcast.c 		pmc->mca_sfcount[sfmode]--;
pmc              2197 net/ipv6/mcast.c 		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
pmc              2203 net/ipv6/mcast.c 	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
pmc              2204 net/ipv6/mcast.c 	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
pmc              2205 net/ipv6/mcast.c 	    pmc->mca_sfcount[MCAST_INCLUDE]) {
pmc              2209 net/ipv6/mcast.c 		pmc->mca_sfmode = MCAST_INCLUDE;
pmc              2210 net/ipv6/mcast.c 		pmc->mca_crcount = idev->mc_qrv;
pmc              2211 net/ipv6/mcast.c 		idev->mc_ifc_count = pmc->mca_crcount;
pmc              2212 net/ipv6/mcast.c 		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
pmc              2214 net/ipv6/mcast.c 		mld_ifc_event(pmc->idev);
pmc              2215 net/ipv6/mcast.c 	} else if (sf_setstate(pmc) || changerec)
pmc              2216 net/ipv6/mcast.c 		mld_ifc_event(pmc->idev);
pmc              2217 net/ipv6/mcast.c 	spin_unlock_bh(&pmc->mca_lock);
pmc              2225 net/ipv6/mcast.c static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
pmc              2231 net/ipv6/mcast.c 	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
pmc              2245 net/ipv6/mcast.c 			pmc->mca_sources = psf;
pmc              2251 net/ipv6/mcast.c static void sf_markstate(struct ifmcaddr6 *pmc)
pmc              2254 net/ipv6/mcast.c 	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
pmc              2256 net/ipv6/mcast.c 	for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
pmc              2257 net/ipv6/mcast.c 		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
pmc              2265 net/ipv6/mcast.c static int sf_setstate(struct ifmcaddr6 *pmc)
pmc              2268 net/ipv6/mcast.c 	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
pmc              2269 net/ipv6/mcast.c 	int qrv = pmc->idev->mc_qrv;
pmc              2273 net/ipv6/mcast.c 	for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
pmc              2274 net/ipv6/mcast.c 		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
pmc              2283 net/ipv6/mcast.c 				for (dpsf = pmc->mca_tomb; dpsf;
pmc              2294 net/ipv6/mcast.c 						pmc->mca_tomb = dpsf->sf_next;
pmc              2306 net/ipv6/mcast.c 			for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next)
pmc              2316 net/ipv6/mcast.c 				dpsf->sf_next = pmc->mca_tomb;
pmc              2317 net/ipv6/mcast.c 				pmc->mca_tomb = dpsf;
pmc              2333 net/ipv6/mcast.c 	struct ifmcaddr6 *pmc;
pmc              2340 net/ipv6/mcast.c 	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
pmc              2341 net/ipv6/mcast.c 		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
pmc              2344 net/ipv6/mcast.c 	if (!pmc) {
pmc              2349 net/ipv6/mcast.c 	spin_lock_bh(&pmc->mca_lock);
pmc              2351 net/ipv6/mcast.c 	sf_markstate(pmc);
pmc              2352 net/ipv6/mcast.c 	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
pmc              2354 net/ipv6/mcast.c 		pmc->mca_sfcount[sfmode]++;
pmc              2357 net/ipv6/mcast.c 		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
pmc              2365 net/ipv6/mcast.c 			pmc->mca_sfcount[sfmode]--;
pmc              2367 net/ipv6/mcast.c 			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
pmc              2368 net/ipv6/mcast.c 	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
pmc              2372 net/ipv6/mcast.c 		if (pmc->mca_sfcount[MCAST_EXCLUDE])
pmc              2373 net/ipv6/mcast.c 			pmc->mca_sfmode = MCAST_EXCLUDE;
pmc              2374 net/ipv6/mcast.c 		else if (pmc->mca_sfcount[MCAST_INCLUDE])
pmc              2375 net/ipv6/mcast.c 			pmc->mca_sfmode = MCAST_INCLUDE;
pmc              2378 net/ipv6/mcast.c 		pmc->mca_crcount = idev->mc_qrv;
pmc              2379 net/ipv6/mcast.c 		idev->mc_ifc_count = pmc->mca_crcount;
pmc              2380 net/ipv6/mcast.c 		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
pmc              2383 net/ipv6/mcast.c 	} else if (sf_setstate(pmc))
pmc              2385 net/ipv6/mcast.c 	spin_unlock_bh(&pmc->mca_lock);
pmc              2390 net/ipv6/mcast.c static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
pmc              2394 net/ipv6/mcast.c 	for (psf = pmc->mca_tomb; psf; psf = nextpsf) {
pmc              2398 net/ipv6/mcast.c 	pmc->mca_tomb = NULL;
pmc              2399 net/ipv6/mcast.c 	for (psf = pmc->mca_sources; psf; psf = nextpsf) {
pmc              2403 net/ipv6/mcast.c 	pmc->mca_sources = NULL;
pmc              2404 net/ipv6/mcast.c 	pmc->mca_sfmode = MCAST_EXCLUDE;
pmc              2405 net/ipv6/mcast.c 	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
pmc              2406 net/ipv6/mcast.c 	pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
pmc              2629 net/ipv6/mcast.c 	struct ifmcaddr6 *pmc;
pmc              2635 net/ipv6/mcast.c 		for (pmc = idev->mc_list; pmc; pmc = pmc->next)
pmc              2636 net/ipv6/mcast.c 			igmp6_join_group(pmc);
pmc                71 tools/testing/selftests/powerpc/pmu/ebb/ebb.c bool ebb_check_count(int pmc, u64 sample_period, int fudge)
pmc                75 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	count = ebb_state.stats.pmc_count[PMC_INDEX(pmc)];
pmc                81 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 			pmc, count, lower, lower - count);
pmc                89 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 			pmc, count, upper, count - upper);
pmc                94 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 		pmc, count, lower, upper, count - lower, upper - count);
pmc               252 tools/testing/selftests/powerpc/pmu/ebb/ebb.c int count_pmc(int pmc, uint32_t sample_period)
pmc               260 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	val = read_pmc(pmc);
pmc               264 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 		ebb_state.stats.pmc_count[PMC_INDEX(pmc)] += val - start_value;
pmc               266 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	trace_log_reg(ebb_state.trace, SPRN_PMC1 + pmc - 1, val);
pmc               269 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	write_pmc(pmc, start_value);
pmc               442 tools/testing/selftests/powerpc/pmu/ebb/ebb.c void write_pmc(int pmc, u64 value)
pmc               444 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	switch (pmc) {
pmc               454 tools/testing/selftests/powerpc/pmu/ebb/ebb.c u64 read_pmc(int pmc)
pmc               456 tools/testing/selftests/powerpc/pmu/ebb/ebb.c 	switch (pmc) {
pmc                14 tools/testing/selftests/powerpc/pmu/ebb/ebb.h #define PMC_INDEX(pmc)	((pmc)-1)
pmc                41 tools/testing/selftests/powerpc/pmu/ebb/ebb.h static inline void ebb_enable_pmc_counting(int pmc)
pmc                43 tools/testing/selftests/powerpc/pmu/ebb/ebb.h 	ebb_state.pmc_enable[PMC_INDEX(pmc)] = true;
pmc                46 tools/testing/selftests/powerpc/pmu/ebb/ebb.h bool ebb_check_count(int pmc, u64 sample_period, int fudge);
pmc                60 tools/testing/selftests/powerpc/pmu/ebb/ebb.h int count_pmc(int pmc, uint32_t sample_period);
pmc                65 tools/testing/selftests/powerpc/pmu/ebb/ebb.h void write_pmc(int pmc, u64 value);
pmc                66 tools/testing/selftests/powerpc/pmu/ebb/ebb.h u64 read_pmc(int pmc);
pmc                32 virt/kvm/arm/pmu.c static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
pmc                37 virt/kvm/arm/pmu.c 	pmc -= pmc->idx;
pmc                38 virt/kvm/arm/pmu.c 	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
pmc                47 virt/kvm/arm/pmu.c static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
pmc                49 virt/kvm/arm/pmu.c 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
pmc                51 virt/kvm/arm/pmu.c 	return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
pmc                70 virt/kvm/arm/pmu.c static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
pmc                72 virt/kvm/arm/pmu.c 	if (kvm_pmu_pmc_is_chained(pmc) &&
pmc                73 virt/kvm/arm/pmu.c 	    kvm_pmu_idx_is_high_counter(pmc->idx))
pmc                74 virt/kvm/arm/pmu.c 		return pmc - 1;
pmc                76 virt/kvm/arm/pmu.c 	return pmc;
pmc               105 virt/kvm/arm/pmu.c 					  struct kvm_pmc *pmc)
pmc               109 virt/kvm/arm/pmu.c 	if (kvm_pmu_pmc_is_chained(pmc)) {
pmc               110 virt/kvm/arm/pmu.c 		pmc = kvm_pmu_get_canonical_pmc(pmc);
pmc               111 virt/kvm/arm/pmu.c 		reg = PMEVCNTR0_EL0 + pmc->idx;
pmc               118 virt/kvm/arm/pmu.c 		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
pmc               119 virt/kvm/arm/pmu.c 		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
pmc               127 virt/kvm/arm/pmu.c 	if (pmc->perf_event)
pmc               128 virt/kvm/arm/pmu.c 		counter += perf_event_read_value(pmc->perf_event, &enabled,
pmc               143 virt/kvm/arm/pmu.c 	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
pmc               145 virt/kvm/arm/pmu.c 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
pmc               147 virt/kvm/arm/pmu.c 	if (kvm_pmu_pmc_is_chained(pmc) &&
pmc               178 virt/kvm/arm/pmu.c static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
pmc               180 virt/kvm/arm/pmu.c 	pmc = kvm_pmu_get_canonical_pmc(pmc);
pmc               181 virt/kvm/arm/pmu.c 	if (pmc->perf_event) {
pmc               182 virt/kvm/arm/pmu.c 		perf_event_disable(pmc->perf_event);
pmc               183 virt/kvm/arm/pmu.c 		perf_event_release_kernel(pmc->perf_event);
pmc               184 virt/kvm/arm/pmu.c 		pmc->perf_event = NULL;
pmc               194 virt/kvm/arm/pmu.c static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
pmc               198 virt/kvm/arm/pmu.c 	pmc = kvm_pmu_get_canonical_pmc(pmc);
pmc               199 virt/kvm/arm/pmu.c 	if (!pmc->perf_event)
pmc               202 virt/kvm/arm/pmu.c 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
pmc               204 virt/kvm/arm/pmu.c 	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
pmc               208 virt/kvm/arm/pmu.c 		reg = PMEVCNTR0_EL0 + pmc->idx;
pmc               214 virt/kvm/arm/pmu.c 	if (kvm_pmu_pmc_is_chained(pmc))
pmc               217 virt/kvm/arm/pmu.c 	kvm_pmu_release_perf_event(pmc);
pmc               231 virt/kvm/arm/pmu.c 		pmu->pmc[i].idx = i;
pmc               245 virt/kvm/arm/pmu.c 		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
pmc               261 virt/kvm/arm/pmu.c 		kvm_pmu_release_perf_event(&pmu->pmc[i]);
pmc               286 virt/kvm/arm/pmu.c 	struct kvm_pmc *pmc;
pmc               295 virt/kvm/arm/pmu.c 		pmc = &pmu->pmc[i];
pmc               301 virt/kvm/arm/pmu.c 		if (kvm_pmu_pmc_is_chained(pmc) &&
pmc               308 virt/kvm/arm/pmu.c 		if (pmc->perf_event) {
pmc               309 virt/kvm/arm/pmu.c 			perf_event_enable(pmc->perf_event);
pmc               310 virt/kvm/arm/pmu.c 			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
pmc               327 virt/kvm/arm/pmu.c 	struct kvm_pmc *pmc;
pmc               336 virt/kvm/arm/pmu.c 		pmc = &pmu->pmc[i];
pmc               342 virt/kvm/arm/pmu.c 		if (kvm_pmu_pmc_is_chained(pmc) &&
pmc               349 virt/kvm/arm/pmu.c 		if (pmc->perf_event)
pmc               350 virt/kvm/arm/pmu.c 			perf_event_disable(pmc->perf_event);
pmc               445 virt/kvm/arm/pmu.c 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
pmc               447 virt/kvm/arm/pmu.c 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
pmc               448 virt/kvm/arm/pmu.c 	int idx = pmc->idx;
pmc               459 virt/kvm/arm/pmu.c 	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
pmc               512 virt/kvm/arm/pmu.c 		if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
pmc               567 virt/kvm/arm/pmu.c 	struct kvm_pmc *pmc;
pmc               577 virt/kvm/arm/pmu.c 	pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);
pmc               579 virt/kvm/arm/pmu.c 	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
pmc               580 virt/kvm/arm/pmu.c 	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
pmc               583 virt/kvm/arm/pmu.c 	kvm_pmu_stop_counter(vcpu, pmc);
pmc               588 virt/kvm/arm/pmu.c 	    pmc->idx != ARMV8_PMU_CYCLE_IDX)
pmc               595 virt/kvm/arm/pmu.c 	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
pmc               600 virt/kvm/arm/pmu.c 	attr.config = (pmc->idx == ARMV8_PMU_CYCLE_IDX) ?
pmc               603 virt/kvm/arm/pmu.c 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
pmc               605 virt/kvm/arm/pmu.c 	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
pmc               612 virt/kvm/arm/pmu.c 		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
pmc               617 virt/kvm/arm/pmu.c 							 pmc + 1);
pmc               620 virt/kvm/arm/pmu.c 		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
pmc               626 virt/kvm/arm/pmu.c 						 kvm_pmu_perf_overflow, pmc);
pmc               635 virt/kvm/arm/pmu.c 	pmc->perf_event = event;
pmc               649 virt/kvm/arm/pmu.c 	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
pmc               651 virt/kvm/arm/pmu.c 	if (kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx)) {
pmc               656 virt/kvm/arm/pmu.c 		if (!kvm_pmu_pmc_is_chained(pmc))
pmc               657 virt/kvm/arm/pmu.c 			kvm_pmu_stop_counter(vcpu, pmc);
pmc               659 virt/kvm/arm/pmu.c 		set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
pmc               661 virt/kvm/arm/pmu.c 		clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);