se                330 arch/arm/kernel/module.c 	const Elf_Shdr *s, *se;
se                333 arch/arm/kernel/module.c 	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
se                477 arch/arm64/kernel/module.c 	const Elf_Shdr *s, *se;
se                480 arch/arm64/kernel/module.c 	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
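
    In the arm and arm64 module.c hits above, se is simply the one-past-the-end sentinel for the ELF section header array. A minimal stand-alone sketch of that walk follows; the lookup-by-name helper and its name are illustrative, not the kernel's own function.

	#include <linux/elf.h>
	#include <linux/string.h>

	/* Walk every section header; "se" is sechdrs + e_shnum. */
	static const Elf_Shdr *ex_find_section(const Elf_Ehdr *hdr,
					       const Elf_Shdr *sechdrs,
					       const char *secstrings,
					       const char *name)
	{
		const Elf_Shdr *s, *se;

		for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
			if (!strcmp(name, secstrings + s->sh_name))
				return s;

		return NULL;
	}
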
se                476 arch/ia64/include/asm/pal.h 			se		: 1,	/* Shared error.  MCA in a
se                444 arch/mips/include/asm/octeon/cvmx-pemx-defs.h 		uint64_t se:1;
se                448 arch/mips/include/asm/octeon/cvmx-pemx-defs.h 		uint64_t se:1;
se                483 arch/mips/include/asm/octeon/cvmx-pemx-defs.h 		uint64_t se:1;
se                487 arch/mips/include/asm/octeon/cvmx-pemx-defs.h 		uint64_t se:1;
se                522 arch/mips/include/asm/octeon/cvmx-pemx-defs.h 		uint64_t se:1;
se                526 arch/mips/include/asm/octeon/cvmx-pemx-defs.h 		uint64_t se:1;
se                 53 arch/s390/include/asm/nmi.h 		u64 se :  1; /* 16 storage error uncorrected */
se                432 arch/s390/kernel/nmi.c 		if (mci.se)
se                117 arch/sh/include/mach-se/mach/se.h #define __IO_PREFIX	se
se                484 crypto/twofish_common.c    ctx->s[0][i] = mds[0][q0[(a) ^ sa] ^ se]; \
se                492 crypto/twofish_common.c    ctx->s[0][i] = mds[0][q0[q0[(b) ^ sa] ^ se] ^ si]; \
se                500 crypto/twofish_common.c    ctx->s[0][i] = mds[0][q0[q0[q1[(b) ^ sa] ^ se] ^ si] ^ sm]; \
se                579 crypto/twofish_common.c 	u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0;
se                604 crypto/twofish_common.c 	CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
se                605 crypto/twofish_common.c 	CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
se                606 crypto/twofish_common.c 	CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
se                607 crypto/twofish_common.c 	CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
se                608 crypto/twofish_common.c 	CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
se                609 crypto/twofish_common.c 	CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
se                610 crypto/twofish_common.c 	CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
se                611 crypto/twofish_common.c 	CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
se                196 drivers/crypto/cavium/cpt/cpt_hw_types.h 		u64 se:8;
se                200 drivers/crypto/cavium/cpt/cpt_hw_types.h 		u64 se:8;
se                356 drivers/crypto/cavium/cpt/cptpf_main.c 	cpt->max_se_cores = pf_cnsts.s.se;
se               1146 drivers/gpu/drm/amd/amdgpu/amdgpu.h #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
se                623 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	uint32_t offset, se, sh, cu, wave, simd, data[32];
se                630 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
se                638 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
se                695 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
se                702 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
se                716 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c 	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
se                146 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 	unsigned se, sh, cu;
se                157 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
se                163 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 		if (se < max_se && sh < max_sh && cu < 16) {
se                164 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
se                165 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 			mask[se * max_sh + sh] |= 1u << cu;
se                168 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c 				  se, sh, cu);
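
    The amdgpu_gfx.c hits above parse a comma-separated list of "se.sh.cu" triples into a per-(shader engine, shader array) CU bitmask. A hedged sketch of that idiom, assuming comma separation between triples; the function name and loop shape are illustrative, only the bounds and mask indexing follow the lines shown.

	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Parse "se.sh.cu,se.sh.cu,..." into one u32 of CU bits per (se, sh). */
	static void ex_parse_disable_cu(const char *p, unsigned int max_se,
					unsigned int max_sh, u32 *mask)
	{
		unsigned int se, sh, cu;

		for (;;) {
			if (sscanf(p, "%u.%u.%u", &se, &sh, &cu) != 3)
				return;

			if (se < max_se && sh < max_sh && cu < 16)
				mask[se * max_sh + sh] |= 1u << cu;

			p = strchr(p, ',');
			if (!p)
				return;
			p++;		/* skip the comma, parse the next triple */
		}
	}
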
se                333 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
se               1382 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	unsigned se;
se               1393 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 	for (se = 0; se < num_se; se++) {
se               1395 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
se               1397 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		int idx = (se / 2) * 2;
se               1420 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 			unsigned rb0_mask = 1 << (se * rb_per_se);
se               1437 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
se               1455 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c 		gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
se               1679 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	unsigned se;
se               1703 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 	for (se = 0; se < num_se; se++) {
se               1705 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
se               1707 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		int idx = (se / 2) * 2;
se               1732 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 			unsigned rb0_mask = 1 << (se * rb_per_se);
se               1750 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
se               1769 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c 		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
se               3543 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	unsigned se;
se               3567 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 	for (se = 0; se < num_se; se++) {
se               3569 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
se               3571 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		int idx = (se / 2) * 2;
se               3596 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 			unsigned rb0_mask = 1 << (se * rb_per_se);
se               3614 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
se               3633 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 		gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
se                102 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c 	int i, se, sh, cu = 0;
se                109 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c 	for (se = 0; se < cu_info.num_shader_engines; se++)
se                111 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c 			cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
se                120 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c 	se = 0;
se                123 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c 			se_mask[se] |= 1 << cu;
se                126 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c 			se++;
se                127 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c 			if (se == cu_info.num_shader_engines) {
se                128 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c 				se = 0;
se                131 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c 		} while (cu >= cu_per_se[se] && cu < 32);
se               1283 drivers/gpu/drm/arm/malidp_hw.c 	const struct malidp_irq_map *se = &hw->map.se_irq_map;
se               1295 drivers/gpu/drm/arm/malidp_hw.c 	if (!(status & (se->irq_mask | se->err_mask)))
se               1299 drivers/gpu/drm/arm/malidp_hw.c 	if (status & se->err_mask)
se               1306 drivers/gpu/drm/arm/malidp_hw.c 	if (status & se->vsync_irq) {
se               3852 drivers/gpu/drm/i915/gt/intel_lrc.c 	struct virtual_engine *se = to_virtual_engine(src);
se               3856 drivers/gpu/drm/i915/gt/intel_lrc.c 					     se->siblings,
se               3857 drivers/gpu/drm/i915/gt/intel_lrc.c 					     se->num_siblings);
se               3861 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (se->num_bonds) {
se               3864 drivers/gpu/drm/i915/gt/intel_lrc.c 		de->bonds = kmemdup(se->bonds,
se               3865 drivers/gpu/drm/i915/gt/intel_lrc.c 				    sizeof(*se->bonds) * se->num_bonds,
se               3872 drivers/gpu/drm/i915/gt/intel_lrc.c 		de->num_bonds = se->num_bonds;
se               1127 drivers/gpu/drm/i915/gvt/gtt.c static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
se               1132 drivers/gpu/drm/i915/gvt/gtt.c 	se->type = ge->type;
se               1133 drivers/gpu/drm/i915/gvt/gtt.c 	se->val64 = ge->val64;
se               1136 drivers/gpu/drm/i915/gvt/gtt.c 	if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
se               1137 drivers/gpu/drm/i915/gvt/gtt.c 		ops->clear_ips(se);
se               1139 drivers/gpu/drm/i915/gvt/gtt.c 	ops->set_pfn(se, s->shadow_page.mfn);
se               1168 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_entry *se)
se               1180 drivers/gpu/drm/i915/gvt/gtt.c 	start_gfn = ops->get_pfn(se);
se               1193 drivers/gpu/drm/i915/gvt/gtt.c 		sub_se.val64 = se->val64;
se               1197 drivers/gpu/drm/i915/gvt/gtt.c 		sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
se               1204 drivers/gpu/drm/i915/gvt/gtt.c 	se->val64 &= ~_PAGE_DIRTY;
se               1206 drivers/gpu/drm/i915/gvt/gtt.c 	ops->clear_pse(se);
se               1207 drivers/gpu/drm/i915/gvt/gtt.c 	ops->clear_ips(se);
se               1208 drivers/gpu/drm/i915/gvt/gtt.c 	ops->set_pfn(se, sub_spt->shadow_page.mfn);
se               1209 drivers/gpu/drm/i915/gvt/gtt.c 	ppgtt_set_shadow_entry(spt, se, index);
se               1215 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_entry *se)
se               1218 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_entry entry = *se;
se               1227 drivers/gpu/drm/i915/gvt/gtt.c 	start_gfn = ops->get_pfn(se);
se               1249 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_entry se = *ge;
se               1270 drivers/gpu/drm/i915/gvt/gtt.c 		return split_64KB_gtt_entry(vgpu, spt, index, &se);
se               1275 drivers/gpu/drm/i915/gvt/gtt.c 			return split_2MB_gtt_entry(vgpu, spt, index, &se);
se               1293 drivers/gpu/drm/i915/gvt/gtt.c 	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
se               1294 drivers/gpu/drm/i915/gvt/gtt.c 	ppgtt_set_shadow_entry(spt, &se, index);
se               1304 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_entry se, ge;
se               1318 drivers/gpu/drm/i915/gvt/gtt.c 			ppgtt_get_shadow_entry(spt, &se, i);
se               1319 drivers/gpu/drm/i915/gvt/gtt.c 			ppgtt_generate_shadow_entry(&se, s, &ge);
se               1320 drivers/gpu/drm/i915/gvt/gtt.c 			ppgtt_set_shadow_entry(spt, &se, i);
se               1324 drivers/gpu/drm/i915/gvt/gtt.c 				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
se               1325 drivers/gpu/drm/i915/gvt/gtt.c 				ppgtt_set_shadow_entry(spt, &se, i);
se               1342 drivers/gpu/drm/i915/gvt/gtt.c 		struct intel_gvt_gtt_entry *se, unsigned long index)
se               1349 drivers/gpu/drm/i915/gvt/gtt.c 			       spt->shadow_page.type, se->val64, index);
se               1352 drivers/gpu/drm/i915/gvt/gtt.c 		    se->type, index, se->val64);
se               1354 drivers/gpu/drm/i915/gvt/gtt.c 	if (!ops->test_present(se))
se               1357 drivers/gpu/drm/i915/gvt/gtt.c 	if (ops->get_pfn(se) ==
se               1361 drivers/gpu/drm/i915/gvt/gtt.c 	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
se               1363 drivers/gpu/drm/i915/gvt/gtt.c 			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
se               1374 drivers/gpu/drm/i915/gvt/gtt.c 		WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
se               1376 drivers/gpu/drm/i915/gvt/gtt.c 		ppgtt_invalidate_pte(spt, se);
se               1382 drivers/gpu/drm/i915/gvt/gtt.c 			spt, se->val64, se->type);
se               1716 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_entry we, se;
se               1744 drivers/gpu/drm/i915/gvt/gtt.c 			ppgtt_get_shadow_entry(spt, &se, index);
se               1745 drivers/gpu/drm/i915/gvt/gtt.c 			ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
se               1748 drivers/gpu/drm/i915/gvt/gtt.c 			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
se               1749 drivers/gpu/drm/i915/gvt/gtt.c 			ppgtt_set_shadow_entry(spt, &se, index);
se               1780 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_entry se;
se               1787 drivers/gpu/drm/i915/gvt/gtt.c 		ppgtt_get_shadow_root_entry(mm, &se, index);
se               1789 drivers/gpu/drm/i915/gvt/gtt.c 		if (!ops->test_present(&se))
se               1792 drivers/gpu/drm/i915/gvt/gtt.c 		ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
se               1793 drivers/gpu/drm/i915/gvt/gtt.c 		se.val64 = 0;
se               1794 drivers/gpu/drm/i915/gvt/gtt.c 		ppgtt_set_shadow_root_entry(mm, &se, index);
se               1797 drivers/gpu/drm/i915/gvt/gtt.c 				       NULL, se.type, se.val64, index);
se               1811 drivers/gpu/drm/i915/gvt/gtt.c 	struct intel_gvt_gtt_entry ge, se;
se               1834 drivers/gpu/drm/i915/gvt/gtt.c 		ppgtt_generate_shadow_entry(&se, spt, &ge);
se               1835 drivers/gpu/drm/i915/gvt/gtt.c 		ppgtt_set_shadow_root_entry(mm, &se, index);
se               1838 drivers/gpu/drm/i915/gvt/gtt.c 				       NULL, se.type, se.val64, index);
se               2390 drivers/gpu/drm/i915/gvt/gtt.c 		struct intel_gvt_gtt_entry se;
se               2392 drivers/gpu/drm/i915/gvt/gtt.c 		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
se               2393 drivers/gpu/drm/i915/gvt/gtt.c 		se.type = get_entry_type(type - 1);
se               2394 drivers/gpu/drm/i915/gvt/gtt.c 		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
se               2399 drivers/gpu/drm/i915/gvt/gtt.c 		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
se               2401 drivers/gpu/drm/i915/gvt/gtt.c 			se.val64 |= PPAT_CACHED;
se               2404 drivers/gpu/drm/i915/gvt/gtt.c 			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
se                137 drivers/gpu/drm/radeon/cik.c static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
se               6539 drivers/gpu/drm/radeon/cik.c static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
se               6544 drivers/gpu/drm/radeon/cik.c 	cik_select_se_sh(rdev, se, sh);
se                125 drivers/gpu/drm/radeon/si.c static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
se               5299 drivers/gpu/drm/radeon/si.c static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
se               5304 drivers/gpu/drm/radeon/si.c 	si_select_se_sh(rdev, se, sh);
se                 77 drivers/i2c/busses/i2c-qcom-geni.c 	struct geni_se se;
se                155 drivers/i2c/busses/i2c-qcom-geni.c 	writel_relaxed(0, gi2c->se.base + SE_GENI_CLK_SEL);
se                158 drivers/i2c/busses/i2c-qcom-geni.c 	writel_relaxed(val, gi2c->se.base + GENI_SER_M_CLK_CFG);
se                163 drivers/i2c/busses/i2c-qcom-geni.c 	writel_relaxed(val, gi2c->se.base + SE_I2C_SCL_COUNTERS);
se                168 drivers/i2c/busses/i2c-qcom-geni.c 	u32 m_cmd = readl_relaxed(gi2c->se.base + SE_GENI_M_CMD0);
se                169 drivers/i2c/busses/i2c-qcom-geni.c 	u32 m_stat = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS);
se                170 drivers/i2c/busses/i2c-qcom-geni.c 	u32 geni_s = readl_relaxed(gi2c->se.base + SE_GENI_STATUS);
se                171 drivers/i2c/busses/i2c-qcom-geni.c 	u32 geni_ios = readl_relaxed(gi2c->se.base + SE_GENI_IOS);
se                172 drivers/i2c/busses/i2c-qcom-geni.c 	u32 dma = readl_relaxed(gi2c->se.base + SE_GENI_DMA_MODE_EN);
se                176 drivers/i2c/busses/i2c-qcom-geni.c 		rx_st = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
se                177 drivers/i2c/busses/i2c-qcom-geni.c 		tx_st = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
se                179 drivers/i2c/busses/i2c-qcom-geni.c 		rx_st = readl_relaxed(gi2c->se.base + SE_GENI_RX_FIFO_STATUS);
se                180 drivers/i2c/busses/i2c-qcom-geni.c 		tx_st = readl_relaxed(gi2c->se.base + SE_GENI_TX_FIFO_STATUS);
se                182 drivers/i2c/busses/i2c-qcom-geni.c 	dev_dbg(gi2c->se.dev, "DMA:%d tx_stat:0x%x, rx_stat:0x%x, irq-stat:0x%x\n",
se                184 drivers/i2c/busses/i2c-qcom-geni.c 	dev_dbg(gi2c->se.dev, "m_cmd:0x%x, geni_status:0x%x, geni_ios:0x%x\n",
se                193 drivers/i2c/busses/i2c-qcom-geni.c 		dev_dbg(gi2c->se.dev, "len:%d, slv-addr:0x%x, RD/WR:%d\n",
se                197 drivers/i2c/busses/i2c-qcom-geni.c 		dev_err(gi2c->se.dev, "%s\n", gi2c_log[err].msg);
se                205 drivers/i2c/busses/i2c-qcom-geni.c 	void __iomem *base = gi2c->se.base;
se                246 drivers/i2c/busses/i2c-qcom-geni.c 		dev_dbg(gi2c->se.dev, "i2c dma tx:0x%x, dma rx:0x%x\n",
se                312 drivers/i2c/busses/i2c-qcom-geni.c 	geni_se_abort_m_cmd(&gi2c->se);
se                316 drivers/i2c/busses/i2c-qcom-geni.c 		val = readl_relaxed(gi2c->se.base + SE_GENI_M_IRQ_STATUS);
se                320 drivers/i2c/busses/i2c-qcom-geni.c 		dev_err(gi2c->se.dev, "Timeout abort_m_cmd\n");
se                328 drivers/i2c/busses/i2c-qcom-geni.c 	writel_relaxed(1, gi2c->se.base + SE_DMA_RX_FSM_RST);
se                331 drivers/i2c/busses/i2c-qcom-geni.c 		val = readl_relaxed(gi2c->se.base + SE_DMA_RX_IRQ_STAT);
se                335 drivers/i2c/busses/i2c-qcom-geni.c 		dev_err(gi2c->se.dev, "Timeout resetting RX_FSM\n");
se                343 drivers/i2c/busses/i2c-qcom-geni.c 	writel_relaxed(1, gi2c->se.base + SE_DMA_TX_FSM_RST);
se                346 drivers/i2c/busses/i2c-qcom-geni.c 		val = readl_relaxed(gi2c->se.base + SE_DMA_TX_IRQ_STAT);
se                350 drivers/i2c/busses/i2c-qcom-geni.c 		dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n");
se                359 drivers/i2c/busses/i2c-qcom-geni.c 	struct geni_se *se = &gi2c->se;
se                366 drivers/i2c/busses/i2c-qcom-geni.c 		geni_se_select_mode(se, GENI_SE_DMA);
se                368 drivers/i2c/busses/i2c-qcom-geni.c 		geni_se_select_mode(se, GENI_SE_FIFO);
se                370 drivers/i2c/busses/i2c-qcom-geni.c 	writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN);
se                371 drivers/i2c/busses/i2c-qcom-geni.c 	geni_se_setup_m_cmd(se, I2C_READ, m_param);
se                373 drivers/i2c/busses/i2c-qcom-geni.c 	if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) {
se                374 drivers/i2c/busses/i2c-qcom-geni.c 		geni_se_select_mode(se, GENI_SE_FIFO);
se                387 drivers/i2c/busses/i2c-qcom-geni.c 		geni_se_rx_dma_unprep(se, rx_dma, len);
se                400 drivers/i2c/busses/i2c-qcom-geni.c 	struct geni_se *se = &gi2c->se;
se                407 drivers/i2c/busses/i2c-qcom-geni.c 		geni_se_select_mode(se, GENI_SE_DMA);
se                409 drivers/i2c/busses/i2c-qcom-geni.c 		geni_se_select_mode(se, GENI_SE_FIFO);
se                411 drivers/i2c/busses/i2c-qcom-geni.c 	writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN);
se                412 drivers/i2c/busses/i2c-qcom-geni.c 	geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
se                414 drivers/i2c/busses/i2c-qcom-geni.c 	if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) {
se                415 drivers/i2c/busses/i2c-qcom-geni.c 		geni_se_select_mode(se, GENI_SE_FIFO);
se                421 drivers/i2c/busses/i2c-qcom-geni.c 		writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);
se                431 drivers/i2c/busses/i2c-qcom-geni.c 		geni_se_tx_dma_unprep(se, tx_dma, len);
se                447 drivers/i2c/busses/i2c-qcom-geni.c 	ret = pm_runtime_get_sync(gi2c->se.dev);
se                449 drivers/i2c/busses/i2c-qcom-geni.c 		dev_err(gi2c->se.dev, "error turning SE resources:%d\n", ret);
se                450 drivers/i2c/busses/i2c-qcom-geni.c 		pm_runtime_put_noidle(gi2c->se.dev);
se                452 drivers/i2c/busses/i2c-qcom-geni.c 		pm_runtime_set_suspended(gi2c->se.dev);
se                474 drivers/i2c/busses/i2c-qcom-geni.c 	pm_runtime_mark_last_busy(gi2c->se.dev);
se                475 drivers/i2c/busses/i2c-qcom-geni.c 	pm_runtime_put_autosuspend(gi2c->se.dev);
se                510 drivers/i2c/busses/i2c-qcom-geni.c 	gi2c->se.dev = &pdev->dev;
se                511 drivers/i2c/busses/i2c-qcom-geni.c 	gi2c->se.wrapper = dev_get_drvdata(pdev->dev.parent);
se                513 drivers/i2c/busses/i2c-qcom-geni.c 	gi2c->se.base = devm_ioremap_resource(&pdev->dev, res);
se                514 drivers/i2c/busses/i2c-qcom-geni.c 	if (IS_ERR(gi2c->se.base))
se                515 drivers/i2c/busses/i2c-qcom-geni.c 		return PTR_ERR(gi2c->se.base);
se                517 drivers/i2c/busses/i2c-qcom-geni.c 	gi2c->se.clk = devm_clk_get(&pdev->dev, "se");
se                518 drivers/i2c/busses/i2c-qcom-geni.c 	if (IS_ERR(gi2c->se.clk) && !has_acpi_companion(&pdev->dev)) {
se                519 drivers/i2c/busses/i2c-qcom-geni.c 		ret = PTR_ERR(gi2c->se.clk);
se                566 drivers/i2c/busses/i2c-qcom-geni.c 	ret = geni_se_resources_on(&gi2c->se);
se                571 drivers/i2c/busses/i2c-qcom-geni.c 	proto = geni_se_read_proto(&gi2c->se);
se                572 drivers/i2c/busses/i2c-qcom-geni.c 	tx_depth = geni_se_get_tx_fifo_depth(&gi2c->se);
se                575 drivers/i2c/busses/i2c-qcom-geni.c 		geni_se_resources_off(&gi2c->se);
se                579 drivers/i2c/busses/i2c-qcom-geni.c 	geni_se_init(&gi2c->se, gi2c->tx_wm, tx_depth);
se                580 drivers/i2c/busses/i2c-qcom-geni.c 	geni_se_config_packing(&gi2c->se, BITS_PER_BYTE, PACKING_BYTES_PW,
se                582 drivers/i2c/busses/i2c-qcom-geni.c 	ret = geni_se_resources_off(&gi2c->se);
se                591 drivers/i2c/busses/i2c-qcom-geni.c 	pm_runtime_set_suspended(gi2c->se.dev);
se                592 drivers/i2c/busses/i2c-qcom-geni.c 	pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
se                593 drivers/i2c/busses/i2c-qcom-geni.c 	pm_runtime_use_autosuspend(gi2c->se.dev);
se                594 drivers/i2c/busses/i2c-qcom-geni.c 	pm_runtime_enable(gi2c->se.dev);
se                599 drivers/i2c/busses/i2c-qcom-geni.c 		pm_runtime_disable(gi2c->se.dev);
se                613 drivers/i2c/busses/i2c-qcom-geni.c 	pm_runtime_disable(gi2c->se.dev);
se                623 drivers/i2c/busses/i2c-qcom-geni.c 	ret = geni_se_resources_off(&gi2c->se);
se                640 drivers/i2c/busses/i2c-qcom-geni.c 	ret = geni_se_resources_on(&gi2c->se);
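
    Taken together, the i2c-qcom-geni hits above show the usual shape of a GENI client driver: fill in struct geni_se, power the serial engine on, read back the firmware-programmed protocol and FIFO depth, then program watermarks, packing and transfer mode. A compressed, hedged sketch of that probe sequence; the function name, watermark values and error handling are illustrative, only the geni_se calls mirror the listing.

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/kernel.h>
	#include <linux/platform_device.h>
	#include <linux/qcom-geni-se.h>

	static int ex_geni_client_probe(struct platform_device *pdev)
	{
		struct geni_se se = {};
		struct resource *res;
		u32 proto, tx_depth;
		int ret;

		se.dev = &pdev->dev;
		se.wrapper = dev_get_drvdata(pdev->dev.parent);

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		se.base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(se.base))
			return PTR_ERR(se.base);

		se.clk = devm_clk_get(&pdev->dev, "se");
		if (IS_ERR(se.clk))
			return PTR_ERR(se.clk);

		ret = geni_se_resources_on(&se);
		if (ret)
			return ret;

		proto = geni_se_read_proto(&se);	/* protocol loaded by firmware */
		tx_depth = geni_se_get_tx_fifo_depth(&se);

		geni_se_init(&se, 1, tx_depth);		/* watermark levels, illustrative */
		geni_se_config_packing(&se, BITS_PER_BYTE, 4, true, true, true);
		geni_se_select_mode(&se, GENI_SE_FIFO);

		return geni_se_resources_off(&se);
	}
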
se                732 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_arm_cq(struct t4_cq *cq, int se)
se                742 drivers/infiniband/hw/cxgb4/t4.h 	val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6);
se                159 drivers/infiniband/hw/hfi1/trace.c 			     u8 *se, u8 *pad, u8 *opcode, u8 *tver,
se                166 drivers/infiniband/hw/hfi1/trace.c 	*se = ib_bth_get_se(ohdr);
se                177 drivers/infiniband/hw/hfi1/trace.c 			      u8 *pad, u8 *se, u8 *tver,
se                184 drivers/infiniband/hw/hfi1/trace.c 	*se = ib_bth_get_se(ohdr);
se                258 drivers/infiniband/hw/hfi1/trace.c 				u8 se, u8 pad, u8 opcode, const char *opname,
se                271 drivers/infiniband/hw/hfi1/trace.c 					 se, mig, pad, tver, qpn, ack, psn);
se                276 drivers/infiniband/hw/hfi1/trace.c 				 se, mig, pad, tver, pkey, fecn, becn,
se                117 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			     u8 *se, u8 *pad, u8 *opcode, u8 *tver,
se                124 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			      u8 *pad, u8 *se, u8 *tver,
se                140 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 				u8 se, u8 pad, u8 opcode, const char *opname,
se                176 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			__field(u8, se)
se                223 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 								 &__entry->se,
se                244 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 							  &__entry->se,
se                290 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 						  __entry->se,
se                335 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 			__field(u8, se)
se                388 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 								 &__entry->se,
se                412 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 							&__entry->se,
se                453 drivers/infiniband/hw/hfi1/trace_ibhdrs.h 						  __entry->se,
se                133 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void __bth_set_se(void *arg, int se)
se                137 drivers/infiniband/sw/rxe/rxe_hdr.h 	if (se)
se                323 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
se                325 drivers/infiniband/sw/rxe/rxe_hdr.h 	__bth_set_se(pkt->hdr + pkt->offset, se);
se                433 drivers/infiniband/sw/rxe/rxe_hdr.h static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
se                441 drivers/infiniband/sw/rxe/rxe_hdr.h 	if (se)
se               1105 drivers/md/dm-cache-target.c 	sector_t se = bio_end_sector(bio);
se               1109 drivers/md/dm-cache-target.c 	if (se - sb < cache->discard_block_size)
se               1112 drivers/md/dm-cache-target.c 		*e = to_dblock(block_div(se, cache->discard_block_size));
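
    In the dm-cache hits above, sb/se are the bio's first and one-past-last sectors, rounded to a range of discard blocks. A hedged sketch of that rounding, assuming the start is rounded up and the end down so only fully covered blocks remain; dm-cache itself goes through dblock_t wrappers and do_div(), plain 64-bit division is used here for brevity.

	#include <linux/kernel.h>
	#include <linux/types.h>

	static void ex_discard_block_range(u64 sb, u64 se, u64 block_size,
					   u64 *b, u64 *e)
	{
		*b = DIV_ROUND_UP(sb, block_size);	/* start rounded up to a block boundary */

		if (se - sb < block_size)
			*e = *b;			/* bio too small to cover a whole block */
		else
			*e = se / block_size;		/* end rounded down to a block boundary */
	}
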
se                289 drivers/media/usb/msi2500/msi2500.c 			struct {signed int x:14; } se; /* sign extension */
se                294 drivers/media/usb/msi2500/msi2500.c 				se.x = *s16src++;
se                296 drivers/media/usb/msi2500/msi2500.c 				utmp = se.x + 8192;
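
    The msi2500 hits above use an anonymous struct with a 14-bit signed bitfield purely to sign-extend raw ADC samples before re-biasing them. A tiny stand-alone sketch of the same trick; the helper name is illustrative, and like the driver it relies on GCC's two's-complement bitfield behaviour.

	#include <linux/types.h>

	/* Sign-extend a raw 14-bit sample and re-bias it into 0..16383. */
	static inline unsigned int ex_sample14_to_unsigned(u16 raw)
	{
		struct { signed int x:14; } se;	/* sign extension helper */

		se.x = raw;		/* low 14 bits kept, bit 13 becomes the sign */
		return se.x + 8192;	/* [-8192, 8191] -> [0, 16383] */
	}
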
se                812 drivers/nfc/pn544/pn544.c 	struct nfc_se *se;
se                829 drivers/nfc/pn544/pn544.c 	se = nfc_find_se(hdev->ndev, se_idx);
se                831 drivers/nfc/pn544/pn544.c 	switch (se->type) {
se                867 drivers/nfc/pn544/pn544.c 	struct nfc_se *se;
se                870 drivers/nfc/pn544/pn544.c 	se = nfc_find_se(hdev->ndev, se_idx);
se                872 drivers/nfc/pn544/pn544.c 	switch (se->type) {
se                123 drivers/s390/net/smsgiucv_app.c 	struct smsg_app_event *se;
se                138 drivers/s390/net/smsgiucv_app.c 	se = smsg_app_event_alloc(from, msg);
se                139 drivers/s390/net/smsgiucv_app.c 	if (!se)
se                144 drivers/s390/net/smsgiucv_app.c 	list_add_tail(&se->list, &smsg_event_queue);
se                130 drivers/soc/fsl/dpio/qbman-portal.c 				    u8 epm, int sd, int sp, int se,
se                141 drivers/soc/fsl/dpio/qbman-portal.c 		se << SWP_CFG_SE_SHIFT |
se                178 drivers/soc/qcom/qcom-geni-se.c u32 geni_se_get_qup_hw_version(struct geni_se *se)
se                180 drivers/soc/qcom/qcom-geni-se.c 	struct geni_wrapper *wrapper = se->wrapper;
se                219 drivers/soc/qcom/qcom-geni-se.c static void geni_se_irq_clear(struct geni_se *se)
se                221 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(0, se->base + SE_GSI_EVENT_EN);
se                222 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(0xffffffff, se->base + SE_GENI_M_IRQ_CLEAR);
se                223 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(0xffffffff, se->base + SE_GENI_S_IRQ_CLEAR);
se                224 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(0xffffffff, se->base + SE_DMA_TX_IRQ_CLR);
se                225 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR);
se                226 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(0xffffffff, se->base + SE_IRQ_EN);
se                238 drivers/soc/qcom/qcom-geni-se.c void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr)
se                242 drivers/soc/qcom/qcom-geni-se.c 	geni_se_irq_clear(se);
se                243 drivers/soc/qcom/qcom-geni-se.c 	geni_se_io_init(se->base);
se                244 drivers/soc/qcom/qcom-geni-se.c 	geni_se_io_set_mode(se->base);
se                246 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(rx_wm, se->base + SE_GENI_RX_WATERMARK_REG);
se                247 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(rx_rfr, se->base + SE_GENI_RX_RFR_WATERMARK_REG);
se                249 drivers/soc/qcom/qcom-geni-se.c 	val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
se                251 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
se                253 drivers/soc/qcom/qcom-geni-se.c 	val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
se                255 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
se                259 drivers/soc/qcom/qcom-geni-se.c static void geni_se_select_fifo_mode(struct geni_se *se)
se                261 drivers/soc/qcom/qcom-geni-se.c 	u32 proto = geni_se_read_proto(se);
se                264 drivers/soc/qcom/qcom-geni-se.c 	geni_se_irq_clear(se);
se                266 drivers/soc/qcom/qcom-geni-se.c 	val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
se                271 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
se                273 drivers/soc/qcom/qcom-geni-se.c 	val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
se                276 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
se                278 drivers/soc/qcom/qcom-geni-se.c 	val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
se                280 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
se                283 drivers/soc/qcom/qcom-geni-se.c static void geni_se_select_dma_mode(struct geni_se *se)
se                287 drivers/soc/qcom/qcom-geni-se.c 	geni_se_irq_clear(se);
se                289 drivers/soc/qcom/qcom-geni-se.c 	val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
se                291 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
se                299 drivers/soc/qcom/qcom-geni-se.c void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode)
se                305 drivers/soc/qcom/qcom-geni-se.c 		geni_se_select_fifo_mode(se);
se                308 drivers/soc/qcom/qcom-geni-se.c 		geni_se_select_dma_mode(se);
se                379 drivers/soc/qcom/qcom-geni-se.c void geni_se_config_packing(struct geni_se *se, int bpw, int pack_words,
se                414 drivers/soc/qcom/qcom-geni-se.c 		writel_relaxed(cfg0, se->base + SE_GENI_TX_PACKING_CFG0);
se                415 drivers/soc/qcom/qcom-geni-se.c 		writel_relaxed(cfg1, se->base + SE_GENI_TX_PACKING_CFG1);
se                418 drivers/soc/qcom/qcom-geni-se.c 		writel_relaxed(cfg0, se->base + SE_GENI_RX_PACKING_CFG0);
se                419 drivers/soc/qcom/qcom-geni-se.c 		writel_relaxed(cfg1, se->base + SE_GENI_RX_PACKING_CFG1);
se                430 drivers/soc/qcom/qcom-geni-se.c 		writel_relaxed(bpw / 16, se->base + SE_GENI_BYTE_GRAN);
se                434 drivers/soc/qcom/qcom-geni-se.c static void geni_se_clks_off(struct geni_se *se)
se                436 drivers/soc/qcom/qcom-geni-se.c 	struct geni_wrapper *wrapper = se->wrapper;
se                438 drivers/soc/qcom/qcom-geni-se.c 	clk_disable_unprepare(se->clk);
se                450 drivers/soc/qcom/qcom-geni-se.c int geni_se_resources_off(struct geni_se *se)
se                454 drivers/soc/qcom/qcom-geni-se.c 	if (has_acpi_companion(se->dev))
se                457 drivers/soc/qcom/qcom-geni-se.c 	ret = pinctrl_pm_select_sleep_state(se->dev);
se                461 drivers/soc/qcom/qcom-geni-se.c 	geni_se_clks_off(se);
se                466 drivers/soc/qcom/qcom-geni-se.c static int geni_se_clks_on(struct geni_se *se)
se                469 drivers/soc/qcom/qcom-geni-se.c 	struct geni_wrapper *wrapper = se->wrapper;
se                476 drivers/soc/qcom/qcom-geni-se.c 	ret = clk_prepare_enable(se->clk);
se                490 drivers/soc/qcom/qcom-geni-se.c int geni_se_resources_on(struct geni_se *se)
se                494 drivers/soc/qcom/qcom-geni-se.c 	if (has_acpi_companion(se->dev))
se                497 drivers/soc/qcom/qcom-geni-se.c 	ret = geni_se_clks_on(se);
se                501 drivers/soc/qcom/qcom-geni-se.c 	ret = pinctrl_pm_select_default_state(se->dev);
se                503 drivers/soc/qcom/qcom-geni-se.c 		geni_se_clks_off(se);
se                522 drivers/soc/qcom/qcom-geni-se.c int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
se                527 drivers/soc/qcom/qcom-geni-se.c 	if (se->clk_perf_tbl) {
se                528 drivers/soc/qcom/qcom-geni-se.c 		*tbl = se->clk_perf_tbl;
se                529 drivers/soc/qcom/qcom-geni-se.c 		return se->num_clk_levels;
se                532 drivers/soc/qcom/qcom-geni-se.c 	se->clk_perf_tbl = devm_kcalloc(se->dev, MAX_CLK_PERF_LEVEL,
se                533 drivers/soc/qcom/qcom-geni-se.c 					sizeof(*se->clk_perf_tbl),
se                535 drivers/soc/qcom/qcom-geni-se.c 	if (!se->clk_perf_tbl)
se                539 drivers/soc/qcom/qcom-geni-se.c 		freq = clk_round_rate(se->clk, freq + 1);
se                540 drivers/soc/qcom/qcom-geni-se.c 		if (freq <= 0 || freq == se->clk_perf_tbl[i - 1])
se                542 drivers/soc/qcom/qcom-geni-se.c 		se->clk_perf_tbl[i] = freq;
se                544 drivers/soc/qcom/qcom-geni-se.c 	se->num_clk_levels = i;
se                545 drivers/soc/qcom/qcom-geni-se.c 	*tbl = se->clk_perf_tbl;
se                546 drivers/soc/qcom/qcom-geni-se.c 	return se->num_clk_levels;
se                569 drivers/soc/qcom/qcom-geni-se.c int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
se                580 drivers/soc/qcom/qcom-geni-se.c 	num_clk_levels = geni_se_clk_tbl_get(se, &tbl);
se                627 drivers/soc/qcom/qcom-geni-se.c int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
se                630 drivers/soc/qcom/qcom-geni-se.c 	struct geni_wrapper *wrapper = se->wrapper;
se                643 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(val, se->base + SE_DMA_TX_IRQ_EN_SET);
se                644 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_TX_PTR_L);
se                645 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_TX_PTR_H);
se                646 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR);
se                647 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(len, se->base + SE_DMA_TX_LEN);
se                663 drivers/soc/qcom/qcom-geni-se.c int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
se                666 drivers/soc/qcom/qcom-geni-se.c 	struct geni_wrapper *wrapper = se->wrapper;
se                679 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(val, se->base + SE_DMA_RX_IRQ_EN_SET);
se                680 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_RX_PTR_L);
se                681 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_RX_PTR_H);
se                683 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(0, se->base + SE_DMA_RX_ATTR);
se                684 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(len, se->base + SE_DMA_RX_LEN);
se                697 drivers/soc/qcom/qcom-geni-se.c void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
se                699 drivers/soc/qcom/qcom-geni-se.c 	struct geni_wrapper *wrapper = se->wrapper;
se                714 drivers/soc/qcom/qcom-geni-se.c void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
se                716 drivers/soc/qcom/qcom-geni-se.c 	struct geni_wrapper *wrapper = se->wrapper;
se                 75 drivers/spi/spi-geni-qcom.c 	struct geni_se se;
se                 99 drivers/spi/spi-geni-qcom.c 	struct geni_se *se = &mas->se;
se                102 drivers/spi/spi-geni-qcom.c 	ret = geni_se_clk_freq_match(&mas->se,
se                116 drivers/spi/spi-geni-qcom.c 	ret = clk_set_rate(se->clk, sclk_freq);
se                127 drivers/spi/spi-geni-qcom.c 	struct geni_se *se = &mas->se;
se                132 drivers/spi/spi-geni-qcom.c 	geni_se_cancel_m_cmd(se);
se                133 drivers/spi/spi-geni-qcom.c 	writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
se                141 drivers/spi/spi-geni-qcom.c 	geni_se_abort_m_cmd(se);
se                152 drivers/spi/spi-geni-qcom.c 	struct geni_se *se = &mas->se;
se                162 drivers/spi/spi-geni-qcom.c 		geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
se                164 drivers/spi/spi-geni-qcom.c 		geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
se                178 drivers/spi/spi-geni-qcom.c 	struct geni_se *se = &mas->se;
se                181 drivers/spi/spi-geni-qcom.c 	word_len = readl(se->base + SE_SPI_WORD_LEN);
se                193 drivers/spi/spi-geni-qcom.c 	geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
se                195 drivers/spi/spi-geni-qcom.c 	writel(word_len, se->base + SE_SPI_WORD_LEN);
se                202 drivers/spi/spi-geni-qcom.c 	struct geni_se *se = &mas->se;
se                207 drivers/spi/spi-geni-qcom.c 	loopback_cfg = readl(se->base + SE_SPI_LOOPBACK);
se                208 drivers/spi/spi-geni-qcom.c 	cpol = readl(se->base + SE_SPI_CPOL);
se                209 drivers/spi/spi-geni-qcom.c 	cpha = readl(se->base + SE_SPI_CPHA);
se                241 drivers/spi/spi-geni-qcom.c 	writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
se                242 drivers/spi/spi-geni-qcom.c 	writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
se                243 drivers/spi/spi-geni-qcom.c 	writel(cpha, se->base + SE_SPI_CPHA);
se                244 drivers/spi/spi-geni-qcom.c 	writel(cpol, se->base + SE_SPI_CPOL);
se                245 drivers/spi/spi-geni-qcom.c 	writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
se                246 drivers/spi/spi-geni-qcom.c 	writel(clk_sel, se->base + SE_GENI_CLK_SEL);
se                247 drivers/spi/spi-geni-qcom.c 	writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
se                256 drivers/spi/spi-geni-qcom.c 	struct geni_se *se = &mas->se;
se                258 drivers/spi/spi-geni-qcom.c 	geni_se_select_mode(se, GENI_SE_FIFO);
se                267 drivers/spi/spi-geni-qcom.c 	struct geni_se *se = &mas->se;
se                272 drivers/spi/spi-geni-qcom.c 	proto = geni_se_read_proto(se);
se                278 drivers/spi/spi-geni-qcom.c 	mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);
se                281 drivers/spi/spi-geni-qcom.c 	mas->fifo_width_bits = geni_se_get_tx_fifo_width(se);
se                287 drivers/spi/spi-geni-qcom.c 	geni_se_init(se, 0x0, mas->tx_fifo_depth - 2);
se                290 drivers/spi/spi-geni-qcom.c 	ver = geni_se_get_qup_hw_version(se);
se                309 drivers/spi/spi-geni-qcom.c 	struct geni_se *se = &mas->se;
se                311 drivers/spi/spi-geni-qcom.c 	spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
se                338 drivers/spi/spi-geni-qcom.c 		writel(clk_sel, se->base + SE_GENI_CLK_SEL);
se                339 drivers/spi/spi-geni-qcom.c 		writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
se                362 drivers/spi/spi-geni-qcom.c 		writel(len, se->base + SE_SPI_TX_TRANS_LEN);
se                366 drivers/spi/spi-geni-qcom.c 		writel(len, se->base + SE_SPI_RX_TRANS_LEN);
se                369 drivers/spi/spi-geni-qcom.c 	writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
se                371 drivers/spi/spi-geni-qcom.c 	geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
se                379 drivers/spi/spi-geni-qcom.c 		writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
se                412 drivers/spi/spi-geni-qcom.c 	struct geni_se *se = &mas->se;
se                432 drivers/spi/spi-geni-qcom.c 		iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
se                436 drivers/spi/spi-geni-qcom.c 		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
se                441 drivers/spi/spi-geni-qcom.c 	struct geni_se *se = &mas->se;
se                449 drivers/spi/spi-geni-qcom.c 	rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
se                468 drivers/spi/spi-geni-qcom.c 		ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
se                479 drivers/spi/spi-geni-qcom.c 	struct geni_se *se = &mas->se;
se                487 drivers/spi/spi-geni-qcom.c 	m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
se                513 drivers/spi/spi-geni-qcom.c 			writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
se                527 drivers/spi/spi-geni-qcom.c 	writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);
se                563 drivers/spi/spi-geni-qcom.c 	mas->se.dev = &pdev->dev;
se                564 drivers/spi/spi-geni-qcom.c 	mas->se.wrapper = dev_get_drvdata(pdev->dev.parent);
se                565 drivers/spi/spi-geni-qcom.c 	mas->se.base = base;
se                566 drivers/spi/spi-geni-qcom.c 	mas->se.clk = clk;
se                624 drivers/spi/spi-geni-qcom.c 	return geni_se_resources_off(&mas->se);
se                632 drivers/spi/spi-geni-qcom.c 	return geni_se_resources_on(&mas->se);
se                103 drivers/tty/serial/qcom_geni_serial.c 	struct geni_se se;
se                206 drivers/tty/serial/qcom_geni_serial.c 	port->se.base = uport->membase;
se                444 drivers/tty/serial/qcom_geni_serial.c 		geni_se_cancel_m_cmd(&port->se);
se                447 drivers/tty/serial/qcom_geni_serial.c 			geni_se_abort_m_cmd(&port->se);
se                582 drivers/tty/serial/qcom_geni_serial.c 	geni_se_cancel_m_cmd(&port->se);
se                585 drivers/tty/serial/qcom_geni_serial.c 		geni_se_abort_m_cmd(&port->se);
se                603 drivers/tty/serial/qcom_geni_serial.c 	geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
se                634 drivers/tty/serial/qcom_geni_serial.c 	geni_se_cancel_s_cmd(&port->se);
se                827 drivers/tty/serial/qcom_geni_serial.c 	port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se);
se                828 drivers/tty/serial/qcom_geni_serial.c 	port->tx_fifo_width = geni_se_get_tx_fifo_width(&port->se);
se                829 drivers/tty/serial/qcom_geni_serial.c 	port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se);
se                864 drivers/tty/serial/qcom_geni_serial.c 	proto = geni_se_read_proto(&port->se);
se                881 drivers/tty/serial/qcom_geni_serial.c 	geni_se_config_packing(&port->se, BITS_PER_BYTE, port->tx_bytes_pw,
se                883 drivers/tty/serial/qcom_geni_serial.c 	geni_se_config_packing(&port->se, BITS_PER_BYTE, port->rx_bytes_pw,
se                885 drivers/tty/serial/qcom_geni_serial.c 	geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
se                886 drivers/tty/serial/qcom_geni_serial.c 	geni_se_select_mode(&port->se, GENI_SE_FIFO);
se                972 drivers/tty/serial/qcom_geni_serial.c 	ver = geni_se_get_qup_hw_version(&port->se);
se                981 drivers/tty/serial/qcom_geni_serial.c 	clk_set_rate(port->se.clk, clk_rate);
se               1120 drivers/tty/serial/qcom_geni_serial.c 	struct geni_se se;
se               1125 drivers/tty/serial/qcom_geni_serial.c 	memset(&se, 0, sizeof(se));
se               1126 drivers/tty/serial/qcom_geni_serial.c 	se.base = uport->membase;
se               1127 drivers/tty/serial/qcom_geni_serial.c 	if (geni_se_read_proto(&se) != GENI_SE_UART)
se               1142 drivers/tty/serial/qcom_geni_serial.c 	geni_se_config_packing(&se, BITS_PER_BYTE, 1, false, true, false);
se               1143 drivers/tty/serial/qcom_geni_serial.c 	geni_se_init(&se, DEF_FIFO_DEPTH_WORDS / 2, DEF_FIFO_DEPTH_WORDS - 2);
se               1144 drivers/tty/serial/qcom_geni_serial.c 	geni_se_select_mode(&se, GENI_SE_FIFO);
se               1216 drivers/tty/serial/qcom_geni_serial.c 		geni_se_resources_on(&port->se);
se               1219 drivers/tty/serial/qcom_geni_serial.c 		geni_se_resources_off(&port->se);
se               1292 drivers/tty/serial/qcom_geni_serial.c 	port->se.dev = &pdev->dev;
se               1293 drivers/tty/serial/qcom_geni_serial.c 	port->se.wrapper = dev_get_drvdata(pdev->dev.parent);
se               1294 drivers/tty/serial/qcom_geni_serial.c 	port->se.clk = devm_clk_get(&pdev->dev, "se");
se               1295 drivers/tty/serial/qcom_geni_serial.c 	if (IS_ERR(port->se.clk)) {
se               1296 drivers/tty/serial/qcom_geni_serial.c 		ret = PTR_ERR(port->se.clk);
se                136 fs/f2fs/checkpoint.c 	struct seg_entry *se;
se                145 fs/f2fs/checkpoint.c 	se = get_seg_entry(sbi, segno);
se                147 fs/f2fs/checkpoint.c 	exist = f2fs_test_bit(offset, se->cur_valid_map);
se                364 fs/f2fs/f2fs.h #define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
se                884 fs/f2fs/segment.c 	struct seg_entry *se;
se                889 fs/f2fs/segment.c 		se = get_seg_entry(sbi, segno);
se                890 fs/f2fs/segment.c 		if (IS_NODESEG(se->type))
se                891 fs/f2fs/segment.c 			holes[NODE] += sbi->blocks_per_seg - se->valid_blocks;
se                893 fs/f2fs/segment.c 			holes[DATA] += sbi->blocks_per_seg - se->valid_blocks;
se               1806 fs/f2fs/segment.c 	struct seg_entry *se;
se               1829 fs/f2fs/segment.c 		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
se               1832 fs/f2fs/segment.c 		if (!f2fs_test_and_set_bit(offset, se->discard_map))
se               1846 fs/f2fs/segment.c 	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
se               1847 fs/f2fs/segment.c 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
se               1848 fs/f2fs/segment.c 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
se               1849 fs/f2fs/segment.c 	unsigned long *discard_map = (unsigned long *)se->discard_map;
se               1857 fs/f2fs/segment.c 	if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi))
se               1861 fs/f2fs/segment.c 		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
se               2113 fs/f2fs/segment.c 	struct seg_entry *se = get_seg_entry(sbi, segno);
se               2114 fs/f2fs/segment.c 	se->type = type;
se               2121 fs/f2fs/segment.c 	struct seg_entry *se;
se               2131 fs/f2fs/segment.c 	se = get_seg_entry(sbi, segno);
se               2132 fs/f2fs/segment.c 	new_vblocks = se->valid_blocks + del;
se               2138 fs/f2fs/segment.c 	se->valid_blocks = new_vblocks;
se               2139 fs/f2fs/segment.c 	se->mtime = get_mtime(sbi, false);
se               2140 fs/f2fs/segment.c 	if (se->mtime > SIT_I(sbi)->max_mtime)
se               2141 fs/f2fs/segment.c 		SIT_I(sbi)->max_mtime = se->mtime;
se               2145 fs/f2fs/segment.c 		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
se               2148 fs/f2fs/segment.c 						se->cur_valid_map_mir);
se               2159 fs/f2fs/segment.c 			se->valid_blocks--;
se               2163 fs/f2fs/segment.c 		if (!f2fs_test_and_set_bit(offset, se->discard_map))
se               2171 fs/f2fs/segment.c 			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
se               2172 fs/f2fs/segment.c 				se->ckpt_valid_blocks++;
se               2175 fs/f2fs/segment.c 		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
se               2178 fs/f2fs/segment.c 						se->cur_valid_map_mir);
se               2189 fs/f2fs/segment.c 			se->valid_blocks++;
se               2198 fs/f2fs/segment.c 			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
se               2205 fs/f2fs/segment.c 		if (f2fs_test_and_clear_bit(offset, se->discard_map))
se               2208 fs/f2fs/segment.c 	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
se               2209 fs/f2fs/segment.c 		se->ckpt_valid_blocks += del;
se               2246 fs/f2fs/segment.c 	struct seg_entry *se;
se               2255 fs/f2fs/segment.c 	se = get_seg_entry(sbi, segno);
se               2258 fs/f2fs/segment.c 	if (f2fs_test_bit(offset, se->ckpt_valid_map))
se               2530 fs/f2fs/segment.c 	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
se               2533 fs/f2fs/segment.c 	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
se               2534 fs/f2fs/segment.c 	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
se               3298 fs/f2fs/segment.c 	struct seg_entry *se;
se               3303 fs/f2fs/segment.c 	se = get_seg_entry(sbi, segno);
se               3304 fs/f2fs/segment.c 	type = se->type;
se               3310 fs/f2fs/segment.c 		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
se               3834 fs/f2fs/segment.c 	struct seg_entry *se;
se               3884 fs/f2fs/segment.c 			se = get_seg_entry(sbi, segno);
se               3886 fs/f2fs/segment.c 			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
se               3903 fs/f2fs/segment.c 				seg_info_to_raw_sit(se,
se               3909 fs/f2fs/segment.c 				seg_info_to_raw_sit(se,
se               4118 fs/f2fs/segment.c 	struct seg_entry *se;
se               4137 fs/f2fs/segment.c 			se = &sit_i->sentries[start];
se               4148 fs/f2fs/segment.c 			seg_info_from_raw_sit(se, &sit);
se               4149 fs/f2fs/segment.c 			if (IS_NODESEG(se->type))
se               4150 fs/f2fs/segment.c 				total_node_blocks += se->valid_blocks;
se               4154 fs/f2fs/segment.c 				memset(se->discard_map, 0xff,
se               4157 fs/f2fs/segment.c 				memcpy(se->discard_map,
se               4158 fs/f2fs/segment.c 					se->cur_valid_map,
se               4162 fs/f2fs/segment.c 					se->valid_blocks;
se               4167 fs/f2fs/segment.c 							se->valid_blocks;
se               4184 fs/f2fs/segment.c 		se = &sit_i->sentries[start];
se               4187 fs/f2fs/segment.c 		old_valid_blocks = se->valid_blocks;
se               4188 fs/f2fs/segment.c 		if (IS_NODESEG(se->type))
se               4194 fs/f2fs/segment.c 		seg_info_from_raw_sit(se, &sit);
se               4195 fs/f2fs/segment.c 		if (IS_NODESEG(se->type))
se               4196 fs/f2fs/segment.c 			total_node_blocks += se->valid_blocks;
se               4199 fs/f2fs/segment.c 			memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
se               4201 fs/f2fs/segment.c 			memcpy(se->discard_map, se->cur_valid_map,
se               4204 fs/f2fs/segment.c 			sbi->discard_blks -= se->valid_blocks;
se               4209 fs/f2fs/segment.c 							se->valid_blocks;
se               4320 fs/f2fs/segment.c 		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
se               4323 fs/f2fs/segment.c 		if (f2fs_test_bit(blkofs, se->cur_valid_map))
se               4330 fs/f2fs/segment.c 			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
se                352 fs/f2fs/segment.h static inline void seg_info_from_raw_sit(struct seg_entry *se,
se                355 fs/f2fs/segment.h 	se->valid_blocks = GET_SIT_VBLOCKS(rs);
se                356 fs/f2fs/segment.h 	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
se                357 fs/f2fs/segment.h 	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
se                358 fs/f2fs/segment.h 	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
se                360 fs/f2fs/segment.h 	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
se                362 fs/f2fs/segment.h 	se->type = GET_SIT_TYPE(rs);
se                363 fs/f2fs/segment.h 	se->mtime = le64_to_cpu(rs->mtime);
se                366 fs/f2fs/segment.h static inline void __seg_info_to_raw_sit(struct seg_entry *se,
se                369 fs/f2fs/segment.h 	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
se                370 fs/f2fs/segment.h 					se->valid_blocks;
se                372 fs/f2fs/segment.h 	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
se                373 fs/f2fs/segment.h 	rs->mtime = cpu_to_le64(se->mtime);
se                380 fs/f2fs/segment.h 	struct seg_entry *se;
se                390 fs/f2fs/segment.h 		se = get_seg_entry(sbi, start + i);
se                391 fs/f2fs/segment.h 		__seg_info_to_raw_sit(se, rs);
se                395 fs/f2fs/segment.h static inline void seg_info_to_raw_sit(struct seg_entry *se,
se                398 fs/f2fs/segment.h 	__seg_info_to_raw_sit(se, rs);
se                400 fs/f2fs/segment.h 	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
se                401 fs/f2fs/segment.h 	se->ckpt_valid_blocks = se->valid_blocks;
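
    The segment.h hits above convert between the in-memory seg_entry and the on-disk raw SIT entry, packing the segment type into the bits above the valid-block count (the GET_SIT_VBLOCKS/GET_SIT_TYPE accessors seen in seg_info_from_raw_sit). A hedged sketch of that packing; the shift value of 10 matches my reading of f2fs_fs.h but is restated as a local constant for the example.

	#include <linux/types.h>

	#define EX_SIT_VBLOCKS_SHIFT	10
	#define EX_SIT_VBLOCKS_MASK	((1 << EX_SIT_VBLOCKS_SHIFT) - 1)

	/* Pack segment type and valid-block count into one raw 16-bit field. */
	static inline __le16 ex_pack_sit_vblocks(u8 type, u16 valid_blocks)
	{
		return cpu_to_le16((type << EX_SIT_VBLOCKS_SHIFT) | valid_blocks);
	}

	static inline u16 ex_sit_vblocks(__le16 raw)
	{
		return le16_to_cpu(raw) & EX_SIT_VBLOCKS_MASK;
	}

	static inline u8 ex_sit_type(__le16 raw)
	{
		return le16_to_cpu(raw) >> EX_SIT_VBLOCKS_SHIFT;
	}
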
se                617 fs/f2fs/sysfs.c 		struct seg_entry *se = get_seg_entry(sbi, i);
se                621 fs/f2fs/sysfs.c 		seq_printf(seq, "%d|%-3u", se->type, se->valid_blocks);
se                644 fs/f2fs/sysfs.c 		struct seg_entry *se = get_seg_entry(sbi, i);
se                647 fs/f2fs/sysfs.c 		seq_printf(seq, "%d|%-3u|", se->type, se->valid_blocks);
se                649 fs/f2fs/sysfs.c 			seq_printf(seq, " %.2x", se->cur_valid_map[j]);
se                 67 fs/hpfs/anode.c 	secno se;
se                105 fs/hpfs/anode.c 		if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
se                109 fs/hpfs/anode.c 			return se;
se                117 fs/hpfs/anode.c 		se = !fnod ? node : (node + 16384) & ~16383;
se                119 fs/hpfs/anode.c 	if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
se                128 fs/hpfs/anode.c 			hpfs_free_sectors(s, se, 1);
se                148 fs/hpfs/anode.c 			hpfs_free_sectors(s, se, 1);
se                158 fs/hpfs/anode.c 	btree->u.external[n].disk_secno = cpu_to_le32(se);
se                163 fs/hpfs/anode.c 	if ((a == node && fnod) || na == -1) return se;
se                195 fs/hpfs/anode.c 			return se;
se                266 fs/hpfs/anode.c 	return se;
se               2759 fs/nfsd/nfs4state.c gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
se               2767 fs/nfsd/nfs4state.c 	if (se->se_callback_netid_len == 3 &&
se               2768 fs/nfsd/nfs4state.c 	    !memcmp(se->se_callback_netid_val, "tcp", 3))
se               2770 fs/nfsd/nfs4state.c 	else if (se->se_callback_netid_len == 4 &&
se               2771 fs/nfsd/nfs4state.c 		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
se               2776 fs/nfsd/nfs4state.c 	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
se               2777 fs/nfsd/nfs4state.c 					    se->se_callback_addr_len,
se               2787 fs/nfsd/nfs4state.c 	conn->cb_prog = se->se_callback_prog;
se               2788 fs/nfsd/nfs4state.c 	conn->cb_ident = se->se_callback_ident;
se                 69 fs/ocfs2/slot_map.c 	struct ocfs2_slot_map_extended *se;
se                 73 fs/ocfs2/slot_map.c 		se = (struct ocfs2_slot_map_extended *)si->si_bh[b]->b_data;
se                 78 fs/ocfs2/slot_map.c 			if (se->se_slots[i].es_valid)
se                 80 fs/ocfs2/slot_map.c 					       le32_to_cpu(se->se_slots[i].es_node_num));
se                155 fs/ocfs2/slot_map.c 	struct ocfs2_slot_map_extended *se;
se                159 fs/ocfs2/slot_map.c 	se = (struct ocfs2_slot_map_extended *)si->si_bh[blkind]->b_data;
se                160 fs/ocfs2/slot_map.c 	se->se_slots[slotno].es_valid = si->si_slots[slot_num].sl_valid;
se                162 fs/ocfs2/slot_map.c 		se->se_slots[slotno].es_node_num =
se                480 fs/proc/base.c 		   (unsigned long long)task->se.sum_exec_runtime,
se                450 include/linux/f2fs_fs.h 	struct f2fs_sit_entry se;
se                234 include/linux/qcom-geni-se.h u32 geni_se_get_qup_hw_version(struct geni_se *se);
se                242 include/linux/qcom-geni-se.h static inline u32 geni_se_read_proto(struct geni_se *se)
se                246 include/linux/qcom-geni-se.h 	val = readl_relaxed(se->base + GENI_FW_REVISION_RO);
se                260 include/linux/qcom-geni-se.h static inline void geni_se_setup_m_cmd(struct geni_se *se, u32 cmd, u32 params)
se                265 include/linux/qcom-geni-se.h 	writel_relaxed(m_cmd, se->base + SE_GENI_M_CMD0);
se                277 include/linux/qcom-geni-se.h static inline void geni_se_setup_s_cmd(struct geni_se *se, u32 cmd, u32 params)
se                281 include/linux/qcom-geni-se.h 	s_cmd = readl_relaxed(se->base + SE_GENI_S_CMD0);
se                285 include/linux/qcom-geni-se.h 	writel_relaxed(s_cmd, se->base + SE_GENI_S_CMD0);
se                296 include/linux/qcom-geni-se.h static inline void geni_se_cancel_m_cmd(struct geni_se *se)
se                298 include/linux/qcom-geni-se.h 	writel_relaxed(M_GENI_CMD_CANCEL, se->base + SE_GENI_M_CMD_CTRL_REG);
se                309 include/linux/qcom-geni-se.h static inline void geni_se_cancel_s_cmd(struct geni_se *se)
se                311 include/linux/qcom-geni-se.h 	writel_relaxed(S_GENI_CMD_CANCEL, se->base + SE_GENI_S_CMD_CTRL_REG);
se                321 include/linux/qcom-geni-se.h static inline void geni_se_abort_m_cmd(struct geni_se *se)
se                323 include/linux/qcom-geni-se.h 	writel_relaxed(M_GENI_CMD_ABORT, se->base + SE_GENI_M_CMD_CTRL_REG);
se                334 include/linux/qcom-geni-se.h static inline void geni_se_abort_s_cmd(struct geni_se *se)
se                336 include/linux/qcom-geni-se.h 	writel_relaxed(S_GENI_CMD_ABORT, se->base + SE_GENI_S_CMD_CTRL_REG);
se                348 include/linux/qcom-geni-se.h static inline u32 geni_se_get_tx_fifo_depth(struct geni_se *se)
se                352 include/linux/qcom-geni-se.h 	val = readl_relaxed(se->base + SE_HW_PARAM_0);
se                366 include/linux/qcom-geni-se.h static inline u32 geni_se_get_tx_fifo_width(struct geni_se *se)
se                370 include/linux/qcom-geni-se.h 	val = readl_relaxed(se->base + SE_HW_PARAM_0);
se                384 include/linux/qcom-geni-se.h static inline u32 geni_se_get_rx_fifo_depth(struct geni_se *se)
se                388 include/linux/qcom-geni-se.h 	val = readl_relaxed(se->base + SE_HW_PARAM_1);
se                393 include/linux/qcom-geni-se.h void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr);
se                395 include/linux/qcom-geni-se.h void geni_se_select_mode(struct geni_se *se, enum geni_se_xfer_mode mode);
se                397 include/linux/qcom-geni-se.h void geni_se_config_packing(struct geni_se *se, int bpw, int pack_words,
se                400 include/linux/qcom-geni-se.h int geni_se_resources_off(struct geni_se *se);
se                402 include/linux/qcom-geni-se.h int geni_se_resources_on(struct geni_se *se);
se                404 include/linux/qcom-geni-se.h int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl);
se                406 include/linux/qcom-geni-se.h int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
se                410 include/linux/qcom-geni-se.h int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
se                413 include/linux/qcom-geni-se.h int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
se                416 include/linux/qcom-geni-se.h void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
se                418 include/linux/qcom-geni-se.h void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
se                676 include/linux/sched.h 	struct sched_entity		se;
se                621 include/trace/events/sched.h 	TP_PROTO(struct sched_entity *se),
se                622 include/trace/events/sched.h 	TP_ARGS(se));
se                 81 init/init_task.c 	.se		= {
se                 82 init/init_task.c 		.group_node 	= LIST_HEAD_INIT(init_task.se.group_node),
se                109 kernel/delayacct.c 	t3 = tsk->se.sum_exec_runtime;
se               7787 kernel/events/core.c 	struct perf_switch_event *se = data;
se               7797 kernel/events/core.c 		se->event_id.header.type = PERF_RECORD_SWITCH;
se               7798 kernel/events/core.c 		se->event_id.header.size = sizeof(se->event_id.header);
se               7800 kernel/events/core.c 		se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
se               7801 kernel/events/core.c 		se->event_id.header.size = sizeof(se->event_id);
se               7802 kernel/events/core.c 		se->event_id.next_prev_pid =
se               7803 kernel/events/core.c 					perf_event_pid(event, se->next_prev);
se               7804 kernel/events/core.c 		se->event_id.next_prev_tid =
se               7805 kernel/events/core.c 					perf_event_tid(event, se->next_prev);
se               7808 kernel/events/core.c 	perf_event_header__init_id(&se->event_id.header, &sample, event);
se               7810 kernel/events/core.c 	ret = perf_output_begin(&handle, event, se->event_id.header.size);
se               7815 kernel/events/core.c 		perf_output_put(&handle, se->event_id.header);
se               7817 kernel/events/core.c 		perf_output_put(&handle, se->event_id);
se                134 kernel/exit.c  	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
se                155 kernel/exit.c  	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
se                750 kernel/sched/core.c 	struct load_weight *load = &p->se.load;
se                758 kernel/sched/core.c 		p->se.runnable_weight = load->weight;
se                771 kernel/sched/core.c 		p->se.runnable_weight = load->weight;
se               1746 kernel/sched/core.c 		p->se.nr_migrations++;
se               2183 kernel/sched/core.c 		__schedstat_inc(p->se.statistics.nr_wakeups_local);
se               2187 kernel/sched/core.c 		__schedstat_inc(p->se.statistics.nr_wakeups_remote);
se               2199 kernel/sched/core.c 		__schedstat_inc(p->se.statistics.nr_wakeups_migrate);
se               2203 kernel/sched/core.c 	__schedstat_inc(p->se.statistics.nr_wakeups);
se               2206 kernel/sched/core.c 		__schedstat_inc(p->se.statistics.nr_wakeups_sync);
se               2682 kernel/sched/core.c 	p->se.on_rq			= 0;
se               2683 kernel/sched/core.c 	p->se.exec_start		= 0;
se               2684 kernel/sched/core.c 	p->se.sum_exec_runtime		= 0;
se               2685 kernel/sched/core.c 	p->se.prev_sum_exec_runtime	= 0;
se               2686 kernel/sched/core.c 	p->se.nr_migrations		= 0;
se               2687 kernel/sched/core.c 	p->se.vruntime			= 0;
se               2688 kernel/sched/core.c 	INIT_LIST_HEAD(&p->se.group_node);
se               2691 kernel/sched/core.c 	p->se.cfs_rq			= NULL;
se               2696 kernel/sched/core.c 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
se               2882 kernel/sched/core.c 	init_entity_runnable_average(&p->se);
se               3529 kernel/sched/core.c 	struct sched_entity *curr = (&p->se)->cfs_rq->curr;
se               3561 kernel/sched/core.c 		return p->se.sum_exec_runtime;
se               3575 kernel/sched/core.c 	ns = p->se.sum_exec_runtime;
se               3681 kernel/sched/core.c 		delta = rq_clock_task(rq) - curr->se.exec_start;
se               6027 kernel/sched/core.c 	idle->se.exec_start = sched_clock();
se               6573 kernel/sched/core.c 		root_task_group.se = (struct sched_entity **)ptr;
se               6839 kernel/sched/core.c 		p->se.exec_start = 0;
se               6840 kernel/sched/core.c 		schedstat_set(p->se.statistics.wait_start,  0);
se               6841 kernel/sched/core.c 		schedstat_set(p->se.statistics.sleep_start, 0);
se               6842 kernel/sched/core.c 		schedstat_set(p->se.statistics.block_start, 0);
se               7627 kernel/sched/core.c 			ws += schedstat_val(tg->se[i]->statistics.wait_sum);
se                272 kernel/sched/cputime.c 	return t->se.sum_exec_runtime;
se                282 kernel/sched/cputime.c 	ns = t->se.sum_exec_runtime;
se                666 kernel/sched/cputime.c 		.sum_exec_runtime = p->se.sum_exec_runtime,
se               1199 kernel/sched/deadline.c 	delta_exec = now - curr->se.exec_start;
se               1206 kernel/sched/deadline.c 	schedstat_set(curr->se.statistics.exec_max,
se               1207 kernel/sched/deadline.c 		      max(curr->se.statistics.exec_max, delta_exec));
se               1209 kernel/sched/deadline.c 	curr->se.sum_exec_runtime += delta_exec;
se               1212 kernel/sched/deadline.c 	curr->se.exec_start = now;
se               1748 kernel/sched/deadline.c 	p->se.exec_start = rq_clock_task(rq);
se                377 kernel/sched/debug.c 	struct sched_entity *se = tg->se[cpu];
se                384 kernel/sched/debug.c 	if (!se)
se                387 kernel/sched/debug.c 	PN(se->exec_start);
se                388 kernel/sched/debug.c 	PN(se->vruntime);
se                389 kernel/sched/debug.c 	PN(se->sum_exec_runtime);
se                392 kernel/sched/debug.c 		PN_SCHEDSTAT(se->statistics.wait_start);
se                393 kernel/sched/debug.c 		PN_SCHEDSTAT(se->statistics.sleep_start);
se                394 kernel/sched/debug.c 		PN_SCHEDSTAT(se->statistics.block_start);
se                395 kernel/sched/debug.c 		PN_SCHEDSTAT(se->statistics.sleep_max);
se                396 kernel/sched/debug.c 		PN_SCHEDSTAT(se->statistics.block_max);
se                397 kernel/sched/debug.c 		PN_SCHEDSTAT(se->statistics.exec_max);
se                398 kernel/sched/debug.c 		PN_SCHEDSTAT(se->statistics.slice_max);
se                399 kernel/sched/debug.c 		PN_SCHEDSTAT(se->statistics.wait_max);
se                400 kernel/sched/debug.c 		PN_SCHEDSTAT(se->statistics.wait_sum);
se                401 kernel/sched/debug.c 		P_SCHEDSTAT(se->statistics.wait_count);
se                404 kernel/sched/debug.c 	P(se->load.weight);
se                405 kernel/sched/debug.c 	P(se->runnable_weight);
se                407 kernel/sched/debug.c 	P(se->avg.load_avg);
se                408 kernel/sched/debug.c 	P(se->avg.util_avg);
se                409 kernel/sched/debug.c 	P(se->avg.runnable_load_avg);
se                443 kernel/sched/debug.c 		SPLIT_NS(p->se.vruntime),
se                448 kernel/sched/debug.c 		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
se                449 kernel/sched/debug.c 		SPLIT_NS(p->se.sum_exec_runtime),
se                450 kernel/sched/debug.c 		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));
se                879 kernel/sched/debug.c 	PN(se.exec_start);
se                880 kernel/sched/debug.c 	PN(se.vruntime);
se                881 kernel/sched/debug.c 	PN(se.sum_exec_runtime);
se                885 kernel/sched/debug.c 	P(se.nr_migrations);
se                890 kernel/sched/debug.c 		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
se                891 kernel/sched/debug.c 		PN_SCHEDSTAT(se.statistics.wait_start);
se                892 kernel/sched/debug.c 		PN_SCHEDSTAT(se.statistics.sleep_start);
se                893 kernel/sched/debug.c 		PN_SCHEDSTAT(se.statistics.block_start);
se                894 kernel/sched/debug.c 		PN_SCHEDSTAT(se.statistics.sleep_max);
se                895 kernel/sched/debug.c 		PN_SCHEDSTAT(se.statistics.block_max);
se                896 kernel/sched/debug.c 		PN_SCHEDSTAT(se.statistics.exec_max);
se                897 kernel/sched/debug.c 		PN_SCHEDSTAT(se.statistics.slice_max);
se                898 kernel/sched/debug.c 		PN_SCHEDSTAT(se.statistics.wait_max);
se                899 kernel/sched/debug.c 		PN_SCHEDSTAT(se.statistics.wait_sum);
se                900 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.wait_count);
se                901 kernel/sched/debug.c 		PN_SCHEDSTAT(se.statistics.iowait_sum);
se                902 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.iowait_count);
se                903 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
se                904 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
se                905 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
se                906 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
se                907 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
se                908 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_wakeups);
se                909 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
se                910 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
se                911 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
se                912 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
se                913 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
se                914 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
se                915 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
se                916 kernel/sched/debug.c 		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
se                918 kernel/sched/debug.c 		avg_atom = p->se.sum_exec_runtime;
se                924 kernel/sched/debug.c 		avg_per_cpu = p->se.sum_exec_runtime;
se                925 kernel/sched/debug.c 		if (p->se.nr_migrations) {
se                927 kernel/sched/debug.c 						p->se.nr_migrations);
se                942 kernel/sched/debug.c 	P(se.load.weight);
se                943 kernel/sched/debug.c 	P(se.runnable_weight);
se                945 kernel/sched/debug.c 	P(se.avg.load_sum);
se                946 kernel/sched/debug.c 	P(se.avg.runnable_load_sum);
se                947 kernel/sched/debug.c 	P(se.avg.util_sum);
se                948 kernel/sched/debug.c 	P(se.avg.load_avg);
se                949 kernel/sched/debug.c 	P(se.avg.runnable_load_avg);
se                950 kernel/sched/debug.c 	P(se.avg.util_avg);
se                951 kernel/sched/debug.c 	P(se.avg.last_update_time);
se                952 kernel/sched/debug.c 	P(se.avg.util_est.ewma);
se                953 kernel/sched/debug.c 	P(se.avg.util_est.enqueued);
se                984 kernel/sched/debug.c 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
se                251 kernel/sched/fair.c static inline struct task_struct *task_of(struct sched_entity *se)
se                253 kernel/sched/fair.c 	SCHED_WARN_ON(!entity_is_task(se));
se                254 kernel/sched/fair.c 	return container_of(se, struct task_struct, se);
se                258 kernel/sched/fair.c #define for_each_sched_entity(se) \
se                259 kernel/sched/fair.c 		for (; se; se = se->parent)
se                263 kernel/sched/fair.c 	return p->se.cfs_rq;
se                267 kernel/sched/fair.c static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
se                269 kernel/sched/fair.c 	return se->cfs_rq;
se                391 kernel/sched/fair.c is_same_group(struct sched_entity *se, struct sched_entity *pse)
se                393 kernel/sched/fair.c 	if (se->cfs_rq == pse->cfs_rq)
se                394 kernel/sched/fair.c 		return se->cfs_rq;
se                399 kernel/sched/fair.c static inline struct sched_entity *parent_entity(struct sched_entity *se)
se                401 kernel/sched/fair.c 	return se->parent;
se                405 kernel/sched/fair.c find_matching_se(struct sched_entity **se, struct sched_entity **pse)
se                417 kernel/sched/fair.c 	se_depth = (*se)->depth;
se                422 kernel/sched/fair.c 		*se = parent_entity(*se);
se                430 kernel/sched/fair.c 	while (!is_same_group(*se, *pse)) {
se                431 kernel/sched/fair.c 		*se = parent_entity(*se);
se                438 kernel/sched/fair.c static inline struct task_struct *task_of(struct sched_entity *se)
se                440 kernel/sched/fair.c 	return container_of(se, struct task_struct, se);
se                443 kernel/sched/fair.c #define for_each_sched_entity(se) \
se                444 kernel/sched/fair.c 		for (; se; se = NULL)
se                451 kernel/sched/fair.c static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
se                453 kernel/sched/fair.c 	struct task_struct *p = task_of(se);
se                487 kernel/sched/fair.c static inline struct sched_entity *parent_entity(struct sched_entity *se)
se                493 kernel/sched/fair.c find_matching_se(struct sched_entity **se, struct sched_entity **pse)
se                545 kernel/sched/fair.c 		struct sched_entity *se;
se                546 kernel/sched/fair.c 		se = rb_entry(leftmost, struct sched_entity, run_node);
se                549 kernel/sched/fair.c 			vruntime = se->vruntime;
se                551 kernel/sched/fair.c 			vruntime = min_vruntime(vruntime, se->vruntime);
se                565 kernel/sched/fair.c static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se                582 kernel/sched/fair.c 		if (entity_before(se, entry)) {
se                590 kernel/sched/fair.c 	rb_link_node(&se->run_node, parent, link);
se                591 kernel/sched/fair.c 	rb_insert_color_cached(&se->run_node,
se                595 kernel/sched/fair.c static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se                597 kernel/sched/fair.c 	rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
se                610 kernel/sched/fair.c static struct sched_entity *__pick_next_entity(struct sched_entity *se)
se                612 kernel/sched/fair.c 	struct rb_node *next = rb_next(&se->run_node);
se                662 kernel/sched/fair.c static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
se                664 kernel/sched/fair.c 	if (unlikely(se->load.weight != NICE_0_LOAD))
se                665 kernel/sched/fair.c 		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
se                692 kernel/sched/fair.c static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
se                694 kernel/sched/fair.c 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
se                696 kernel/sched/fair.c 	for_each_sched_entity(se) {
se                700 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
se                703 kernel/sched/fair.c 		if (unlikely(!se->on_rq)) {
se                706 kernel/sched/fair.c 			update_load_add(&lw, se->load.weight);
se                709 kernel/sched/fair.c 		slice = __calc_delta(slice, se->load.weight, load);
se                719 kernel/sched/fair.c static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
se                721 kernel/sched/fair.c 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
se                732 kernel/sched/fair.c void init_entity_runnable_average(struct sched_entity *se)
se                734 kernel/sched/fair.c 	struct sched_avg *sa = &se->avg;
se                744 kernel/sched/fair.c 	if (entity_is_task(se))
se                745 kernel/sched/fair.c 		sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight);
se                747 kernel/sched/fair.c 	se->runnable_weight = se->load.weight;
se                752 kernel/sched/fair.c static void attach_entity_cfs_rq(struct sched_entity *se);
se                782 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
se                783 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
se                784 kernel/sched/fair.c 	struct sched_avg *sa = &se->avg;
se                790 kernel/sched/fair.c 			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
se                811 kernel/sched/fair.c 		se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
se                815 kernel/sched/fair.c 	attach_entity_cfs_rq(se);
se                819 kernel/sched/fair.c void init_entity_runnable_average(struct sched_entity *se)
se                870 kernel/sched/fair.c 	update_curr(cfs_rq_of(&rq->curr->se));
se                874 kernel/sched/fair.c update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
se                882 kernel/sched/fair.c 	prev_wait_start = schedstat_val(se->statistics.wait_start);
se                884 kernel/sched/fair.c 	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
se                888 kernel/sched/fair.c 	__schedstat_set(se->statistics.wait_start, wait_start);
se                892 kernel/sched/fair.c update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
se                900 kernel/sched/fair.c 	delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);
se                902 kernel/sched/fair.c 	if (entity_is_task(se)) {
se                903 kernel/sched/fair.c 		p = task_of(se);
se                910 kernel/sched/fair.c 			__schedstat_set(se->statistics.wait_start, delta);
se                916 kernel/sched/fair.c 	__schedstat_set(se->statistics.wait_max,
se                917 kernel/sched/fair.c 		      max(schedstat_val(se->statistics.wait_max), delta));
se                918 kernel/sched/fair.c 	__schedstat_inc(se->statistics.wait_count);
se                919 kernel/sched/fair.c 	__schedstat_add(se->statistics.wait_sum, delta);
se                920 kernel/sched/fair.c 	__schedstat_set(se->statistics.wait_start, 0);
se                924 kernel/sched/fair.c update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
se                932 kernel/sched/fair.c 	sleep_start = schedstat_val(se->statistics.sleep_start);
se                933 kernel/sched/fair.c 	block_start = schedstat_val(se->statistics.block_start);
se                935 kernel/sched/fair.c 	if (entity_is_task(se))
se                936 kernel/sched/fair.c 		tsk = task_of(se);
se                944 kernel/sched/fair.c 		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
se                945 kernel/sched/fair.c 			__schedstat_set(se->statistics.sleep_max, delta);
se                947 kernel/sched/fair.c 		__schedstat_set(se->statistics.sleep_start, 0);
se                948 kernel/sched/fair.c 		__schedstat_add(se->statistics.sum_sleep_runtime, delta);
se                961 kernel/sched/fair.c 		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
se                962 kernel/sched/fair.c 			__schedstat_set(se->statistics.block_max, delta);
se                964 kernel/sched/fair.c 		__schedstat_set(se->statistics.block_start, 0);
se                965 kernel/sched/fair.c 		__schedstat_add(se->statistics.sum_sleep_runtime, delta);
se                969 kernel/sched/fair.c 				__schedstat_add(se->statistics.iowait_sum, delta);
se                970 kernel/sched/fair.c 				__schedstat_inc(se->statistics.iowait_count);
se                995 kernel/sched/fair.c update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se               1004 kernel/sched/fair.c 	if (se != cfs_rq->curr)
se               1005 kernel/sched/fair.c 		update_stats_wait_start(cfs_rq, se);
se               1008 kernel/sched/fair.c 		update_stats_enqueue_sleeper(cfs_rq, se);
se               1012 kernel/sched/fair.c update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se               1022 kernel/sched/fair.c 	if (se != cfs_rq->curr)
se               1023 kernel/sched/fair.c 		update_stats_wait_end(cfs_rq, se);
se               1025 kernel/sched/fair.c 	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
se               1026 kernel/sched/fair.c 		struct task_struct *tsk = task_of(se);
se               1029 kernel/sched/fair.c 			__schedstat_set(se->statistics.sleep_start,
se               1032 kernel/sched/fair.c 			__schedstat_set(se->statistics.block_start,
se               1041 kernel/sched/fair.c update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
se               1046 kernel/sched/fair.c 	se->exec_start = rq_clock_task(rq_of(cfs_rq));
se               2014 kernel/sched/fair.c 	now = p->se.exec_start;
se               2015 kernel/sched/fair.c 	runtime = p->se.sum_exec_runtime;
se               2025 kernel/sched/fair.c 		delta = p->se.avg.load_sum;
se               2489 kernel/sched/fair.c 	u64 runtime = p->se.sum_exec_runtime;
se               2620 kernel/sched/fair.c 	if (unlikely(p->se.sum_exec_runtime != runtime)) {
se               2621 kernel/sched/fair.c 		u64 diff = p->se.sum_exec_runtime - runtime;
se               2690 kernel/sched/fair.c 	now = curr->se.sum_exec_runtime;
se               2757 kernel/sched/fair.c account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
se               2759 kernel/sched/fair.c 	update_load_add(&cfs_rq->load, se->load.weight);
se               2761 kernel/sched/fair.c 	if (entity_is_task(se)) {
se               2764 kernel/sched/fair.c 		account_numa_enqueue(rq, task_of(se));
se               2765 kernel/sched/fair.c 		list_add(&se->group_node, &rq->cfs_tasks);
se               2772 kernel/sched/fair.c account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
se               2774 kernel/sched/fair.c 	update_load_sub(&cfs_rq->load, se->load.weight);
se               2776 kernel/sched/fair.c 	if (entity_is_task(se)) {
se               2777 kernel/sched/fair.c 		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
se               2778 kernel/sched/fair.c 		list_del_init(&se->group_node);
se               2834 kernel/sched/fair.c enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
se               2836 kernel/sched/fair.c 	cfs_rq->runnable_weight += se->runnable_weight;
se               2838 kernel/sched/fair.c 	cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
se               2839 kernel/sched/fair.c 	cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
se               2843 kernel/sched/fair.c dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
se               2845 kernel/sched/fair.c 	cfs_rq->runnable_weight -= se->runnable_weight;
se               2847 kernel/sched/fair.c 	sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
se               2849 kernel/sched/fair.c 		     se_runnable(se) * se->avg.runnable_load_sum);
se               2853 kernel/sched/fair.c enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
se               2855 kernel/sched/fair.c 	cfs_rq->avg.load_avg += se->avg.load_avg;
se               2856 kernel/sched/fair.c 	cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
se               2860 kernel/sched/fair.c dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
se               2862 kernel/sched/fair.c 	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
se               2863 kernel/sched/fair.c 	sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
se               2867 kernel/sched/fair.c enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
se               2869 kernel/sched/fair.c dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
se               2871 kernel/sched/fair.c enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
se               2873 kernel/sched/fair.c dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
se               2876 kernel/sched/fair.c static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
se               2879 kernel/sched/fair.c 	if (se->on_rq) {
se               2881 kernel/sched/fair.c 		if (cfs_rq->curr == se)
se               2883 kernel/sched/fair.c 		account_entity_dequeue(cfs_rq, se);
se               2884 kernel/sched/fair.c 		dequeue_runnable_load_avg(cfs_rq, se);
se               2886 kernel/sched/fair.c 	dequeue_load_avg(cfs_rq, se);
se               2888 kernel/sched/fair.c 	se->runnable_weight = runnable;
se               2889 kernel/sched/fair.c 	update_load_set(&se->load, weight);
se               2893 kernel/sched/fair.c 		u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
se               2895 kernel/sched/fair.c 		se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
se               2896 kernel/sched/fair.c 		se->avg.runnable_load_avg =
se               2897 kernel/sched/fair.c 			div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
se               2901 kernel/sched/fair.c 	enqueue_load_avg(cfs_rq, se);
se               2902 kernel/sched/fair.c 	if (se->on_rq) {
se               2903 kernel/sched/fair.c 		account_entity_enqueue(cfs_rq, se);
se               2904 kernel/sched/fair.c 		enqueue_runnable_load_avg(cfs_rq, se);
se               2910 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
se               2911 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               2912 kernel/sched/fair.c 	struct load_weight *load = &se->load;
se               2915 kernel/sched/fair.c 	reweight_entity(cfs_rq, se, weight, weight);
se               3079 kernel/sched/fair.c static void update_cfs_group(struct sched_entity *se)
se               3081 kernel/sched/fair.c 	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
se               3093 kernel/sched/fair.c 	if (likely(se->load.weight == shares))
se               3100 kernel/sched/fair.c 	reweight_entity(cfs_rq_of(se), se, shares, runnable);
se               3104 kernel/sched/fair.c static inline void update_cfs_group(struct sched_entity *se)
se               3170 kernel/sched/fair.c void set_task_rq_fair(struct sched_entity *se,
se               3186 kernel/sched/fair.c 	if (!(se->avg.last_update_time && prev))
se               3210 kernel/sched/fair.c 	__update_load_avg_blocked_se(p_last_update_time, se);
se               3211 kernel/sched/fair.c 	se->avg.last_update_time = n_last_update_time;
se               3284 kernel/sched/fair.c update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
se               3286 kernel/sched/fair.c 	long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
se               3301 kernel/sched/fair.c 	se->avg.util_avg = gcfs_rq->avg.util_avg;
se               3302 kernel/sched/fair.c 	se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
se               3310 kernel/sched/fair.c update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
se               3327 kernel/sched/fair.c 		runnable_sum += se->avg.load_sum;
se               3340 kernel/sched/fair.c 		runnable_sum = min(se->avg.load_sum, load_sum);
se               3349 kernel/sched/fair.c 	running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
se               3352 kernel/sched/fair.c 	load_sum = (s64)se_weight(se) * runnable_sum;
se               3355 kernel/sched/fair.c 	delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
se               3356 kernel/sched/fair.c 	delta_avg = load_avg - se->avg.load_avg;
se               3358 kernel/sched/fair.c 	se->avg.load_sum = runnable_sum;
se               3359 kernel/sched/fair.c 	se->avg.load_avg = load_avg;
se               3363 kernel/sched/fair.c 	runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
se               3365 kernel/sched/fair.c 	delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
se               3366 kernel/sched/fair.c 	delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
se               3368 kernel/sched/fair.c 	se->avg.runnable_load_sum = runnable_sum;
se               3369 kernel/sched/fair.c 	se->avg.runnable_load_avg = runnable_load_avg;
se               3371 kernel/sched/fair.c 	if (se->on_rq) {
se               3384 kernel/sched/fair.c static inline int propagate_entity_load_avg(struct sched_entity *se)
se               3388 kernel/sched/fair.c 	if (entity_is_task(se))
se               3391 kernel/sched/fair.c 	gcfs_rq = group_cfs_rq(se);
se               3397 kernel/sched/fair.c 	cfs_rq = cfs_rq_of(se);
se               3401 kernel/sched/fair.c 	update_tg_cfs_util(cfs_rq, se, gcfs_rq);
se               3402 kernel/sched/fair.c 	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
se               3405 kernel/sched/fair.c 	trace_pelt_se_tp(se);
se               3414 kernel/sched/fair.c static inline bool skip_blocked_update(struct sched_entity *se)
se               3416 kernel/sched/fair.c 	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
se               3422 kernel/sched/fair.c 	if (se->avg.load_avg || se->avg.util_avg)
se               3444 kernel/sched/fair.c static inline int propagate_entity_load_avg(struct sched_entity *se)
se               3519 kernel/sched/fair.c static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se               3530 kernel/sched/fair.c 	se->avg.last_update_time = cfs_rq->avg.last_update_time;
se               3531 kernel/sched/fair.c 	se->avg.period_contrib = cfs_rq->avg.period_contrib;
se               3539 kernel/sched/fair.c 	se->avg.util_sum = se->avg.util_avg * divider;
se               3541 kernel/sched/fair.c 	se->avg.load_sum = divider;
se               3542 kernel/sched/fair.c 	if (se_weight(se)) {
se               3543 kernel/sched/fair.c 		se->avg.load_sum =
se               3544 kernel/sched/fair.c 			div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
se               3547 kernel/sched/fair.c 	se->avg.runnable_load_sum = se->avg.load_sum;
se               3549 kernel/sched/fair.c 	enqueue_load_avg(cfs_rq, se);
se               3550 kernel/sched/fair.c 	cfs_rq->avg.util_avg += se->avg.util_avg;
se               3551 kernel/sched/fair.c 	cfs_rq->avg.util_sum += se->avg.util_sum;
se               3553 kernel/sched/fair.c 	add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
se               3568 kernel/sched/fair.c static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
se               3570 kernel/sched/fair.c 	dequeue_load_avg(cfs_rq, se);
se               3571 kernel/sched/fair.c 	sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
se               3572 kernel/sched/fair.c 	sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
se               3574 kernel/sched/fair.c 	add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
se               3589 kernel/sched/fair.c static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se               3598 kernel/sched/fair.c 	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
se               3599 kernel/sched/fair.c 		__update_load_avg_se(now, cfs_rq, se);
se               3602 kernel/sched/fair.c 	decayed |= propagate_entity_load_avg(se);
se               3604 kernel/sched/fair.c 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
se               3613 kernel/sched/fair.c 		attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
se               3649 kernel/sched/fair.c static void sync_entity_load_avg(struct sched_entity *se)
se               3651 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               3655 kernel/sched/fair.c 	__update_load_avg_blocked_se(last_update_time, se);
se               3662 kernel/sched/fair.c static void remove_entity_load_avg(struct sched_entity *se)
se               3664 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               3673 kernel/sched/fair.c 	sync_entity_load_avg(se);
se               3677 kernel/sched/fair.c 	cfs_rq->removed.util_avg	+= se->avg.util_avg;
se               3678 kernel/sched/fair.c 	cfs_rq->removed.load_avg	+= se->avg.load_avg;
se               3679 kernel/sched/fair.c 	cfs_rq->removed.runnable_sum	+= se->avg.load_sum; /* == runnable_sum */
se               3695 kernel/sched/fair.c 	return READ_ONCE(p->se.avg.util_avg);
se               3700 kernel/sched/fair.c 	struct util_est ue = READ_ONCE(p->se.avg.util_est);
se               3763 kernel/sched/fair.c 	ue = p->se.avg.util_est;
se               3804 kernel/sched/fair.c 	WRITE_ONCE(p->se.avg.util_est, ue);
se               3836 kernel/sched/fair.c static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
se               3841 kernel/sched/fair.c static inline void remove_entity_load_avg(struct sched_entity *se) {}
se               3844 kernel/sched/fair.c attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
se               3846 kernel/sched/fair.c detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
se               3863 kernel/sched/fair.c static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
se               3866 kernel/sched/fair.c 	s64 d = se->vruntime - cfs_rq->min_vruntime;
se               3877 kernel/sched/fair.c place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
se               3888 kernel/sched/fair.c 		vruntime += sched_vslice(cfs_rq, se);
se               3905 kernel/sched/fair.c 	se->vruntime = max_vruntime(se->vruntime, vruntime);
se               3963 kernel/sched/fair.c enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se               3966 kernel/sched/fair.c 	bool curr = cfs_rq->curr == se;
se               3973 kernel/sched/fair.c 		se->vruntime += cfs_rq->min_vruntime;
se               3984 kernel/sched/fair.c 		se->vruntime += cfs_rq->min_vruntime;
se               3994 kernel/sched/fair.c 	update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
se               3995 kernel/sched/fair.c 	update_cfs_group(se);
se               3996 kernel/sched/fair.c 	enqueue_runnable_load_avg(cfs_rq, se);
se               3997 kernel/sched/fair.c 	account_entity_enqueue(cfs_rq, se);
se               4000 kernel/sched/fair.c 		place_entity(cfs_rq, se, 0);
se               4003 kernel/sched/fair.c 	update_stats_enqueue(cfs_rq, se, flags);
se               4004 kernel/sched/fair.c 	check_spread(cfs_rq, se);
se               4006 kernel/sched/fair.c 		__enqueue_entity(cfs_rq, se);
se               4007 kernel/sched/fair.c 	se->on_rq = 1;
se               4021 kernel/sched/fair.c static void __clear_buddies_last(struct sched_entity *se)
se               4023 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               4024 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               4025 kernel/sched/fair.c 		if (cfs_rq->last != se)
se               4032 kernel/sched/fair.c static void __clear_buddies_next(struct sched_entity *se)
se               4034 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               4035 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               4036 kernel/sched/fair.c 		if (cfs_rq->next != se)
se               4043 kernel/sched/fair.c static void __clear_buddies_skip(struct sched_entity *se)
se               4045 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               4046 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               4047 kernel/sched/fair.c 		if (cfs_rq->skip != se)
se               4054 kernel/sched/fair.c static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
se               4056 kernel/sched/fair.c 	if (cfs_rq->last == se)
se               4057 kernel/sched/fair.c 		__clear_buddies_last(se);
se               4059 kernel/sched/fair.c 	if (cfs_rq->next == se)
se               4060 kernel/sched/fair.c 		__clear_buddies_next(se);
se               4062 kernel/sched/fair.c 	if (cfs_rq->skip == se)
se               4063 kernel/sched/fair.c 		__clear_buddies_skip(se);
se               4069 kernel/sched/fair.c dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se               4084 kernel/sched/fair.c 	update_load_avg(cfs_rq, se, UPDATE_TG);
se               4085 kernel/sched/fair.c 	dequeue_runnable_load_avg(cfs_rq, se);
se               4087 kernel/sched/fair.c 	update_stats_dequeue(cfs_rq, se, flags);
se               4089 kernel/sched/fair.c 	clear_buddies(cfs_rq, se);
se               4091 kernel/sched/fair.c 	if (se != cfs_rq->curr)
se               4092 kernel/sched/fair.c 		__dequeue_entity(cfs_rq, se);
se               4093 kernel/sched/fair.c 	se->on_rq = 0;
se               4094 kernel/sched/fair.c 	account_entity_dequeue(cfs_rq, se);
se               4103 kernel/sched/fair.c 		se->vruntime -= cfs_rq->min_vruntime;
se               4108 kernel/sched/fair.c 	update_cfs_group(se);
se               4127 kernel/sched/fair.c 	struct sched_entity *se;
se               4150 kernel/sched/fair.c 	se = __pick_first_entity(cfs_rq);
se               4151 kernel/sched/fair.c 	delta = curr->vruntime - se->vruntime;
se               4161 kernel/sched/fair.c set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se               4164 kernel/sched/fair.c 	if (se->on_rq) {
se               4170 kernel/sched/fair.c 		update_stats_wait_end(cfs_rq, se);
se               4171 kernel/sched/fair.c 		__dequeue_entity(cfs_rq, se);
se               4172 kernel/sched/fair.c 		update_load_avg(cfs_rq, se, UPDATE_TG);
se               4175 kernel/sched/fair.c 	update_stats_curr_start(cfs_rq, se);
se               4176 kernel/sched/fair.c 	cfs_rq->curr = se;
se               4184 kernel/sched/fair.c 	    rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
se               4185 kernel/sched/fair.c 		schedstat_set(se->statistics.slice_max,
se               4186 kernel/sched/fair.c 			max((u64)schedstat_val(se->statistics.slice_max),
se               4187 kernel/sched/fair.c 			    se->sum_exec_runtime - se->prev_sum_exec_runtime));
se               4190 kernel/sched/fair.c 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
se               4194 kernel/sched/fair.c wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
se               4207 kernel/sched/fair.c 	struct sched_entity *se;
se               4216 kernel/sched/fair.c 	se = left; /* ideally we run the leftmost entity */
se               4222 kernel/sched/fair.c 	if (cfs_rq->skip == se) {
se               4225 kernel/sched/fair.c 		if (se == curr) {
se               4228 kernel/sched/fair.c 			second = __pick_next_entity(se);
se               4234 kernel/sched/fair.c 			se = second;
se               4241 kernel/sched/fair.c 		se = cfs_rq->last;
se               4247 kernel/sched/fair.c 		se = cfs_rq->next;
se               4249 kernel/sched/fair.c 	clear_buddies(cfs_rq, se);
se               4251 kernel/sched/fair.c 	return se;
se               4502 kernel/sched/fair.c 	struct sched_entity *se;
se               4506 kernel/sched/fair.c 	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
se               4515 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               4516 kernel/sched/fair.c 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
se               4518 kernel/sched/fair.c 		if (!se->on_rq)
se               4522 kernel/sched/fair.c 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
se               4530 kernel/sched/fair.c 	if (!se)
se               4562 kernel/sched/fair.c 	struct sched_entity *se;
se               4566 kernel/sched/fair.c 	se = cfs_rq->tg->se[cpu_of(rq)];
se               4585 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               4586 kernel/sched/fair.c 		if (se->on_rq)
se               4589 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
se               4591 kernel/sched/fair.c 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
se               4599 kernel/sched/fair.c 	if (!se)
se               4607 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               4608 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
se               5137 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
se               5138 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               5143 kernel/sched/fair.c 		u64 slice = sched_slice(cfs_rq, se);
se               5144 kernel/sched/fair.c 		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
se               5168 kernel/sched/fair.c 	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
se               5210 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
se               5229 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               5230 kernel/sched/fair.c 		if (se->on_rq)
se               5232 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
se               5233 kernel/sched/fair.c 		enqueue_entity(cfs_rq, se, flags);
se               5245 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               5246 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
se               5248 kernel/sched/fair.c 		update_load_avg(cfs_rq, se, UPDATE_TG);
se               5249 kernel/sched/fair.c 		update_cfs_group(se);
se               5267 kernel/sched/fair.c 	if (!se) {
se               5295 kernel/sched/fair.c 		for_each_sched_entity(se) {
se               5296 kernel/sched/fair.c 			cfs_rq = cfs_rq_of(se);
se               5308 kernel/sched/fair.c static void set_next_buddy(struct sched_entity *se);
se               5318 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
se               5322 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               5323 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
se               5324 kernel/sched/fair.c 		dequeue_entity(cfs_rq, se, flags);
se               5336 kernel/sched/fair.c 			se = parent_entity(se);
se               5341 kernel/sched/fair.c 			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
se               5342 kernel/sched/fair.c 				set_next_buddy(se);
se               5348 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               5349 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
se               5351 kernel/sched/fair.c 		update_load_avg(cfs_rq, se, UPDATE_TG);
se               5352 kernel/sched/fair.c 		update_cfs_group(se);
se               5364 kernel/sched/fair.c 	if (!se)
se               5557 kernel/sched/fair.c 	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
se               5562 kernel/sched/fair.c 	schedstat_inc(p->se.statistics.nr_wakeups_affine);
se               5785 kernel/sched/fair.c 		sync_entity_load_avg(&p->se);
se               6134 kernel/sched/fair.c 	if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
se               6226 kernel/sched/fair.c 	sync_entity_load_avg(&p->se);
se               6384 kernel/sched/fair.c 	sync_entity_load_avg(&p->se);
se               6531 kernel/sched/fair.c static void detach_entity_cfs_rq(struct sched_entity *se);
se               6547 kernel/sched/fair.c 		struct sched_entity *se = &p->se;
se               6548 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               6563 kernel/sched/fair.c 		se->vruntime -= min_vruntime;
se               6572 kernel/sched/fair.c 		detach_entity_cfs_rq(&p->se);
se               6583 kernel/sched/fair.c 		remove_entity_load_avg(&p->se);
se               6587 kernel/sched/fair.c 	p->se.avg.last_update_time = 0;
se               6590 kernel/sched/fair.c 	p->se.exec_start = 0;
se               6597 kernel/sched/fair.c 	remove_entity_load_avg(&p->se);
se               6610 kernel/sched/fair.c static unsigned long wakeup_gran(struct sched_entity *se)
se               6627 kernel/sched/fair.c 	return calc_delta_fair(gran, se);
se               6645 kernel/sched/fair.c wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
se               6647 kernel/sched/fair.c 	s64 gran, vdiff = curr->vruntime - se->vruntime;
se               6652 kernel/sched/fair.c 	gran = wakeup_gran(se);
se               6659 kernel/sched/fair.c static void set_last_buddy(struct sched_entity *se)
se               6661 kernel/sched/fair.c 	if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
se               6664 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               6665 kernel/sched/fair.c 		if (SCHED_WARN_ON(!se->on_rq))
se               6667 kernel/sched/fair.c 		cfs_rq_of(se)->last = se;
se               6671 kernel/sched/fair.c static void set_next_buddy(struct sched_entity *se)
se               6673 kernel/sched/fair.c 	if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
se               6676 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               6677 kernel/sched/fair.c 		if (SCHED_WARN_ON(!se->on_rq))
se               6679 kernel/sched/fair.c 		cfs_rq_of(se)->next = se;
se               6683 kernel/sched/fair.c static void set_skip_buddy(struct sched_entity *se)
se               6685 kernel/sched/fair.c 	for_each_sched_entity(se)
se               6686 kernel/sched/fair.c 		cfs_rq_of(se)->skip = se;
se               6695 kernel/sched/fair.c 	struct sched_entity *se = &curr->se, *pse = &p->se;
se               6700 kernel/sched/fair.c 	if (unlikely(se == pse))
se               6742 kernel/sched/fair.c 	find_matching_se(&se, &pse);
se               6743 kernel/sched/fair.c 	update_curr(cfs_rq_of(se));
se               6745 kernel/sched/fair.c 	if (wakeup_preempt_entity(se, pse) == 1) {
se               6768 kernel/sched/fair.c 	if (unlikely(!se->on_rq || curr == rq->idle))
se               6771 kernel/sched/fair.c 	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
se               6772 kernel/sched/fair.c 		set_last_buddy(se);
se               6779 kernel/sched/fair.c 	struct sched_entity *se;
se               6830 kernel/sched/fair.c 		se = pick_next_entity(cfs_rq, curr);
se               6831 kernel/sched/fair.c 		cfs_rq = group_cfs_rq(se);
se               6834 kernel/sched/fair.c 	p = task_of(se);
se               6842 kernel/sched/fair.c 		struct sched_entity *pse = &prev->se;
se               6844 kernel/sched/fair.c 		while (!(cfs_rq = is_same_group(se, pse))) {
se               6845 kernel/sched/fair.c 			int se_depth = se->depth;
se               6853 kernel/sched/fair.c 				set_next_entity(cfs_rq_of(se), se);
se               6854 kernel/sched/fair.c 				se = parent_entity(se);
se               6859 kernel/sched/fair.c 		set_next_entity(cfs_rq, se);
se               6869 kernel/sched/fair.c 		se = pick_next_entity(cfs_rq, NULL);
se               6870 kernel/sched/fair.c 		set_next_entity(cfs_rq, se);
se               6871 kernel/sched/fair.c 		cfs_rq = group_cfs_rq(se);
se               6874 kernel/sched/fair.c 	p = task_of(se);
se               6883 kernel/sched/fair.c 	list_move(&p->se.group_node, &rq->cfs_tasks);
se               6924 kernel/sched/fair.c 	struct sched_entity *se = &prev->se;
se               6927 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               6928 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
se               6929 kernel/sched/fair.c 		put_prev_entity(cfs_rq, se);
se               6942 kernel/sched/fair.c 	struct sched_entity *se = &curr->se;
se               6950 kernel/sched/fair.c 	clear_buddies(cfs_rq, se);
se               6966 kernel/sched/fair.c 	set_skip_buddy(se);
se               6971 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
se               6974 kernel/sched/fair.c 	if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
se               6978 kernel/sched/fair.c 	set_next_buddy(se);
se               7168 kernel/sched/fair.c 			(&p->se == cfs_rq_of(&p->se)->next ||
se               7169 kernel/sched/fair.c 			 &p->se == cfs_rq_of(&p->se)->last))
se               7177 kernel/sched/fair.c 	delta = rq_clock_task(env->src_rq) - p->se.exec_start;
se               7265 kernel/sched/fair.c 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
se               7296 kernel/sched/fair.c 		schedstat_inc(p->se.statistics.nr_failed_migrations_running);
se               7314 kernel/sched/fair.c 			schedstat_inc(p->se.statistics.nr_forced_migrations);
se               7319 kernel/sched/fair.c 	schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
se               7347 kernel/sched/fair.c 			&env->src_rq->cfs_tasks, se.group_node) {
se               7393 kernel/sched/fair.c 		p = list_last_entry(tasks, struct task_struct, se.group_node);
se               7419 kernel/sched/fair.c 		list_add(&p->se.group_node, &env->tasks);
se               7443 kernel/sched/fair.c 		list_move(&p->se.group_node, tasks);
se               7496 kernel/sched/fair.c 		p = list_first_entry(tasks, struct task_struct, se.group_node);
se               7497 kernel/sched/fair.c 		list_del_init(&p->se.group_node);
se               7598 kernel/sched/fair.c 		struct sched_entity *se;
se               7608 kernel/sched/fair.c 		se = cfs_rq->tg->se[cpu];
se               7609 kernel/sched/fair.c 		if (se && !skip_blocked_update(se))
se               7610 kernel/sched/fair.c 			update_load_avg(cfs_rq_of(se), se, 0);
se               7635 kernel/sched/fair.c 	struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
se               7643 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               7644 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
se               7645 kernel/sched/fair.c 		WRITE_ONCE(cfs_rq->h_load_next, se);
se               7650 kernel/sched/fair.c 	if (!se) {
se               7655 kernel/sched/fair.c 	while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
se               7657 kernel/sched/fair.c 		load = div64_ul(load * se->avg.load_avg,
se               7659 kernel/sched/fair.c 		cfs_rq = group_cfs_rq(se);
se               7670 kernel/sched/fair.c 	return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
se               7688 kernel/sched/fair.c 	return p->se.avg.load_avg;
se               9982 kernel/sched/fair.c 	struct sched_entity *se = &curr->se;
se               9984 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               9985 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
se               9986 kernel/sched/fair.c 		entity_tick(cfs_rq, se, queued);
se               10004 kernel/sched/fair.c 	struct sched_entity *se = &p->se, *curr;
se               10015 kernel/sched/fair.c 		se->vruntime = curr->vruntime;
se               10017 kernel/sched/fair.c 	place_entity(cfs_rq, se, 1);
se               10019 kernel/sched/fair.c 	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
se               10024 kernel/sched/fair.c 		swap(curr->vruntime, se->vruntime);
se               10028 kernel/sched/fair.c 	se->vruntime -= cfs_rq->min_vruntime;
se               10056 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
se               10075 kernel/sched/fair.c 	if (!se->sum_exec_runtime ||
se               10087 kernel/sched/fair.c static void propagate_entity_cfs_rq(struct sched_entity *se)
se               10092 kernel/sched/fair.c 	se = se->parent;
se               10094 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               10095 kernel/sched/fair.c 		cfs_rq = cfs_rq_of(se);
se               10100 kernel/sched/fair.c 		update_load_avg(cfs_rq, se, UPDATE_TG);
se               10104 kernel/sched/fair.c static void propagate_entity_cfs_rq(struct sched_entity *se) { }
se               10107 kernel/sched/fair.c static void detach_entity_cfs_rq(struct sched_entity *se)
se               10109 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               10112 kernel/sched/fair.c 	update_load_avg(cfs_rq, se, 0);
se               10113 kernel/sched/fair.c 	detach_entity_load_avg(cfs_rq, se);
se               10115 kernel/sched/fair.c 	propagate_entity_cfs_rq(se);
se               10118 kernel/sched/fair.c static void attach_entity_cfs_rq(struct sched_entity *se)
se               10120 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               10127 kernel/sched/fair.c 	se->depth = se->parent ? se->parent->depth + 1 : 0;
se               10131 kernel/sched/fair.c 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
se               10132 kernel/sched/fair.c 	attach_entity_load_avg(cfs_rq, se, 0);
se               10134 kernel/sched/fair.c 	propagate_entity_cfs_rq(se);
se               10139 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
se               10140 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               10147 kernel/sched/fair.c 		place_entity(cfs_rq, se, 0);
se               10148 kernel/sched/fair.c 		se->vruntime -= cfs_rq->min_vruntime;
se               10151 kernel/sched/fair.c 	detach_entity_cfs_rq(se);
se               10156 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
se               10157 kernel/sched/fair.c 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               10159 kernel/sched/fair.c 	attach_entity_cfs_rq(se);
se               10162 kernel/sched/fair.c 		se->vruntime += cfs_rq->min_vruntime;
se               10194 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
se               10202 kernel/sched/fair.c 		list_move(&se->group_node, &rq->cfs_tasks);
se               10206 kernel/sched/fair.c 	for_each_sched_entity(se) {
se               10207 kernel/sched/fair.c 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
se               10209 kernel/sched/fair.c 		set_next_entity(cfs_rq, se);
se               10230 kernel/sched/fair.c 	struct sched_entity *se = &p->se;
se               10233 kernel/sched/fair.c 	se->depth = se->parent ? se->parent->depth + 1 : 0;
se               10243 kernel/sched/fair.c 	p->se.avg.last_update_time = 0;
se               10270 kernel/sched/fair.c 		if (tg->se)
se               10271 kernel/sched/fair.c 			kfree(tg->se[i]);
se               10275 kernel/sched/fair.c 	kfree(tg->se);
se               10280 kernel/sched/fair.c 	struct sched_entity *se;
se               10287 kernel/sched/fair.c 	tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
se               10288 kernel/sched/fair.c 	if (!tg->se)
se               10301 kernel/sched/fair.c 		se = kzalloc_node(sizeof(struct sched_entity),
se               10303 kernel/sched/fair.c 		if (!se)
se               10307 kernel/sched/fair.c 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
se               10308 kernel/sched/fair.c 		init_entity_runnable_average(se);
se               10321 kernel/sched/fair.c 	struct sched_entity *se;
se               10328 kernel/sched/fair.c 		se = tg->se[i];
se               10331 kernel/sched/fair.c 		attach_entity_cfs_rq(se);
se               10344 kernel/sched/fair.c 		if (tg->se[cpu])
se               10345 kernel/sched/fair.c 			remove_entity_load_avg(tg->se[cpu]);
se               10363 kernel/sched/fair.c 			struct sched_entity *se, int cpu,
se               10373 kernel/sched/fair.c 	tg->se[cpu] = se;
se               10376 kernel/sched/fair.c 	if (!se)
se               10380 kernel/sched/fair.c 		se->cfs_rq = &rq->cfs;
se               10381 kernel/sched/fair.c 		se->depth = 0;
se               10383 kernel/sched/fair.c 		se->cfs_rq = parent->my_q;
se               10384 kernel/sched/fair.c 		se->depth = parent->depth + 1;
se               10387 kernel/sched/fair.c 	se->my_q = cfs_rq;
se               10389 kernel/sched/fair.c 	update_load_set(&se->load, NICE_0_LOAD);
se               10390 kernel/sched/fair.c 	se->parent = parent;
se               10402 kernel/sched/fair.c 	if (!tg->se[0])
se               10414 kernel/sched/fair.c 		struct sched_entity *se = tg->se[i];
se               10420 kernel/sched/fair.c 		for_each_sched_entity(se) {
se               10421 kernel/sched/fair.c 			update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
se               10422 kernel/sched/fair.c 			update_cfs_group(se);
se               10449 kernel/sched/fair.c 	struct sched_entity *se = &task->se;
se               10457 kernel/sched/fair.c 		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
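Note: propagate_entity_cfs_rq() (fair.c:10087-10100 above) first steps to se->parent and then visits every remaining ancestor with for_each_sched_entity(), refreshing each level's load average. A minimal userspace sketch of that walk-up-the-hierarchy shape follows; the entity type and the update hook are illustrative stand-ins, and the macro mirrors the group-scheduling expansion of for_each_sched_entity().

/* Sketch of the parent-chain walk done by propagate_entity_cfs_rq()
 * above; the entity type and the "update" hook are illustrative. */
#include <stdio.h>

struct entity {
	const char *name;
	struct entity *parent;		/* NULL at the root */
};

/* With group scheduling enabled, for_each_sched_entity() expands to this. */
#define for_each_entity(se) for (; (se); (se) = (se)->parent)

static void update_level(struct entity *se)
{
	printf("updating %s\n", se->name);
}

static void propagate(struct entity *se)
{
	se = se->parent;		/* the entity itself was handled by the caller */
	for_each_entity(se)
		update_level(se);
}

int main(void)
{
	struct entity root = { "root", NULL };
	struct entity grp  = { "group", &root };
	struct entity task = { "task", &grp };

	propagate(&task);		/* prints "group" then "root" */
	return 0;
}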
se                266 kernel/sched/pelt.c int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
se                268 kernel/sched/pelt.c 	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
se                269 kernel/sched/pelt.c 		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
se                270 kernel/sched/pelt.c 		trace_pelt_se_tp(se);
se                277 kernel/sched/pelt.c int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
se                279 kernel/sched/pelt.c 	if (___update_load_sum(now, &se->avg, !!se->on_rq, !!se->on_rq,
se                280 kernel/sched/pelt.c 				cfs_rq->curr == se)) {
se                282 kernel/sched/pelt.c 		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
se                283 kernel/sched/pelt.c 		cfs_se_util_change(&se->avg);
se                284 kernel/sched/pelt.c 		trace_pelt_se_tp(se);
se                  4 kernel/sched/pelt.h int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
se                  5 kernel/sched/pelt.h int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
se                966 kernel/sched/rt.c 	delta_exec = now - curr->se.exec_start;
se                970 kernel/sched/rt.c 	schedstat_set(curr->se.statistics.exec_max,
se                971 kernel/sched/rt.c 		      max(curr->se.statistics.exec_max, delta_exec));
se                973 kernel/sched/rt.c 	curr->se.sum_exec_runtime += delta_exec;
se                976 kernel/sched/rt.c 	curr->se.exec_start = now;
se               1520 kernel/sched/rt.c 	p->se.exec_start = rq_clock_task(rq);
se               2299 kernel/sched/rt.c 						    p->se.sum_exec_runtime);
se                366 kernel/sched/sched.h 	struct sched_entity	**se;
se                450 kernel/sched/sched.h 			struct sched_entity *se, int cpu,
se                481 kernel/sched/sched.h extern void set_task_rq_fair(struct sched_entity *se,
se                484 kernel/sched/sched.h static inline void set_task_rq_fair(struct sched_entity *se,
se                696 kernel/sched/sched.h #define entity_is_task(se)	(!se->my_q)
se                698 kernel/sched/sched.h #define entity_is_task(se)	1
se                705 kernel/sched/sched.h static inline long se_weight(struct sched_entity *se)
se                707 kernel/sched/sched.h 	return scale_load_down(se->load.weight);
se                710 kernel/sched/sched.h static inline long se_runnable(struct sched_entity *se)
se                712 kernel/sched/sched.h 	return scale_load_down(se->runnable_weight);
se               1509 kernel/sched/sched.h 	set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
se               1510 kernel/sched/sched.h 	p->se.cfs_rq = tg->cfs_rq[cpu];
se               1511 kernel/sched/sched.h 	p->se.parent = tg->se[cpu];
se               1894 kernel/sched/sched.h extern void init_entity_runnable_average(struct sched_entity *se);
se                 34 kernel/sched/stop_task.c 	stop->se.exec_start = rq_clock_task(rq);
se                 71 kernel/sched/stop_task.c 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
se                 75 kernel/sched/stop_task.c 	schedstat_set(curr->se.statistics.exec_max,
se                 76 kernel/sched/stop_task.c 			max(curr->se.statistics.exec_max, delta_exec));
se                 78 kernel/sched/stop_task.c 	curr->se.sum_exec_runtime += delta_exec;
se                 81 kernel/sched/stop_task.c 	curr->se.exec_start = rq_clock_task(rq);
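Note: update_curr_rt() (rt.c:966-976) and the stop-task accounting (stop_task.c:71-81) above share one idiom: read the task clock, charge now - exec_start as delta_exec, fold it into sum_exec_runtime and the exec_max schedstat, then restart the span. A self-contained sketch of that idiom, with clock_gettime() standing in for rq_clock_task() and a hypothetical stats struct:

/* Sketch of the delta_exec accounting idiom used by update_curr_rt()
 * and put_prev_task_stop() above; clock_gettime() stands in for
 * rq_clock_task(), and the struct below is hypothetical. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct entity_stats {
	uint64_t exec_start;		/* ns timestamp when the span began */
	uint64_t sum_exec_runtime;	/* total ns accumulated so far */
	uint64_t exec_max;		/* longest single span seen */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Close the current span and open a new one, like update_curr_*(). */
static void update_curr(struct entity_stats *curr)
{
	uint64_t now = now_ns();
	uint64_t delta_exec = now - curr->exec_start;

	if (curr->exec_max < delta_exec)
		curr->exec_max = delta_exec;	/* schedstat_set(exec_max, max(...)) */

	curr->sum_exec_runtime += delta_exec;
	curr->exec_start = now;
}

int main(void)
{
	struct entity_stats e = { .exec_start = now_ns() };

	for (volatile long i = 0; i < 10000000; i++)
		;				/* burn some CPU time */
	update_curr(&e);
	printf("ran for %llu ns (max span %llu ns)\n",
	       (unsigned long long)e.sum_exec_runtime,
	       (unsigned long long)e.exec_max);
	return 0;
}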
se                229 kernel/time/posix-cpu-timers.c 	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
se                844 kernel/trace/trace_events_hist.c 	struct synth_event *se;
se                850 kernel/trace/trace_events_hist.c 	se = container_of(event, struct synth_event, call.event);
se                852 kernel/trace/trace_events_hist.c 	trace_seq_printf(s, "%s: ", se->name);
se                854 kernel/trace/trace_events_hist.c 	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
se                858 kernel/trace/trace_events_hist.c 		fmt = synth_field_fmt(se->fields[i]->type);
se                867 kernel/trace/trace_events_hist.c 		if (se->fields[i]->is_string) {
se                868 kernel/trace/trace_events_hist.c 			trace_seq_printf(s, print_fmt, se->fields[i]->name,
se                870 kernel/trace/trace_events_hist.c 					 i == se->n_fields - 1 ? "" : " ");
se                875 kernel/trace/trace_events_hist.c 			char *space = (i == se->n_fields - 1 ? "" : " ");
se                878 kernel/trace/trace_events_hist.c 						  se->fields[i]->name,
se                879 kernel/trace/trace_events_hist.c 						  se->fields[i]->size,
se                883 kernel/trace/trace_events_hist.c 			if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
se               6178 kernel/trace/trace_events_hist.c 	struct synth_event *se;
se               6193 kernel/trace/trace_events_hist.c 			se = find_synth_event(se_name);
se               6194 kernel/trace/trace_events_hist.c 			if (se)
se               6195 kernel/trace/trace_events_hist.c 				se->ref--;
se               6215 kernel/trace/trace_events_hist.c 	struct synth_event *se;
se               6314 kernel/trace/trace_events_hist.c 		se = find_synth_event(se_name);
se               6315 kernel/trace/trace_events_hist.c 		if (se)
se               6316 kernel/trace/trace_events_hist.c 			se->ref--;
se               6353 kernel/trace/trace_events_hist.c 	se = find_synth_event(se_name);
se               6354 kernel/trace/trace_events_hist.c 	if (se)
se               6355 kernel/trace/trace_events_hist.c 		se->ref++;
se                161 mm/swapfile.c  static inline struct swap_extent *next_se(struct swap_extent *se)
se                163 mm/swapfile.c  	struct rb_node *rb = rb_next(&se->rb_node);
se                173 mm/swapfile.c  	struct swap_extent *se;
se                179 mm/swapfile.c  	se = first_se(si);
se                180 mm/swapfile.c  	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
se                181 mm/swapfile.c  	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
se                190 mm/swapfile.c  	for (se = next_se(se); se; se = next_se(se)) {
se                191 mm/swapfile.c  		start_block = se->start_block << (PAGE_SHIFT - 9);
se                192 mm/swapfile.c  		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
se                207 mm/swapfile.c  	struct swap_extent *se;
se                212 mm/swapfile.c  		se = rb_entry(rb, struct swap_extent, rb_node);
se                213 mm/swapfile.c  		if (offset < se->start_page)
se                215 mm/swapfile.c  		else if (offset >= se->start_page + se->nr_pages)
se                218 mm/swapfile.c  			return se;
se                231 mm/swapfile.c  	struct swap_extent *se = offset_to_swap_extent(si, start_page);
se                234 mm/swapfile.c  		pgoff_t offset = start_page - se->start_page;
se                235 mm/swapfile.c  		sector_t start_block = se->start_block + offset;
se                236 mm/swapfile.c  		sector_t nr_blocks = se->nr_pages - offset;
se                249 mm/swapfile.c  		se = next_se(se);
se               1783 mm/swapfile.c  			struct swap_extent *se = first_se(sis);
se               1785 mm/swapfile.c  			if (se->start_block == offset) {
se               2260 mm/swapfile.c  	struct swap_extent *se;
se               2267 mm/swapfile.c  	se = offset_to_swap_extent(sis, offset);
se               2268 mm/swapfile.c  	return se->start_block + (offset - se->start_page);
se               2288 mm/swapfile.c  		struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
se               2291 mm/swapfile.c  		kfree(se);
se               2315 mm/swapfile.c  	struct swap_extent *se;
se               2328 mm/swapfile.c  		se = rb_entry(parent, struct swap_extent, rb_node);
se               2329 mm/swapfile.c  		BUG_ON(se->start_page + se->nr_pages != start_page);
se               2330 mm/swapfile.c  		if (se->start_block + se->nr_pages == start_block) {
se               2332 mm/swapfile.c  			se->nr_pages += nr_pages;
se               2338 mm/swapfile.c  	new_se = kmalloc(sizeof(*se), GFP_KERNEL);
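Note: offset_to_swap_extent() (swapfile.c:207-218 above) descends an rb-tree keyed by the half-open page range [start_page, start_page + nr_pages). The sketch below mirrors that descent and the start_block + (offset - start_page) mapping at swapfile.c:2268, but uses a plain binary search tree and illustrative names instead of the kernel rb-tree:

/* Sketch of the interval lookup in offset_to_swap_extent() above,
 * using a plain binary search tree in place of the kernel rb-tree;
 * struct and field names are illustrative. */
#include <stddef.h>
#include <stdio.h>

struct extent {
	unsigned long start_page;	/* first page covered */
	unsigned long nr_pages;		/* number of pages covered */
	unsigned long start_block;	/* backing block of start_page */
	struct extent *left, *right;	/* tree keyed by start_page */
};

static struct extent *offset_to_extent(struct extent *node, unsigned long offset)
{
	while (node) {
		if (offset < node->start_page)
			node = node->left;
		else if (offset >= node->start_page + node->nr_pages)
			node = node->right;
		else
			return node;	/* offset falls inside this extent */
	}
	return NULL;
}

int main(void)
{
	struct extent lo = { .start_page = 0,  .nr_pages = 16, .start_block = 100 };
	struct extent hi = { .start_page = 32, .nr_pages = 16, .start_block = 200 };
	struct extent root = { .start_page = 16, .nr_pages = 16, .start_block = 150,
			       .left = &lo, .right = &hi };
	struct extent *se = offset_to_extent(&root, 40);

	if (se)	/* map the page offset to its backing block, as at swapfile.c:2268 */
		printf("offset 40 -> block %lu\n",
		       se->start_block + (40 - se->start_page));
	return 0;
}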
se                534 net/nfc/core.c 	struct nfc_se *se;
se                536 net/nfc/core.c 	list_for_each_entry(se, &dev->secure_elements, list)
se                537 net/nfc/core.c 		if (se->idx == se_idx)
se                538 net/nfc/core.c 			return se;
se                546 net/nfc/core.c 	struct nfc_se *se;
se                573 net/nfc/core.c 	se = nfc_find_se(dev, se_idx);
se                574 net/nfc/core.c 	if (!se) {
se                579 net/nfc/core.c 	if (se->state == NFC_SE_ENABLED) {
se                586 net/nfc/core.c 		se->state = NFC_SE_ENABLED;
se                595 net/nfc/core.c 	struct nfc_se *se;
se                617 net/nfc/core.c 	se = nfc_find_se(dev, se_idx);
se                618 net/nfc/core.c 	if (!se) {
se                623 net/nfc/core.c 	if (se->state == NFC_SE_DISABLED) {
se                630 net/nfc/core.c 		se->state = NFC_SE_DISABLED;
se                868 net/nfc/core.c 	struct nfc_se *se;
se                873 net/nfc/core.c 	se = nfc_find_se(dev, se_idx);
se                874 net/nfc/core.c 	if (se)
se                877 net/nfc/core.c 	se = kzalloc(sizeof(struct nfc_se), GFP_KERNEL);
se                878 net/nfc/core.c 	if (!se)
se                881 net/nfc/core.c 	se->idx = se_idx;
se                882 net/nfc/core.c 	se->type = type;
se                883 net/nfc/core.c 	se->state = NFC_SE_DISABLED;
se                884 net/nfc/core.c 	INIT_LIST_HEAD(&se->list);
se                886 net/nfc/core.c 	list_add(&se->list, &dev->secure_elements);
se                890 net/nfc/core.c 		list_del(&se->list);
se                891 net/nfc/core.c 		kfree(se);
se                902 net/nfc/core.c 	struct nfc_se *se, *n;
se                907 net/nfc/core.c 	list_for_each_entry_safe(se, n, &dev->secure_elements, list)
se                908 net/nfc/core.c 		if (se->idx == se_idx) {
se                913 net/nfc/core.c 			list_del(&se->list);
se                914 net/nfc/core.c 			kfree(se);
se                960 net/nfc/core.c 	struct nfc_se *se, *n;
se                967 net/nfc/core.c 	list_for_each_entry_safe(se, n, &dev->secure_elements, list) {
se                968 net/nfc/core.c 			nfc_genl_se_removed(dev, se->idx);
se                969 net/nfc/core.c 			list_del(&se->list);
se                970 net/nfc/core.c 			kfree(se);
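Note: nfc_find_se() and nfc_add_se() (core.c:534-891 above) implement a simple find-or-allocate over dev->secure_elements. Below is a userspace sketch of the same pattern using a plain singly-linked list in place of the kernel list API; all names are illustrative:

/* Sketch of the nfc_find_se()/nfc_add_se() pattern above, with a
 * singly-linked list standing in for the kernel list_head API;
 * struct and field names are illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct se_node {
	unsigned int idx;
	int state;			/* e.g. 0 = disabled, 1 = enabled */
	struct se_node *next;
};

static struct se_node *find_se(struct se_node *head, unsigned int idx)
{
	for (struct se_node *se = head; se; se = se->next)
		if (se->idx == idx)
			return se;
	return NULL;
}

static int add_se(struct se_node **head, unsigned int idx)
{
	struct se_node *se;

	if (find_se(*head, idx))	/* already known: nothing to do */
		return 0;

	se = calloc(1, sizeof(*se));
	if (!se)
		return -1;

	se->idx = idx;
	se->next = *head;		/* list_add() also inserts at the head */
	*head = se;
	return 0;
}

int main(void)
{
	struct se_node *head = NULL;

	add_se(&head, 1);
	add_se(&head, 2);
	printf("se 2 %s\n", find_se(head, 2) ? "found" : "missing");
	while (head) {			/* teardown, as nfc_release() does above */
		struct se_node *n = head->next;
		free(head);
		head = n;
	}
	return 0;
}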
se                496 net/nfc/netlink.c 	struct nfc_se *se;
se                509 net/nfc/netlink.c 	se = nfc_find_se(dev, se_idx);
se                510 net/nfc/netlink.c 	if (!se)
se                515 net/nfc/netlink.c 	    nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type) ||
se                541 net/nfc/netlink.c 	struct nfc_se *se;
se                554 net/nfc/netlink.c 	se = nfc_find_se(dev, se_idx);
se                555 net/nfc/netlink.c 	if (!se)
se                560 net/nfc/netlink.c 	    nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type))
se               1330 net/nfc/netlink.c 	struct nfc_se *se, *n;
se               1332 net/nfc/netlink.c 	list_for_each_entry_safe(se, n, &dev->secure_elements, list) {
se               1342 net/nfc/netlink.c 		    nla_put_u32(msg, NFC_ATTR_SE_INDEX, se->idx) ||
se               1343 net/nfc/netlink.c 		    nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type))
se               1412 net/nfc/netlink.c 	struct nfc_se *se;
se               1434 net/nfc/netlink.c 	se = nfc_find_se(dev, se_idx);
se               1435 net/nfc/netlink.c 	if (!se) {
se               1440 net/nfc/netlink.c 	if (se->state != NFC_SE_ENABLED) {
se                110 scripts/dtc/util.c 	const char *ss, *se;
se                120 scripts/dtc/util.c 	se = s + len;
se                122 scripts/dtc/util.c 	while (s < se) {
se                124 scripts/dtc/util.c 		while (s < se && *s && isprint((unsigned char)*s))
se                629 scripts/kallsyms.c static int may_be_linker_script_provide_symbol(const struct sym_entry *se)
se                631 scripts/kallsyms.c 	const char *symbol = (char *)se->sym + 1;
se                632 scripts/kallsyms.c 	int len = se->len - 1;
se                140 sound/pci/ctxfi/cthardware.h 	int (*amixer_set_se)(void *blk, unsigned int se);
se                642 sound/pci/ctxfi/cthw20k1.c static int amixer_set_se(void *blk, unsigned int se)
se                646 sound/pci/ctxfi/cthw20k1.c 	set_field(&ctl->amophi, AMOPHI_SE, se);
se                644 sound/pci/ctxfi/cthw20k2.c static int amixer_set_se(void *blk, unsigned int se)
se                648 sound/pci/ctxfi/cthw20k2.c 	set_field(&ctl->amophi, AMOPHI_SE, se);
se                970 sound/soc/codecs/hdac_hdmi.c 	struct soc_enum *se;
se                982 sound/soc/codecs/hdac_hdmi.c 	se = devm_kzalloc(&hdev->dev, sizeof(*se), GFP_KERNEL);
se                983 sound/soc/codecs/hdac_hdmi.c 	if (!se)
se                992 sound/soc/codecs/hdac_hdmi.c 	kc->private_value = (long)se;
se                999 sound/soc/codecs/hdac_hdmi.c 	se->reg = SND_SOC_NOPM;
se               1002 sound/soc/codecs/hdac_hdmi.c 	se->items = num_items;
se               1003 sound/soc/codecs/hdac_hdmi.c 	se->mask = roundup_pow_of_two(se->items) - 1;
se               1018 sound/soc/codecs/hdac_hdmi.c 	se->texts = devm_kmemdup(&hdev->dev, items,
se               1020 sound/soc/codecs/hdac_hdmi.c 	if (!se->texts)
se               1036 sound/soc/codecs/hdac_hdmi.c 	struct soc_enum *se;
se               1042 sound/soc/codecs/hdac_hdmi.c 		se = (struct soc_enum *)kc->private_value;
se               1046 sound/soc/codecs/hdac_hdmi.c 					se->texts[j + 1],
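Note: hdac_hdmi.c:1003 above sizes the enum bitmask as roundup_pow_of_two(items) - 1, the smallest all-ones value able to index every item. A quick illustrative check, with a userspace stand-in for the kernel helper:

/* Userspace stand-in for roundup_pow_of_two(), showing how
 * hdac_hdmi.c:1003 above derives the enum mask from the item count. */
#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	for (unsigned long items = 1; items <= 6; items++)
		printf("items=%lu -> mask=0x%lx\n",
		       items, roundup_pow_of_two(items) - 1);
	return 0;	/* e.g. items=3 -> mask=0x3, items=5 -> mask=0x7 */
}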
se               2972 sound/soc/intel/skylake/skl-topology.c static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
se               2983 sound/soc/intel/skylake/skl-topology.c 		se->dobj.private = data;
se               2999 sound/soc/intel/skylake/skl-topology.c 	struct soc_enum *se;
se               3017 sound/soc/intel/skylake/skl-topology.c 			se = (struct soc_enum *)kctl->private_value;
se               3019 sound/soc/intel/skylake/skl-topology.c 				return skl_init_enum_data(bus->dev, se,
se                 83 sound/soc/soc-topology.c static void soc_tplg_denum_remove_texts(struct soc_enum *se);
se                 84 sound/soc/soc-topology.c static void soc_tplg_denum_remove_values(struct soc_enum *se);
se                396 sound/soc/soc-topology.c 	struct soc_enum *se = container_of(dobj, struct soc_enum, dobj);
se                407 sound/soc/soc-topology.c 	soc_tplg_denum_remove_values(se);
se                408 sound/soc/soc-topology.c 	soc_tplg_denum_remove_texts(se);
se                409 sound/soc/soc-topology.c 	kfree(se);
se                474 sound/soc/soc-topology.c 			struct soc_enum *se =
se                480 sound/soc/soc-topology.c 			soc_tplg_denum_remove_values(se);
se                481 sound/soc/soc-topology.c 			soc_tplg_denum_remove_texts(se);
se                483 sound/soc/soc-topology.c 			kfree(se);
se                932 sound/soc/soc-topology.c static int soc_tplg_denum_create_texts(struct soc_enum *se,
se                937 sound/soc/soc-topology.c 	se->dobj.control.dtexts =
se                939 sound/soc/soc-topology.c 	if (se->dobj.control.dtexts == NULL)
se                950 sound/soc/soc-topology.c 		se->dobj.control.dtexts[i] = kstrdup(ec->texts[i], GFP_KERNEL);
se                951 sound/soc/soc-topology.c 		if (!se->dobj.control.dtexts[i]) {
se                957 sound/soc/soc-topology.c 	se->items = le32_to_cpu(ec->items);
se                958 sound/soc/soc-topology.c 	se->texts = (const char * const *)se->dobj.control.dtexts;
se                962 sound/soc/soc-topology.c 	se->items = i;
se                963 sound/soc/soc-topology.c 	soc_tplg_denum_remove_texts(se);
se                967 sound/soc/soc-topology.c static inline void soc_tplg_denum_remove_texts(struct soc_enum *se)
se                969 sound/soc/soc-topology.c 	int i = se->items;
se                972 sound/soc/soc-topology.c 		kfree(se->dobj.control.dtexts[i]);
se                973 sound/soc/soc-topology.c 	kfree(se->dobj.control.dtexts);
se                976 sound/soc/soc-topology.c static int soc_tplg_denum_create_values(struct soc_enum *se,
se                984 sound/soc/soc-topology.c 	se->dobj.control.dvalues = kzalloc(le32_to_cpu(ec->items) *
se                987 sound/soc/soc-topology.c 	if (!se->dobj.control.dvalues)
se                992 sound/soc/soc-topology.c 		se->dobj.control.dvalues[i] = le32_to_cpu(ec->values[i]);
se                998 sound/soc/soc-topology.c static inline void soc_tplg_denum_remove_values(struct soc_enum *se)
se               1000 sound/soc/soc-topology.c 	kfree(se->dobj.control.dvalues);
se               1007 sound/soc/soc-topology.c 	struct soc_enum *se;
se               1028 sound/soc/soc-topology.c 		se = kzalloc((sizeof(*se)), GFP_KERNEL);
se               1029 sound/soc/soc-topology.c 		if (se == NULL)
se               1040 sound/soc/soc-topology.c 		kc.private_value = (long)se;
se               1044 sound/soc/soc-topology.c 		se->reg = tplc_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL);
se               1045 sound/soc/soc-topology.c 		se->shift_l = tplc_chan_get_shift(tplg, ec->channel,
se               1047 sound/soc/soc-topology.c 		se->shift_r = tplc_chan_get_shift(tplg, ec->channel,
se               1050 sound/soc/soc-topology.c 		se->mask = le32_to_cpu(ec->mask);
se               1051 sound/soc/soc-topology.c 		se->dobj.index = tplg->index;
se               1052 sound/soc/soc-topology.c 		se->dobj.type = SND_SOC_DOBJ_ENUM;
se               1053 sound/soc/soc-topology.c 		se->dobj.ops = tplg->ops;
se               1054 sound/soc/soc-topology.c 		INIT_LIST_HEAD(&se->dobj.list);
se               1059 sound/soc/soc-topology.c 			err = soc_tplg_denum_create_values(se, ec);
se               1064 sound/soc/soc-topology.c 				kfree(se);
se               1071 sound/soc/soc-topology.c 			err = soc_tplg_denum_create_texts(se, ec);
se               1076 sound/soc/soc-topology.c 				kfree(se);
se               1084 sound/soc/soc-topology.c 			kfree(se);
se               1092 sound/soc/soc-topology.c 			kfree(se);
se               1102 sound/soc/soc-topology.c 			kfree(se);
se               1108 sound/soc/soc-topology.c 			&kc, &se->dobj.control.kcontrol);
se               1112 sound/soc/soc-topology.c 			kfree(se);
se               1116 sound/soc/soc-topology.c 		list_add(&se->dobj.list, &tplg->comp->dobj_list);
se               1407 sound/soc/soc-topology.c 	struct soc_enum *se;
se               1421 sound/soc/soc-topology.c 		se = kzalloc(sizeof(*se), GFP_KERNEL);
se               1422 sound/soc/soc-topology.c 		if (se == NULL)
se               1431 sound/soc/soc-topology.c 		kc[i].private_value = (long)se;
se               1439 sound/soc/soc-topology.c 		se->reg = tplc_chan_get_reg(tplg, ec->channel, SNDRV_CHMAP_FL);
se               1440 sound/soc/soc-topology.c 		se->shift_l = tplc_chan_get_shift(tplg, ec->channel,
se               1442 sound/soc/soc-topology.c 		se->shift_r = tplc_chan_get_shift(tplg, ec->channel,
se               1445 sound/soc/soc-topology.c 		se->items = le32_to_cpu(ec->items);
se               1446 sound/soc/soc-topology.c 		se->mask = le32_to_cpu(ec->mask);
se               1447 sound/soc/soc-topology.c 		se->dobj.index = tplg->index;
se               1452 sound/soc/soc-topology.c 			err = soc_tplg_denum_create_values(se, ec);
se               1462 sound/soc/soc-topology.c 			err = soc_tplg_denum_create_texts(se, ec);
se               1497 sound/soc/soc-topology.c 		se = (struct soc_enum *)kc[i].private_value;
se               1499 sound/soc/soc-topology.c 		if (se) {
se               1500 sound/soc/soc-topology.c 			soc_tplg_denum_remove_values(se);
se               1501 sound/soc/soc-topology.c 			soc_tplg_denum_remove_texts(se);
se               1504 sound/soc/soc-topology.c 		kfree(se);
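Note: soc_tplg_denum_create_texts() (soc-topology.c:932-963 above) allocates an array of text pointers, duplicates each entry, and on a failed duplication records how many copies succeeded before unwinding via soc_tplg_denum_remove_texts(). A self-contained sketch of that duplicate-with-unwind pattern, with calloc()/strdup() standing in for kcalloc()/kstrdup() and illustrative names:

/* Sketch of the duplicate-with-unwind pattern in
 * soc_tplg_denum_create_texts()/..._remove_texts() above, using
 * calloc()/strdup() in place of the kernel allocators. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct denum {
	unsigned int items;
	char **dtexts;
};

static void denum_remove_texts(struct denum *d)
{
	/* Free exactly as many entries as were duplicated, then the array. */
	while (d->items--)
		free(d->dtexts[d->items]);
	free(d->dtexts);
	d->dtexts = NULL;
	d->items = 0;
}

static int denum_create_texts(struct denum *d, const char *const *texts,
			      unsigned int n)
{
	unsigned int i;

	d->dtexts = calloc(n, sizeof(*d->dtexts));
	if (!d->dtexts)
		return -1;

	for (i = 0; i < n; i++) {
		d->dtexts[i] = strdup(texts[i]);
		if (!d->dtexts[i]) {
			d->items = i;		/* only i entries are valid */
			denum_remove_texts(d);
			return -1;
		}
	}
	d->items = n;
	return 0;
}

int main(void)
{
	static const char *const texts[] = { "Off", "On", "Auto" };
	struct denum d = { 0 };

	if (denum_create_texts(&d, texts, 3) == 0) {
		printf("%u items, first is %s\n", d.items, d.dtexts[0]);
		denum_remove_texts(&d);
	}
	return 0;
}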
se                135 sound/soc/sof/control.c 	struct soc_enum *se =
se                137 sound/soc/sof/control.c 	struct snd_sof_control *scontrol = se->dobj.private;
se                151 sound/soc/sof/control.c 	struct soc_enum *se =
se                153 sound/soc/sof/control.c 	struct snd_sof_control *scontrol = se->dobj.private;
se               1050 sound/soc/sof/topology.c 	struct soc_enum *se;
se               1080 sound/soc/sof/topology.c 		se = (struct soc_enum *)kc->private_value;
se               1081 sound/soc/sof/topology.c 		dobj = &se->dobj;
se               1766 sound/soc/sof/topology.c 	struct soc_enum *se;
se               1784 sound/soc/sof/topology.c 			se = (struct soc_enum *)kc->private_value;
se               1785 sound/soc/sof/topology.c 			wdata[i].control = se->dobj.private;
se               2190 sound/soc/sof/topology.c 	struct soc_enum *se;
se               2237 sound/soc/sof/topology.c 			se = (struct soc_enum *)kc->private_value;
se               2238 sound/soc/sof/topology.c 			scontrol = se->dobj.private;
se                398 tools/perf/builtin-c2c.c 	struct sort_entry	*se;
se                418 tools/perf/builtin-c2c.c static int symbol_width(struct hists *hists, struct sort_entry *se)
se                420 tools/perf/builtin-c2c.c 	int width = hists__col_len(hists, se->se_width_idx);
se                439 tools/perf/builtin-c2c.c 		return symbol_width(hists, dim->se);
se                441 tools/perf/builtin-c2c.c 	return dim->se ? hists__col_len(hists, dim->se->se_width_idx) :
se                457 tools/perf/builtin-c2c.c 	if (dim->se) {
se                461 tools/perf/builtin-c2c.c 			text = dim->se->se_header;
se               1547 tools/perf/builtin-c2c.c 	.se		= &sort_thread,
se               1552 tools/perf/builtin-c2c.c 	.se		= &sort_sym,
se               1558 tools/perf/builtin-c2c.c 	.se		= &sort_dso,
se               1608 tools/perf/builtin-c2c.c 	.se		= &sort_srcline,
se               1721 tools/perf/builtin-c2c.c 		len = hists__col_len(he->hists, dim->se->se_width_idx);
se               1724 tools/perf/builtin-c2c.c 			len = symbol_width(he->hists, dim->se);
se               1727 tools/perf/builtin-c2c.c 	return dim->se->se_snprintf(he, hpp->buf, hpp->size, len);
se               1736 tools/perf/builtin-c2c.c 	return dim->se->se_cmp(a, b);
se               1746 tools/perf/builtin-c2c.c 	collapse_fn = dim->se->se_collapse ?: dim->se->se_cmp;
se               1769 tools/perf/builtin-c2c.c 	fmt->cmp	= dim->se ? c2c_se_cmp   : dim->cmp;
se               1770 tools/perf/builtin-c2c.c 	fmt->sort	= dim->se ? c2c_se_cmp   : dim->cmp;
se               1771 tools/perf/builtin-c2c.c 	fmt->color	= dim->se ? NULL	 : dim->color;
se               1772 tools/perf/builtin-c2c.c 	fmt->entry	= dim->se ? c2c_se_entry : dim->entry;
se               1775 tools/perf/builtin-c2c.c 	fmt->collapse	= dim->se ? c2c_se_collapse : dim->cmp;
se               1741 tools/perf/util/sort.c 	struct sort_entry *se;
se               1752 tools/perf/util/sort.c 	hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
se               1765 tools/perf/util/sort.c 		len = hists__col_len(hists, hse->se->se_width_idx);
se               1780 tools/perf/util/sort.c 		len = hists__col_len(hists, hse->se->se_width_idx);
se               1794 tools/perf/util/sort.c 		len = hists__col_len(he->hists, hse->se->se_width_idx);
se               1796 tools/perf/util/sort.c 	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
se               1805 tools/perf/util/sort.c 	return hse->se->se_cmp(a, b);
se               1815 tools/perf/util/sort.c 	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
se               1826 tools/perf/util/sort.c 	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
se               1844 tools/perf/util/sort.c 	return hse->se == &sort_ ## key ;			\
se               1867 tools/perf/util/sort.c 	return hse_a->se == hse_b->se;
se               1889 tools/perf/util/sort.c 	hse->se = sd->entry;
se               1945 tools/perf/util/sort.c 		if (hse->se->se_filter == NULL)
se               1952 tools/perf/util/sort.c 		r = hse->se->se_filter(he, type, arg);
se               2866 tools/perf/util/sort.c 		if (hse->se->se_width_idx == idx) {
se               2926 tools/perf/util/sort.c 		fmt->elide = get_elide(hse->se->se_width_idx, output);
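Note: builtin-c2c.c:1746 and sort.c:1815 above use the GNU "a ?: b" form to prefer the specialised se_collapse callback and fall back to se_cmp when it is absent. A tiny sketch of that fallback-dispatch idiom with illustrative types (requires the GNU C extension, as perf itself does):

/* Sketch of the "?:" fallback dispatch used at builtin-c2c.c:1746 and
 * sort.c:1815 above: prefer a specialised callback, else fall back to
 * the generic one.  Names here are illustrative. */
#include <stdio.h>
#include <string.h>

struct entry { const char *name; };

typedef int (*cmp_fn)(const struct entry *, const struct entry *);

struct sort_dim {
	cmp_fn cmp;		/* always present */
	cmp_fn collapse;	/* optional specialisation, may be NULL */
};

static int cmp_name(const struct entry *a, const struct entry *b)
{
	return strcmp(a->name, b->name);
}

static int collapse_entries(const struct sort_dim *dim,
			    const struct entry *a, const struct entry *b)
{
	/* GNU C "a ?: b" evaluates to a when a is non-NULL, else b. */
	cmp_fn fn = dim->collapse ?: dim->cmp;

	return fn(a, b);
}

int main(void)
{
	struct sort_dim dim = { .cmp = cmp_name };	/* no collapse override */
	struct entry a = { "alpha" }, b = { "beta" };

	printf("%d\n", collapse_entries(&dim, &a, &b));
	return 0;
}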
se                123 tools/testing/selftests/timers/alarmtimer-suspend.c 	struct sigevent se;
se                134 tools/testing/selftests/timers/alarmtimer-suspend.c 	memset(&se, 0, sizeof(se));
se                135 tools/testing/selftests/timers/alarmtimer-suspend.c 	se.sigev_notify = SIGEV_SIGNAL;
se                136 tools/testing/selftests/timers/alarmtimer-suspend.c 	se.sigev_signo = signum;
se                137 tools/testing/selftests/timers/alarmtimer-suspend.c 	se.sigev_value.sival_int = 0;
se                144 tools/testing/selftests/timers/alarmtimer-suspend.c 		if (timer_create(alarm_clock_id, &se, &tm1) == -1) {
se                179 tools/testing/selftests/timers/leap-a-day.c 	struct sigevent se;
se                292 tools/testing/selftests/timers/leap-a-day.c 		memset(&se, 0, sizeof(se));
se                293 tools/testing/selftests/timers/leap-a-day.c 		se.sigev_notify = SIGEV_SIGNAL;
se                294 tools/testing/selftests/timers/leap-a-day.c 		se.sigev_signo = signum;
se                295 tools/testing/selftests/timers/leap-a-day.c 		se.sigev_value.sival_int = 0;
se                296 tools/testing/selftests/timers/leap-a-day.c 		if (timer_create(CLOCK_REALTIME, &se, &tm1) == -1) {
se                125 tools/testing/selftests/timers/set-timer-lat.c 	struct sigevent se;
se                130 tools/testing/selftests/timers/set-timer-lat.c 	memset(&se, 0, sizeof(se));
se                131 tools/testing/selftests/timers/set-timer-lat.c 	se.sigev_notify = SIGEV_SIGNAL;
se                132 tools/testing/selftests/timers/set-timer-lat.c 	se.sigev_signo = SIGRTMAX;
se                133 tools/testing/selftests/timers/set-timer-lat.c 	se.sigev_value.sival_int = 0;
se                139 tools/testing/selftests/timers/set-timer-lat.c 	err = timer_create(clock_id, &se, tm1);
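Note: the three timer selftests above (alarmtimer-suspend.c, leap-a-day.c, set-timer-lat.c) all arm a POSIX timer the same way: zero a struct sigevent, request SIGEV_SIGNAL delivery with a chosen signal number, and call timer_create(). A self-contained sketch of that pattern; the clock id, one-shot expiry, and handler are illustrative choices (link with -lrt on older glibc):

/* Illustrative sketch of the timer_create()/sigevent pattern used by
 * the selftests above; clock id, expiry, and handler are arbitrary. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static void handler(int signum)
{
	(void)signum;
	write(STDOUT_FILENO, "timer fired\n", 12);
}

int main(void)
{
	struct sigevent se;
	struct itimerspec its;
	timer_t tm1;

	signal(SIGRTMAX, handler);

	memset(&se, 0, sizeof(se));
	se.sigev_notify = SIGEV_SIGNAL;	/* deliver a signal on expiry */
	se.sigev_signo = SIGRTMAX;	/* which signal to raise */
	se.sigev_value.sival_int = 0;

	if (timer_create(CLOCK_MONOTONIC, &se, &tm1) == -1) {
		perror("timer_create");
		return EXIT_FAILURE;
	}

	memset(&its, 0, sizeof(its));
	its.it_value.tv_sec = 1;	/* one-shot, fires after 1 second */
	if (timer_settime(tm1, 0, &its, NULL) == -1) {
		perror("timer_settime");
		return EXIT_FAILURE;
	}

	pause();			/* wait for the signal */
	timer_delete(tm1);
	return EXIT_SUCCESS;
}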