swp               350 arch/arm/include/asm/pgtable.h #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
swp               867 arch/arm/mm/alignment.c 			goto swp;
swp               949 arch/arm/mm/alignment.c  swp:
swp               831 arch/arm64/include/asm/pgtable.h #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
swp               197 arch/mips/include/asm/asm.h #define LONG_SP		swp
swp               389 arch/nds32/include/asm/pgtable.h #define __swp_entry_to_pte(swp)	     ((pte_t) { (swp).val })
swp               283 arch/nios2/include/asm/pgtable.h #define __swp_type(swp)		(((swp).val >> 26) & 0x3)
swp               284 arch/nios2/include/asm/pgtable.h #define __swp_offset(swp)	((swp).val & 0xfffff)
swp               287 arch/nios2/include/asm/pgtable.h #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
swp               272 arch/unicore32/include/asm/pgtable.h #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
swp               219 arch/unicore32/mm/alignment.c 		goto swp;
swp               235 arch/unicore32/mm/alignment.c swp:
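The nios2 hits above spell out one concrete encoding of a swap entry: a 2-bit type at bit 26 and a 20-bit offset in the low bits of a single word, with __swp_entry_to_pte() then storing that word directly in a non-present PTE. A minimal userspace sketch of that packing/unpacking, with invented demo_* names standing in for the kernel's __swp_* macros:

#include <assert.h>
#include <stdio.h>

/* Illustrative analogue of the nios2-style swap-entry layout shown above:
 * a 2-bit swap "type" at bit 26 and a 20-bit offset in the low bits of
 * one word.  The names are ours, not the kernel's. */
typedef struct { unsigned long val; } demo_swp_entry_t;

static demo_swp_entry_t demo_swp_entry(unsigned type, unsigned long offset)
{
        demo_swp_entry_t e;

        e.val = ((type & 0x3UL) << 26) | (offset & 0xfffffUL);
        return e;
}

static unsigned demo_swp_type(demo_swp_entry_t swp)
{
        return (swp.val >> 26) & 0x3;
}

static unsigned long demo_swp_offset(demo_swp_entry_t swp)
{
        return swp.val & 0xfffff;
}

int main(void)
{
        demo_swp_entry_t e = demo_swp_entry(2, 0x1234);

        assert(demo_swp_type(e) == 2);
        assert(demo_swp_offset(e) == 0x1234);
        printf("packed entry: %#lx\n", e.val);
        return 0;
}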
swp              3455 drivers/gpu/drm/i915/gt/intel_lrc.c 	int swp;
swp              3470 drivers/gpu/drm/i915/gt/intel_lrc.c 	swp = prandom_u32_max(ve->num_siblings);
swp              3471 drivers/gpu/drm/i915/gt/intel_lrc.c 	if (!swp)
swp              3474 drivers/gpu/drm/i915/gt/intel_lrc.c 	swap(ve->siblings[swp], ve->siblings[0]);
swp                57 drivers/gpu/drm/i915/selftests/i915_random.c 		size_t swp;
swp                59 drivers/gpu/drm/i915/selftests/i915_random.c 		swp = i915_prandom_u32_max_state(count + 1, state);
swp                60 drivers/gpu/drm/i915/selftests/i915_random.c 		if (swp == count)
swp                64 drivers/gpu/drm/i915/selftests/i915_random.c 		memcpy(arr + count * elsz, arr + swp * elsz, elsz);
swp                65 drivers/gpu/drm/i915/selftests/i915_random.c 		memcpy(arr + swp * elsz, stack, elsz);
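The intel_lrc.c hit picks a random sibling and swaps it to the front, while the i915_random.c hit is the inner step of a Fisher-Yates shuffle, copying whole elements through a stack buffer. A plain-C sketch of that shuffle loop, using rand() in place of the kernel's pseudo-random state:

#include <stdio.h>
#include <stdlib.h>

/* Fisher-Yates shuffle sketch; rand() % n is slightly biased but fine
 * for illustration. */
static void demo_shuffle(int *arr, size_t count)
{
        if (count < 2)
                return;

        while (--count) {
                size_t swp = (size_t)rand() % (count + 1);
                int tmp;

                if (swp == count)
                        continue;       /* element stays in place */

                tmp = arr[count];
                arr[count] = arr[swp];
                arr[swp] = tmp;
        }
}

int main(void)
{
        int a[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

        srand(1);
        demo_shuffle(a, 8);
        for (size_t i = 0; i < 8; i++)
                printf("%d ", a[i]);
        printf("\n");
        return 0;
}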
swp              1138 drivers/infiniband/hw/mlx5/main.c 		if (MLX5_CAP_ETH(mdev, swp)) {
swp              1261 drivers/infiniband/hw/mlx5/qp.c 	    MLX5_CAP_ETH(dev->mdev, swp))
swp               311 drivers/net/dsa/microchip/ksz_common.h #define KSZ_SPI_OP_FLAG_MASK(opcode, swp, regbits, regpad)		\
swp               312 drivers/net/dsa/microchip/ksz_common.h 	swab##swp((opcode) << ((regbits) + (regpad)))
swp               314 drivers/net/dsa/microchip/ksz_common.h #define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign)		\
swp               324 drivers/net/dsa/microchip/ksz_common.h 			KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_RD, swp,	\
swp               327 drivers/net/dsa/microchip/ksz_common.h 			KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_WR, swp,	\
swp               335 drivers/net/dsa/microchip/ksz_common.h #define KSZ_REGMAP_TABLE(ksz, swp, regbits, regpad, regalign)		\
swp               337 drivers/net/dsa/microchip/ksz_common.h 		KSZ_REGMAP_ENTRY(8, swp, (regbits), (regpad), (regalign)), \
swp               338 drivers/net/dsa/microchip/ksz_common.h 		KSZ_REGMAP_ENTRY(16, swp, (regbits), (regpad), (regalign)), \
swp               339 drivers/net/dsa/microchip/ksz_common.h 		KSZ_REGMAP_ENTRY(32, swp, (regbits), (regpad), (regalign)), \
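In the KSZ regmap macros, swp is not a swap entry at all but a byte-swap width: swab##swp token-pastes into swab16 or swab32, so one macro body serves both SPI register layouts. A userspace sketch of the same trick using glibc's bswap_16/bswap_32 from <byteswap.h>; the opcode, width and padding values below are made up purely for illustration:

#include <byteswap.h>
#include <stdio.h>

/* Token-pasting sketch: the width argument selects the byte-swap helper,
 * standing in for the kernel's swab##swp. */
#define DEMO_OP_FLAG_MASK(opcode, swp, regbits, regpad) \
        bswap_##swp((opcode) << ((regbits) + (regpad)))

int main(void)
{
        /* Hypothetical opcode/width/padding values, for illustration only. */
        printf("16-bit mask: %#x\n", DEMO_OP_FLAG_MASK(1, 16, 12, 1));
        printf("32-bit mask: %#x\n", DEMO_OP_FLAG_MASK(2, 32, 24, 5));
        return 0;
}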
swp              1075 drivers/net/ethernet/mellanox/mlx5/core/en.h 	return MLX5_CAP_ETH(mdev, swp) &&
swp               522 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c 	    !MLX5_CAP_ETH(mdev, swp)) {
swp                23 drivers/soc/fsl/dpio/dpio-service.c 	struct qbman_swp *swp;
swp                39 drivers/soc/fsl/dpio/dpio-service.c 	struct qbman_swp *swp; /* portal used to issue VDQCR */
swp               127 drivers/soc/fsl/dpio/dpio-service.c 	obj->swp = qbman_swp_init(&obj->swp_desc);
swp               129 drivers/soc/fsl/dpio/dpio-service.c 	if (!obj->swp) {
swp               140 drivers/soc/fsl/dpio/dpio-service.c 	qbman_swp_interrupt_set_trigger(obj->swp,
swp               142 drivers/soc/fsl/dpio/dpio-service.c 	qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
swp               144 drivers/soc/fsl/dpio/dpio-service.c 		qbman_swp_push_set(obj->swp, 0, 1);
swp               190 drivers/soc/fsl/dpio/dpio-service.c 	struct qbman_swp *swp;
swp               193 drivers/soc/fsl/dpio/dpio-service.c 	swp = obj->swp;
swp               194 drivers/soc/fsl/dpio/dpio-service.c 	status = qbman_swp_interrupt_read_status(swp);
swp               198 drivers/soc/fsl/dpio/dpio-service.c 	dq = qbman_swp_dqrr_next(swp);
swp               210 drivers/soc/fsl/dpio/dpio-service.c 		qbman_swp_dqrr_consume(swp, dq);
swp               214 drivers/soc/fsl/dpio/dpio-service.c 		dq = qbman_swp_dqrr_next(swp);
swp               217 drivers/soc/fsl/dpio/dpio-service.c 	qbman_swp_interrupt_clear_status(swp, status);
swp               218 drivers/soc/fsl/dpio/dpio-service.c 	qbman_swp_interrupt_set_inhibit(swp, 0);
swp               278 drivers/soc/fsl/dpio/dpio-service.c 		return qbman_swp_CDAN_set_context_enable(d->swp,
swp               302 drivers/soc/fsl/dpio/dpio-service.c 		qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);
swp               336 drivers/soc/fsl/dpio/dpio-service.c 		err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
swp               338 drivers/soc/fsl/dpio/dpio-service.c 		err = qbman_swp_fq_schedule(d->swp, ctx->id);
swp               367 drivers/soc/fsl/dpio/dpio-service.c 	s->swp = d->swp;
swp               368 drivers/soc/fsl/dpio/dpio-service.c 	err = qbman_swp_pull(d->swp, &pd);
swp               370 drivers/soc/fsl/dpio/dpio-service.c 		s->swp = NULL;
swp               399 drivers/soc/fsl/dpio/dpio-service.c 	s->swp = d->swp;
swp               400 drivers/soc/fsl/dpio/dpio-service.c 	err = qbman_swp_pull(d->swp, &pd);
swp               402 drivers/soc/fsl/dpio/dpio-service.c 		s->swp = NULL;
swp               431 drivers/soc/fsl/dpio/dpio-service.c 	return qbman_swp_enqueue(d->swp, &ed, fd);
swp               460 drivers/soc/fsl/dpio/dpio-service.c 	return qbman_swp_enqueue(d->swp, &ed, fd);
swp               487 drivers/soc/fsl/dpio/dpio-service.c 	return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
swp               515 drivers/soc/fsl/dpio/dpio-service.c 	err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
swp               612 drivers/soc/fsl/dpio/dpio-service.c 	match = qbman_result_has_new_result(s->swp, ret);
swp               655 drivers/soc/fsl/dpio/dpio-service.c 	struct qbman_swp *swp;
swp               663 drivers/soc/fsl/dpio/dpio-service.c 	swp = d->swp;
swp               665 drivers/soc/fsl/dpio/dpio-service.c 	ret = qbman_fq_query_state(swp, fqid, &state);
swp               688 drivers/soc/fsl/dpio/dpio-service.c 	struct qbman_swp *swp;
swp               696 drivers/soc/fsl/dpio/dpio-service.c 	swp = d->swp;
swp               698 drivers/soc/fsl/dpio/dpio-service.c 	ret = qbman_bp_query(swp, bpid, &state);
swp               433 drivers/soc/fsl/dpio/qbman-portal.h static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
swp               438 drivers/soc/fsl/dpio/qbman-portal.h 	qbman_swp_mc_submit(swp, cmd, cmd_verb);
swp               441 drivers/soc/fsl/dpio/qbman-portal.h 		cmd = qbman_swp_mc_result(swp);
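qbman_swp_mc_complete() above is the classic submit-then-poll shape: push a management command at the software portal, then spin on the result register until a reply (or a retry budget) turns up. A self-contained sketch of that shape with stand-in demo_* helpers; the portal struct and the fake latency are inventions, not the QBMan API:

#include <stdio.h>

/* Submit-then-poll sketch: submit a command, then poll for the reply a
 * bounded number of times. */
struct demo_portal { int pending; int reply; };

static void demo_mc_submit(struct demo_portal *p, int cmd)
{
        p->pending = 3;          /* pretend the reply lands a few polls later */
        p->reply = cmd | 0x80;   /* fake "valid" bit */
}

static int demo_mc_result(struct demo_portal *p)
{
        return p->pending-- > 0 ? 0 : p->reply;
}

static int demo_mc_complete(struct demo_portal *p, int cmd)
{
        int ret, loops = 1000;

        demo_mc_submit(p, cmd);
        do {
                ret = demo_mc_result(p);
        } while (!ret && --loops);

        return ret;     /* 0 means the command timed out */
}

int main(void)
{
        struct demo_portal portal = { 0, 0 };

        printf("result: %#x\n", demo_mc_complete(&portal, 0x42));
        return 0;
}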
swp               498 fs/nilfs2/the_nilfs.c 	int valid[2], swp = 0;
swp               524 fs/nilfs2/the_nilfs.c 	swp = valid[1] && (!valid[0] ||
swp               528 fs/nilfs2/the_nilfs.c 	if (valid[swp] && nilfs_sb2_bad_offset(sbp[swp], sb2off)) {
swp               533 fs/nilfs2/the_nilfs.c 		swp = 0;
swp               535 fs/nilfs2/the_nilfs.c 	if (!valid[swp]) {
swp               541 fs/nilfs2/the_nilfs.c 	if (!valid[!swp])
swp               545 fs/nilfs2/the_nilfs.c 	if (swp)
swp               550 fs/nilfs2/the_nilfs.c 	nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq);
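In the_nilfs.c, swp is a boolean meaning "prefer the secondary superblock copy": it is set only when copy 1 is valid and copy 0 is either invalid or older, and the following code indexes sbp[] through it. A stripped-down sketch of that selection; the struct and the "newer" test are illustrative stand-ins, not nilfs2's on-disk layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_sb { uint64_t seq; };

/* Returns the index of the superblock copy to use, or -1 if neither is
 * usable. */
static int demo_pick_sb(const struct demo_sb *sbp[2], const bool valid[2])
{
        int swp;

        /* Prefer copy 1 only if it is valid and copy 0 is missing or older. */
        swp = valid[1] && (!valid[0] || sbp[1]->seq > sbp[0]->seq);

        if (!valid[swp])
                return -1;

        return swp;
}

int main(void)
{
        struct demo_sb primary = { .seq = 10 }, secondary = { .seq = 12 };
        const struct demo_sb *sbp[2] = { &primary, &secondary };
        const bool valid[2] = { true, true };

        printf("using superblock copy %d\n", demo_pick_sb(sbp, valid));
        return 0;
}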
swp               856 include/linux/mlx5/mlx5_ifc.h 	u8         swp[0x1];
swp               516 include/linux/swap.h static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
swp               521 include/linux/swap.h static inline void swap_shmem_alloc(swp_entry_t swp)
swp               525 include/linux/swap.h static inline int swap_duplicate(swp_entry_t swp)
swp               530 include/linux/swap.h static inline void swap_free(swp_entry_t swp)
swp               534 include/linux/swap.h static inline void put_swap_page(struct page *page, swp_entry_t swp)
swp               544 include/linux/swap.h static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
swp               555 include/linux/swap.h static inline struct page *lookup_swap_cache(swp_entry_t swp,
swp               213 include/linux/swapops.h static inline int is_migration_entry(swp_entry_t swp)
swp               341 include/linux/swapops.h static inline int is_hwpoison_entry(swp_entry_t swp)
swp               861 ipc/shm.c      		unsigned long *swp)
swp               867 ipc/shm.c      	*swp = 0;
swp               880 ipc/shm.c      		shm_add_rss_swap(shp, rss, swp);
swp              1745 ipc/shm.c      	unsigned long rss = 0, swp = 0;
swp              1748 ipc/shm.c      	shm_add_rss_swap(shp, &rss, &swp);
swp              1775 ipc/shm.c      		   swp * PAGE_SIZE);
swp              3395 mm/hugetlb.c   	swp_entry_t swp;
swp              3399 mm/hugetlb.c   	swp = pte_to_swp_entry(pte);
swp              3400 mm/hugetlb.c   	if (non_swap_entry(swp) && is_migration_entry(swp))
swp              3408 mm/hugetlb.c   	swp_entry_t swp;
swp              3412 mm/hugetlb.c   	swp = pte_to_swp_entry(pte);
swp              3413 mm/hugetlb.c   	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
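The two hugetlb helpers share one pattern: a PTE that is not present is reinterpreted as a swap-style entry, and the entry's type field distinguishes real swap slots from special markers such as migration or hwpoison entries. A sketch of that decision with an invented encoding (type in the top bits; the demo_* names and constants are not the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_SWAPFILES      30
#define DEMO_MIGRATION_TYPE     30
#define DEMO_HWPOISON_TYPE      31

typedef struct { uint64_t val; } demo_swp_entry_t;

static unsigned demo_swp_type(demo_swp_entry_t swp)
{
        return swp.val >> 58;   /* type in the top bits, as an example */
}

/* Types at or above the swapfile limit are special markers, not swap slots. */
static bool demo_non_swap_entry(demo_swp_entry_t swp)
{
        return demo_swp_type(swp) >= DEMO_MAX_SWAPFILES;
}

static bool demo_is_migration_entry(demo_swp_entry_t swp)
{
        return demo_swp_type(swp) == DEMO_MIGRATION_TYPE;
}

int main(void)
{
        demo_swp_entry_t e = { (uint64_t)DEMO_MIGRATION_TYPE << 58 };

        if (demo_non_swap_entry(e) && demo_is_migration_entry(e))
                printf("entry encodes a migration marker\n");
        return 0;
}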
swp              5457 mm/memcontrol.c 			swp_entry_t swp = radix_to_swp_entry(page);
swp              5459 mm/memcontrol.c 				*entry = swp;
swp              5460 mm/memcontrol.c 			page = find_get_page(swap_address_space(swp),
swp              5461 mm/memcontrol.c 					     swp_offset(swp));
swp                70 mm/mincore.c   			swp_entry_t swp = radix_to_swp_entry(page);
swp                74 mm/mincore.c   			si = get_swap_device(swp);
swp                76 mm/mincore.c   				page = find_get_page(swap_address_space(swp),
swp                77 mm/mincore.c   						     swp_offset(swp));