sctx               32 arch/arm/crypto/sha1-ce-glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               35 arch/arm/crypto/sha1-ce-glue.c 	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
sctx               36 arch/arm/crypto/sha1_neon_glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               39 arch/arm/crypto/sha1_neon_glue.c 	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
sctx               33 arch/arm/crypto/sha2-ce-glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               36 arch/arm/crypto/sha2-ce-glue.c 	    (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
sctx               31 arch/arm/crypto/sha256_neon_glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               34 arch/arm/crypto/sha256_neon_glue.c 	    (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
sctx               29 arch/arm/crypto/sha512-neon-glue.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx               32 arch/arm/crypto/sha512-neon-glue.c 	    (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
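
The five arm glue files above share one guard: an update whose buffered bytes plus new data still fit inside a single block is routed to the scalar fallback (in the kernel this fragment is the second operand of a condition that also checks whether kernel-mode SIMD is usable). A minimal standalone sketch of the predicate; names are illustrative, not kernel API:

	#include <stdbool.h>
	#include <stdio.h>

	#define BLOCK_SIZE 64	/* SHA-1/SHA-256 block size */

	/* True when buffered bytes plus new data still fit inside one
	 * block, i.e. the SIMD path has no full block to process yet. */
	static bool stays_within_one_block(unsigned long long count, unsigned int len)
	{
		return (count % BLOCK_SIZE) + len < BLOCK_SIZE;
	}

	int main(void)
	{
		printf("%d\n", stays_within_one_block(10, 20));	/* 1: still buffered */
		printf("%d\n", stays_within_one_block(60, 20));	/* 0: crosses a block */
		return 0;
	}
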
sctx               37 arch/arm64/crypto/sha1-ce-glue.c 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
sctx               42 arch/arm64/crypto/sha1-ce-glue.c 	sctx->finalize = 0;
sctx               54 arch/arm64/crypto/sha1-ce-glue.c 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
sctx               55 arch/arm64/crypto/sha1-ce-glue.c 	bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
sctx               64 arch/arm64/crypto/sha1-ce-glue.c 	sctx->finalize = finalize;
sctx               77 arch/arm64/crypto/sha1-ce-glue.c 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
sctx               82 arch/arm64/crypto/sha1-ce-glue.c 	sctx->finalize = 0;
sctx               41 arch/arm64/crypto/sha2-ce-glue.c 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
sctx               47 arch/arm64/crypto/sha2-ce-glue.c 	sctx->finalize = 0;
sctx               59 arch/arm64/crypto/sha2-ce-glue.c 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
sctx               60 arch/arm64/crypto/sha2-ce-glue.c 	bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
sctx               75 arch/arm64/crypto/sha2-ce-glue.c 	sctx->finalize = finalize;
sctx               89 arch/arm64/crypto/sha2-ce-glue.c 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
sctx               97 arch/arm64/crypto/sha2-ce-glue.c 	sctx->finalize = 0;
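
The arm64 CE drivers above add a finalization fast path: sctx->finalize is set when the Crypto Extensions call can fold the padding block into the same invocation, which is only sound if nothing was hashed before (sst.count == 0) and the finup() data is a nonzero whole number of blocks. A standalone sketch of the condition, with illustrative names:

	#include <stdbool.h>
	#include <stdio.h>

	#define SHA1_BLOCK_SIZE 64

	/* Mirrors: !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len */
	static bool can_finalize_inline(unsigned long long prior_count, unsigned int len)
	{
		return !prior_count && !(len % SHA1_BLOCK_SIZE) && len;
	}

	int main(void)
	{
		printf("%d\n", can_finalize_inline(0, 128));	/* 1: two whole blocks */
		printf("%d\n", can_finalize_inline(0, 100));	/* 0: trailing partial block */
		printf("%d\n", can_finalize_inline(64, 64));	/* 0: earlier update() happened */
		return 0;
	}
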
sctx               86 arch/arm64/crypto/sha256-glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              101 arch/arm64/crypto/sha256-glue.c 		    chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
sctx              103 arch/arm64/crypto/sha256-glue.c 				sctx->count % SHA256_BLOCK_SIZE;
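
sha256-glue.c caps each chunk so a kernel-mode NEON section ends on a block boundary relative to the bytes already buffered, keeping sections short on preemptible kernels. A minimal sketch of that trimming, assuming the chunk starts out as the full remaining length:

	#include <stdio.h>

	#define SHA256_BLOCK_SIZE 64

	/* Trim the next chunk so each NEON section completes at most one
	 * block past the currently buffered bytes. Standalone, illustrative. */
	static unsigned int next_chunk(unsigned long long count, unsigned int len)
	{
		unsigned int chunk = len;

		if (chunk + count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
			chunk = SHA256_BLOCK_SIZE - count % SHA256_BLOCK_SIZE;
		return chunk;
	}

	int main(void)
	{
		printf("%u\n", next_chunk(0, 256));	/* 64: one whole block */
		printf("%u\n", next_chunk(10, 256));	/* 54: finish the open block first */
		return 0;
	}
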
sctx               33 arch/arm64/crypto/sha3-ce-glue.c 	struct sha3_state *sctx = shash_desc_ctx(desc);
sctx               39 arch/arm64/crypto/sha3-ce-glue.c 	if ((sctx->partial + len) >= sctx->rsiz) {
sctx               42 arch/arm64/crypto/sha3-ce-glue.c 		if (sctx->partial) {
sctx               43 arch/arm64/crypto/sha3-ce-glue.c 			int p = sctx->rsiz - sctx->partial;
sctx               45 arch/arm64/crypto/sha3-ce-glue.c 			memcpy(sctx->buf + sctx->partial, data, p);
sctx               47 arch/arm64/crypto/sha3-ce-glue.c 			sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size);
sctx               52 arch/arm64/crypto/sha3-ce-glue.c 			sctx->partial = 0;
sctx               55 arch/arm64/crypto/sha3-ce-glue.c 		blocks = len / sctx->rsiz;
sctx               56 arch/arm64/crypto/sha3-ce-glue.c 		len %= sctx->rsiz;
sctx               60 arch/arm64/crypto/sha3-ce-glue.c 			sha3_ce_transform(sctx->st, data, blocks, digest_size);
sctx               62 arch/arm64/crypto/sha3-ce-glue.c 			data += blocks * sctx->rsiz;
sctx               67 arch/arm64/crypto/sha3-ce-glue.c 		memcpy(sctx->buf + sctx->partial, data, len);
sctx               68 arch/arm64/crypto/sha3-ce-glue.c 		sctx->partial += len;
sctx               75 arch/arm64/crypto/sha3-ce-glue.c 	struct sha3_state *sctx = shash_desc_ctx(desc);
sctx               83 arch/arm64/crypto/sha3-ce-glue.c 	sctx->buf[sctx->partial++] = 0x06;
sctx               84 arch/arm64/crypto/sha3-ce-glue.c 	memset(sctx->buf + sctx->partial, 0, sctx->rsiz - sctx->partial);
sctx               85 arch/arm64/crypto/sha3-ce-glue.c 	sctx->buf[sctx->rsiz - 1] |= 0x80;
sctx               88 arch/arm64/crypto/sha3-ce-glue.c 	sha3_ce_transform(sctx->st, sctx->buf, 1, digest_size);
sctx               92 arch/arm64/crypto/sha3-ce-glue.c 		put_unaligned_le64(sctx->st[i], digest++);
sctx               95 arch/arm64/crypto/sha3-ce-glue.c 		put_unaligned_le32(sctx->st[i], (__le32 *)digest);
sctx               97 arch/arm64/crypto/sha3-ce-glue.c 	*sctx = (struct sha3_state){};
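
The sha3 final above is the standard Keccak pad10*1 with SHA3 domain separation: append 0x06, zero-fill the rest of the rate, and set the top bit of the last rate byte. A standalone sketch (rsiz is the rate in bytes; 136 is the SHA3-256 rate):

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	static void sha3_pad(uint8_t *buf, unsigned int partial, unsigned int rsiz)
	{
		buf[partial++] = 0x06;			/* SHA3 domain byte + first pad bit */
		memset(buf + partial, 0, rsiz - partial);
		buf[rsiz - 1] |= 0x80;			/* final pad bit */
	}

	int main(void)
	{
		uint8_t buf[136] = { 0 };		/* SHA3-256 rate */

		sha3_pad(buf, 5, sizeof(buf));
		printf("%02x %02x\n", buf[5], buf[135]);	/* 06 80 */
		return 0;
	}
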
sctx               31 arch/mips/cavium-octeon/crypto/octeon-sha1.c static void octeon_sha1_store_hash(struct sha1_state *sctx)
sctx               33 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	u64 *hash = (u64 *)sctx->state;
sctx               37 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	} hash_tail = { { sctx->state[4], } };
sctx               45 arch/mips/cavium-octeon/crypto/octeon-sha1.c static void octeon_sha1_read_hash(struct sha1_state *sctx)
sctx               47 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	u64 *hash = (u64 *)sctx->state;
sctx               56 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	sctx->state[4]	= hash_tail.word[0];
sctx               76 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               78 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	sctx->state[0] = SHA1_H0;
sctx               79 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	sctx->state[1] = SHA1_H1;
sctx               80 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	sctx->state[2] = SHA1_H2;
sctx               81 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	sctx->state[3] = SHA1_H3;
sctx               82 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	sctx->state[4] = SHA1_H4;
sctx               83 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	sctx->count = 0;
sctx               88 arch/mips/cavium-octeon/crypto/octeon-sha1.c static void __octeon_sha1_update(struct sha1_state *sctx, const u8 *data,
sctx               95 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	partial = sctx->count % SHA1_BLOCK_SIZE;
sctx               96 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	sctx->count += len;
sctx              103 arch/mips/cavium-octeon/crypto/octeon-sha1.c 			memcpy(sctx->buffer + partial, data,
sctx              105 arch/mips/cavium-octeon/crypto/octeon-sha1.c 			src = sctx->buffer;
sctx              116 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	memcpy(sctx->buffer + partial, src, len - done);
sctx              122 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              131 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	if ((sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
sctx              135 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	octeon_sha1_store_hash(sctx);
sctx              137 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	__octeon_sha1_update(sctx, data, len);
sctx              139 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	octeon_sha1_read_hash(sctx);
sctx              147 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              158 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	bits = cpu_to_be64(sctx->count << 3);
sctx              161 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	index = sctx->count & 0x3f;
sctx              165 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	octeon_sha1_store_hash(sctx);
sctx              167 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	__octeon_sha1_update(sctx, padding, pad_len);
sctx              170 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	__octeon_sha1_update(sctx, (const u8 *)&bits, sizeof(bits));
sctx              172 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	octeon_sha1_read_hash(sctx);
sctx              177 arch/mips/cavium-octeon/crypto/octeon-sha1.c 		dst[i] = cpu_to_be32(sctx->state[i]);
sctx              180 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	memset(sctx, 0, sizeof(*sctx));
sctx              187 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              189 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	memcpy(out, sctx, sizeof(*sctx));
sctx              195 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              197 arch/mips/cavium-octeon/crypto/octeon-sha1.c 	memcpy(sctx, in, sizeof(*sctx));
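
The Octeon driver keeps the running hash in coprocessor-wide 64-bit registers, so the five 32-bit words of SHA-1 state are marshalled through three dwords; the hash_tail union above carries state[4] in a dword of its own. A sketch of that packing, assuming the big-endian layout Octeon uses (the memcpy simply moves state[0..3] as raw bytes; the explicit shift mirrors where hash_tail lands the fifth word):

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t state[5] = { 0x67452301, 0xEFCDAB89, 0x98BADCFE,
				      0x10325476, 0xC3D2E1F0 };
		uint64_t regs[3];

		memcpy(regs, state, 2 * sizeof(regs[0]));	/* state[0..3] -> regs[0..1] */
		regs[2] = (uint64_t)state[4] << 32;		/* tail word in the high half */
		printf("%016llx\n", (unsigned long long)regs[2]);	/* c3d2e1f000000000 */
		return 0;
	}
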
sctx               32 arch/mips/cavium-octeon/crypto/octeon-sha256.c static void octeon_sha256_store_hash(struct sha256_state *sctx)
sctx               34 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	u64 *hash = (u64 *)sctx->state;
sctx               42 arch/mips/cavium-octeon/crypto/octeon-sha256.c static void octeon_sha256_read_hash(struct sha256_state *sctx)
sctx               44 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	u64 *hash = (u64 *)sctx->state;
sctx               68 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               70 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[0] = SHA224_H0;
sctx               71 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[1] = SHA224_H1;
sctx               72 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[2] = SHA224_H2;
sctx               73 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[3] = SHA224_H3;
sctx               74 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[4] = SHA224_H4;
sctx               75 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[5] = SHA224_H5;
sctx               76 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[6] = SHA224_H6;
sctx               77 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[7] = SHA224_H7;
sctx               78 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->count = 0;
sctx               85 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               87 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[0] = SHA256_H0;
sctx               88 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[1] = SHA256_H1;
sctx               89 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[2] = SHA256_H2;
sctx               90 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[3] = SHA256_H3;
sctx               91 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[4] = SHA256_H4;
sctx               92 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[5] = SHA256_H5;
sctx               93 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[6] = SHA256_H6;
sctx               94 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->state[7] = SHA256_H7;
sctx               95 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->count = 0;
sctx              100 arch/mips/cavium-octeon/crypto/octeon-sha256.c static void __octeon_sha256_update(struct sha256_state *sctx, const u8 *data,
sctx              107 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	partial = sctx->count % SHA256_BLOCK_SIZE;
sctx              108 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	sctx->count += len;
sctx              115 arch/mips/cavium-octeon/crypto/octeon-sha256.c 			memcpy(sctx->buf + partial, data,
sctx              117 arch/mips/cavium-octeon/crypto/octeon-sha256.c 			src = sctx->buf;
sctx              128 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	memcpy(sctx->buf + partial, src, len - done);
sctx              134 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              143 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	if ((sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
sctx              147 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	octeon_sha256_store_hash(sctx);
sctx              149 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	__octeon_sha256_update(sctx, data, len);
sctx              151 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	octeon_sha256_read_hash(sctx);
sctx              159 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              170 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	bits = cpu_to_be64(sctx->count << 3);
sctx              173 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	index = sctx->count & 0x3f;
sctx              177 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	octeon_sha256_store_hash(sctx);
sctx              179 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	__octeon_sha256_update(sctx, padding, pad_len);
sctx              182 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	__octeon_sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
sctx              184 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	octeon_sha256_read_hash(sctx);
sctx              189 arch/mips/cavium-octeon/crypto/octeon-sha256.c 		dst[i] = cpu_to_be32(sctx->state[i]);
sctx              192 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	memset(sctx, 0, sizeof(*sctx));
sctx              211 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              213 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	memcpy(out, sctx, sizeof(*sctx));
sctx              219 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              221 arch/mips/cavium-octeon/crypto/octeon-sha256.c 	memcpy(sctx, in, sizeof(*sctx));
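
The sha1/sha256 finals above use the usual Merkle-Damgård padding arithmetic: index = count & 0x3f is the offset in the current block, and the pad length leaves exactly eight bytes for the big-endian bit count. The pad-length formula itself is not among the matched lines; the sketch below uses the standard form these drivers follow:

	#include <stdio.h>

	static unsigned int pad_len(unsigned long long count)
	{
		unsigned int index = count & 0x3f;

		return (index < 56) ? (56 - index) : ((64 + 56) - index);
	}

	int main(void)
	{
		printf("%u\n", pad_len(0));	/* 56: empty message still gets a pad */
		printf("%u\n", pad_len(55));	/* 1: just the 0x80 byte fits */
		printf("%u\n", pad_len(56));	/* 64: spills into a second block */
		return 0;
	}
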
sctx               31 arch/mips/cavium-octeon/crypto/octeon-sha512.c static void octeon_sha512_store_hash(struct sha512_state *sctx)
sctx               33 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_hash_sha512(sctx->state[0], 0);
sctx               34 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_hash_sha512(sctx->state[1], 1);
sctx               35 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_hash_sha512(sctx->state[2], 2);
sctx               36 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_hash_sha512(sctx->state[3], 3);
sctx               37 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_hash_sha512(sctx->state[4], 4);
sctx               38 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_hash_sha512(sctx->state[5], 5);
sctx               39 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_hash_sha512(sctx->state[6], 6);
sctx               40 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	write_octeon_64bit_hash_sha512(sctx->state[7], 7);
sctx               43 arch/mips/cavium-octeon/crypto/octeon-sha512.c static void octeon_sha512_read_hash(struct sha512_state *sctx)
sctx               45 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[0] = read_octeon_64bit_hash_sha512(0);
sctx               46 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[1] = read_octeon_64bit_hash_sha512(1);
sctx               47 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[2] = read_octeon_64bit_hash_sha512(2);
sctx               48 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[3] = read_octeon_64bit_hash_sha512(3);
sctx               49 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[4] = read_octeon_64bit_hash_sha512(4);
sctx               50 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[5] = read_octeon_64bit_hash_sha512(5);
sctx               51 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[6] = read_octeon_64bit_hash_sha512(6);
sctx               52 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[7] = read_octeon_64bit_hash_sha512(7);
sctx               79 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx               81 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[0] = SHA512_H0;
sctx               82 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[1] = SHA512_H1;
sctx               83 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[2] = SHA512_H2;
sctx               84 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[3] = SHA512_H3;
sctx               85 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[4] = SHA512_H4;
sctx               86 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[5] = SHA512_H5;
sctx               87 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[6] = SHA512_H6;
sctx               88 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[7] = SHA512_H7;
sctx               89 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->count[0] = sctx->count[1] = 0;
sctx               96 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx               98 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[0] = SHA384_H0;
sctx               99 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[1] = SHA384_H1;
sctx              100 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[2] = SHA384_H2;
sctx              101 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[3] = SHA384_H3;
sctx              102 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[4] = SHA384_H4;
sctx              103 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[5] = SHA384_H5;
sctx              104 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[6] = SHA384_H6;
sctx              105 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->state[7] = SHA384_H7;
sctx              106 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	sctx->count[0] = sctx->count[1] = 0;
sctx              111 arch/mips/cavium-octeon/crypto/octeon-sha512.c static void __octeon_sha512_update(struct sha512_state *sctx, const u8 *data,
sctx              119 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	index = sctx->count[0] % SHA512_BLOCK_SIZE;
sctx              122 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	if ((sctx->count[0] += len) < len)
sctx              123 arch/mips/cavium-octeon/crypto/octeon-sha512.c 		sctx->count[1]++;
sctx              129 arch/mips/cavium-octeon/crypto/octeon-sha512.c 		memcpy(&sctx->buf[index], data, part_len);
sctx              130 arch/mips/cavium-octeon/crypto/octeon-sha512.c 		octeon_sha512_transform(sctx->buf);
sctx              142 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	memcpy(&sctx->buf[index], &data[i], len - i);
sctx              148 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx              157 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	if ((sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
sctx              161 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	octeon_sha512_store_hash(sctx);
sctx              163 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	__octeon_sha512_update(sctx, data, len);
sctx              165 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	octeon_sha512_read_hash(sctx);
sctx              173 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx              184 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	bits[1] = cpu_to_be64(sctx->count[0] << 3);
sctx              185 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
sctx              188 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	index = sctx->count[0] & 0x7f;
sctx              192 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	octeon_sha512_store_hash(sctx);
sctx              194 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	__octeon_sha512_update(sctx, padding, pad_len);
sctx              197 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	__octeon_sha512_update(sctx, (const u8 *)bits, sizeof(bits));
sctx              199 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	octeon_sha512_read_hash(sctx);
sctx              204 arch/mips/cavium-octeon/crypto/octeon-sha512.c 		dst[i] = cpu_to_be64(sctx->state[i]);
sctx              207 arch/mips/cavium-octeon/crypto/octeon-sha512.c 	memset(sctx, 0, sizeof(struct sha512_state));
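
octeon-sha512.c widens the byte counter to 128 bits: count[0] carries into count[1] on overflow, and the final encodes the bit length as the pair (count[1] << 3 | count[0] >> 61, count[0] << 3). A standalone check of that arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t count[2] = { UINT64_MAX - 7, 0 };
		unsigned int len = 16;

		if ((count[0] += len) < len)	/* wrapped past 2^64 */
			count[1]++;

		uint64_t bits_hi = count[1] << 3 | count[0] >> 61;
		uint64_t bits_lo = count[0] << 3;

		printf("%llu %llu %llu\n",	/* 1 8 64 */
		       (unsigned long long)count[1],
		       (unsigned long long)bits_hi,
		       (unsigned long long)bits_lo);
		return 0;
	}
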
sctx               21 arch/powerpc/crypto/md5-glue.c static inline void ppc_md5_clear_context(struct md5_state *sctx)
sctx               24 arch/powerpc/crypto/md5-glue.c 	u32 *ptr = (u32 *)sctx;
sctx               33 arch/powerpc/crypto/md5-glue.c 	struct md5_state *sctx = shash_desc_ctx(desc);
sctx               35 arch/powerpc/crypto/md5-glue.c 	sctx->hash[0] = MD5_H0;
sctx               36 arch/powerpc/crypto/md5-glue.c 	sctx->hash[1] = MD5_H1;
sctx               37 arch/powerpc/crypto/md5-glue.c 	sctx->hash[2] = MD5_H2;
sctx               38 arch/powerpc/crypto/md5-glue.c 	sctx->hash[3] = MD5_H3;

sctx               39 arch/powerpc/crypto/md5-glue.c 	sctx->byte_count = 0;
sctx               47 arch/powerpc/crypto/md5-glue.c 	struct md5_state *sctx = shash_desc_ctx(desc);
sctx               48 arch/powerpc/crypto/md5-glue.c 	const unsigned int offset = sctx->byte_count & 0x3f;
sctx               52 arch/powerpc/crypto/md5-glue.c 	sctx->byte_count += len;
sctx               55 arch/powerpc/crypto/md5-glue.c 		memcpy((char *)sctx->block + offset, src, len);
sctx               60 arch/powerpc/crypto/md5-glue.c 		memcpy((char *)sctx->block + offset, src, avail);
sctx               61 arch/powerpc/crypto/md5-glue.c 		ppc_md5_transform(sctx->hash, (const u8 *)sctx->block, 1);
sctx               67 arch/powerpc/crypto/md5-glue.c 		ppc_md5_transform(sctx->hash, src, len >> 6);
sctx               72 arch/powerpc/crypto/md5-glue.c 	memcpy((char *)sctx->block, src, len);
sctx               78 arch/powerpc/crypto/md5-glue.c 	struct md5_state *sctx = shash_desc_ctx(desc);
sctx               79 arch/powerpc/crypto/md5-glue.c 	const unsigned int offset = sctx->byte_count & 0x3f;
sctx               80 arch/powerpc/crypto/md5-glue.c 	const u8 *src = (const u8 *)sctx->block;
sctx               83 arch/powerpc/crypto/md5-glue.c 	__le64 *pbits = (__le64 *)((char *)sctx->block + 56);
sctx               90 arch/powerpc/crypto/md5-glue.c 		ppc_md5_transform(sctx->hash, src, 1);
sctx               91 arch/powerpc/crypto/md5-glue.c 		p = (char *)sctx->block;
sctx               96 arch/powerpc/crypto/md5-glue.c 	*pbits = cpu_to_le64(sctx->byte_count << 3);
sctx               97 arch/powerpc/crypto/md5-glue.c 	ppc_md5_transform(sctx->hash, src, 1);
sctx               99 arch/powerpc/crypto/md5-glue.c 	dst[0] = cpu_to_le32(sctx->hash[0]);
sctx              100 arch/powerpc/crypto/md5-glue.c 	dst[1] = cpu_to_le32(sctx->hash[1]);
sctx              101 arch/powerpc/crypto/md5-glue.c 	dst[2] = cpu_to_le32(sctx->hash[2]);
sctx              102 arch/powerpc/crypto/md5-glue.c 	dst[3] = cpu_to_le32(sctx->hash[3]);
sctx              104 arch/powerpc/crypto/md5-glue.c 	ppc_md5_clear_context(sctx);
sctx              110 arch/powerpc/crypto/md5-glue.c 	struct md5_state *sctx = shash_desc_ctx(desc);
sctx              112 arch/powerpc/crypto/md5-glue.c 	memcpy(out, sctx, sizeof(*sctx));
sctx              118 arch/powerpc/crypto/md5-glue.c 	struct md5_state *sctx = shash_desc_ctx(desc);
sctx              120 arch/powerpc/crypto/md5-glue.c 	memcpy(sctx, in, sizeof(*sctx));
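
ppc_md5_clear_context() above wipes the descriptor context; its body (not among the matched lines) walks the struct as 32-bit stores, which is why it casts the context to u32 *. A standalone sketch under that assumption, with a stand-in struct of the same 4-byte-multiple shape:

	#include <stdint.h>
	#include <stdio.h>

	struct md5_state_like {	/* stand-in; size is a multiple of 4 */
		uint32_t hash[4];
		uint64_t byte_count;
		uint32_t block[16];
	};

	static void clear_words(void *ctx, unsigned int size)
	{
		uint32_t *ptr = ctx;
		int count = size >> 2;	/* assumes size % 4 == 0, as the driver asserts */

		do { *ptr++ = 0; } while (--count);
	}

	int main(void)
	{
		struct md5_state_like s = { .hash = { 1, 2, 3, 4 } };

		clear_words(&s, sizeof(s));
		printf("%u %u\n", s.hash[0], s.hash[3]);	/* 0 0 */
		return 0;
	}
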
sctx               49 arch/powerpc/crypto/sha1-spe-glue.c static inline void ppc_sha1_clear_context(struct sha1_state *sctx)
sctx               52 arch/powerpc/crypto/sha1-spe-glue.c 	u32 *ptr = (u32 *)sctx;
sctx               61 arch/powerpc/crypto/sha1-spe-glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               63 arch/powerpc/crypto/sha1-spe-glue.c 	sctx->state[0] = SHA1_H0;
sctx               64 arch/powerpc/crypto/sha1-spe-glue.c 	sctx->state[1] = SHA1_H1;
sctx               65 arch/powerpc/crypto/sha1-spe-glue.c 	sctx->state[2] = SHA1_H2;
sctx               66 arch/powerpc/crypto/sha1-spe-glue.c 	sctx->state[3] = SHA1_H3;
sctx               67 arch/powerpc/crypto/sha1-spe-glue.c 	sctx->state[4] = SHA1_H4;
sctx               68 arch/powerpc/crypto/sha1-spe-glue.c 	sctx->count = 0;
sctx               76 arch/powerpc/crypto/sha1-spe-glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               77 arch/powerpc/crypto/sha1-spe-glue.c 	const unsigned int offset = sctx->count & 0x3f;
sctx               83 arch/powerpc/crypto/sha1-spe-glue.c 		sctx->count += len;
sctx               84 arch/powerpc/crypto/sha1-spe-glue.c 		memcpy((char *)sctx->buffer + offset, src, len);
sctx               88 arch/powerpc/crypto/sha1-spe-glue.c 	sctx->count += len;
sctx               91 arch/powerpc/crypto/sha1-spe-glue.c 		memcpy((char *)sctx->buffer + offset, src, avail);
sctx               94 arch/powerpc/crypto/sha1-spe-glue.c 		ppc_spe_sha1_transform(sctx->state, (const u8 *)sctx->buffer, 1);
sctx              106 arch/powerpc/crypto/sha1-spe-glue.c 		ppc_spe_sha1_transform(sctx->state, src, bytes >> 6);
sctx              113 arch/powerpc/crypto/sha1-spe-glue.c 	memcpy((char *)sctx->buffer, src, len);
sctx              119 arch/powerpc/crypto/sha1-spe-glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              120 arch/powerpc/crypto/sha1-spe-glue.c 	const unsigned int offset = sctx->count & 0x3f;
sctx              121 arch/powerpc/crypto/sha1-spe-glue.c 	char *p = (char *)sctx->buffer + offset;
sctx              123 arch/powerpc/crypto/sha1-spe-glue.c 	__be64 *pbits = (__be64 *)(((char *)&sctx->buffer) + 56);
sctx              133 arch/powerpc/crypto/sha1-spe-glue.c 		ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1);
sctx              134 arch/powerpc/crypto/sha1-spe-glue.c 		p = (char *)sctx->buffer;
sctx              139 arch/powerpc/crypto/sha1-spe-glue.c 	*pbits = cpu_to_be64(sctx->count << 3);
sctx              140 arch/powerpc/crypto/sha1-spe-glue.c 	ppc_spe_sha1_transform(sctx->state, sctx->buffer, 1);
sctx              144 arch/powerpc/crypto/sha1-spe-glue.c 	dst[0] = cpu_to_be32(sctx->state[0]);
sctx              145 arch/powerpc/crypto/sha1-spe-glue.c 	dst[1] = cpu_to_be32(sctx->state[1]);
sctx              146 arch/powerpc/crypto/sha1-spe-glue.c 	dst[2] = cpu_to_be32(sctx->state[2]);
sctx              147 arch/powerpc/crypto/sha1-spe-glue.c 	dst[3] = cpu_to_be32(sctx->state[3]);
sctx              148 arch/powerpc/crypto/sha1-spe-glue.c 	dst[4] = cpu_to_be32(sctx->state[4]);
sctx              150 arch/powerpc/crypto/sha1-spe-glue.c 	ppc_sha1_clear_context(sctx);
sctx              156 arch/powerpc/crypto/sha1-spe-glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              158 arch/powerpc/crypto/sha1-spe-glue.c 	memcpy(out, sctx, sizeof(*sctx));
sctx              164 arch/powerpc/crypto/sha1-spe-glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              166 arch/powerpc/crypto/sha1-spe-glue.c 	memcpy(sctx, in, sizeof(*sctx));
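
The SPE glue bounds how much data is processed per enable/disable of the SPE unit and hands the transform whole 64-byte blocks at a time (the bytes >> 6 above). A sketch of that batching; MAX_BYTES here is an assumed per-section budget, not a value taken from these lines:

	#include <stdio.h>

	#define MAX_BYTES 2048	/* assumption: per-SPE-section budget */

	int main(void)
	{
		unsigned int len = 5000, total_blocks = 0;

		while (len > 63) {
			unsigned int bytes = (len > MAX_BYTES) ? MAX_BYTES : len;

			bytes &= ~0x3fU;		/* whole blocks only */
			total_blocks += bytes >> 6;	/* what the transform receives */
			len -= bytes;
		}
		printf("%u blocks, %u bytes left over\n", total_blocks, len);	/* 78, 8 */
		return 0;
	}
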
sctx               28 arch/powerpc/crypto/sha1.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               30 arch/powerpc/crypto/sha1.c 	*sctx = (struct sha1_state){
sctx               40 arch/powerpc/crypto/sha1.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               44 arch/powerpc/crypto/sha1.c 	partial = sctx->count & 0x3f;
sctx               45 arch/powerpc/crypto/sha1.c 	sctx->count += len;
sctx               54 arch/powerpc/crypto/sha1.c 			memcpy(sctx->buffer + partial, data, done + 64);
sctx               55 arch/powerpc/crypto/sha1.c 			src = sctx->buffer;
sctx               59 arch/powerpc/crypto/sha1.c 			powerpc_sha_transform(sctx->state, src, temp);
sctx               67 arch/powerpc/crypto/sha1.c 	memcpy(sctx->buffer + partial, src, len - done);
sctx               76 arch/powerpc/crypto/sha1.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               82 arch/powerpc/crypto/sha1.c 	bits = cpu_to_be64(sctx->count << 3);
sctx               85 arch/powerpc/crypto/sha1.c 	index = sctx->count & 0x3f;
sctx               94 arch/powerpc/crypto/sha1.c 		dst[i] = cpu_to_be32(sctx->state[i]);
sctx               97 arch/powerpc/crypto/sha1.c 	memset(sctx, 0, sizeof *sctx);
sctx              104 arch/powerpc/crypto/sha1.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              106 arch/powerpc/crypto/sha1.c 	memcpy(out, sctx, sizeof(*sctx));
sctx              112 arch/powerpc/crypto/sha1.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              114 arch/powerpc/crypto/sha1.c 	memcpy(sctx, in, sizeof(*sctx));
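
The export/import pairs that recur through this listing (powerpc sha1.c above, the Octeon and sparc drivers elsewhere) are plain structure copies: the whole partial state (counter, chain values, buffered tail) round-trips through the caller's buffer. A minimal standalone illustration with a stand-in state struct:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	struct sha1_state_like {
		uint32_t state[5];
		uint64_t count;
		uint8_t buffer[64];
	};

	int main(void)
	{
		struct sha1_state_like a = { .count = 123 }, b;
		uint8_t out[sizeof(a)];

		memcpy(out, &a, sizeof(a));	/* export */
		memcpy(&b, out, sizeof(b));	/* import */
		printf("%llu\n", (unsigned long long)b.count);	/* 123 */
		return 0;
	}
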
sctx               50 arch/powerpc/crypto/sha256-spe-glue.c static inline void ppc_sha256_clear_context(struct sha256_state *sctx)
sctx               53 arch/powerpc/crypto/sha256-spe-glue.c 	u32 *ptr = (u32 *)sctx;
sctx               62 arch/powerpc/crypto/sha256-spe-glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               64 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[0] = SHA256_H0;
sctx               65 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[1] = SHA256_H1;
sctx               66 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[2] = SHA256_H2;
sctx               67 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[3] = SHA256_H3;
sctx               68 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[4] = SHA256_H4;
sctx               69 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[5] = SHA256_H5;
sctx               70 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[6] = SHA256_H6;
sctx               71 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[7] = SHA256_H7;
sctx               72 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->count = 0;
sctx               79 arch/powerpc/crypto/sha256-spe-glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               81 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[0] = SHA224_H0;
sctx               82 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[1] = SHA224_H1;
sctx               83 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[2] = SHA224_H2;
sctx               84 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[3] = SHA224_H3;
sctx               85 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[4] = SHA224_H4;
sctx               86 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[5] = SHA224_H5;
sctx               87 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[6] = SHA224_H6;
sctx               88 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->state[7] = SHA224_H7;
sctx               89 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->count = 0;
sctx               97 arch/powerpc/crypto/sha256-spe-glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               98 arch/powerpc/crypto/sha256-spe-glue.c 	const unsigned int offset = sctx->count & 0x3f;
sctx              104 arch/powerpc/crypto/sha256-spe-glue.c 		sctx->count += len;
sctx              105 arch/powerpc/crypto/sha256-spe-glue.c 		memcpy((char *)sctx->buf + offset, src, len);
sctx              109 arch/powerpc/crypto/sha256-spe-glue.c 	sctx->count += len;
sctx              112 arch/powerpc/crypto/sha256-spe-glue.c 		memcpy((char *)sctx->buf + offset, src, avail);
sctx              115 arch/powerpc/crypto/sha256-spe-glue.c 		ppc_spe_sha256_transform(sctx->state, (const u8 *)sctx->buf, 1);
sctx              128 arch/powerpc/crypto/sha256-spe-glue.c 		ppc_spe_sha256_transform(sctx->state, src, bytes >> 6);
sctx              135 arch/powerpc/crypto/sha256-spe-glue.c 	memcpy((char *)sctx->buf, src, len);
sctx              141 arch/powerpc/crypto/sha256-spe-glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              142 arch/powerpc/crypto/sha256-spe-glue.c 	const unsigned int offset = sctx->count & 0x3f;
sctx              143 arch/powerpc/crypto/sha256-spe-glue.c 	char *p = (char *)sctx->buf + offset;
sctx              145 arch/powerpc/crypto/sha256-spe-glue.c 	__be64 *pbits = (__be64 *)(((char *)&sctx->buf) + 56);
sctx              155 arch/powerpc/crypto/sha256-spe-glue.c 		ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);
sctx              156 arch/powerpc/crypto/sha256-spe-glue.c 		p = (char *)sctx->buf;
sctx              161 arch/powerpc/crypto/sha256-spe-glue.c 	*pbits = cpu_to_be64(sctx->count << 3);
sctx              162 arch/powerpc/crypto/sha256-spe-glue.c 	ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);
sctx              166 arch/powerpc/crypto/sha256-spe-glue.c 	dst[0] = cpu_to_be32(sctx->state[0]);
sctx              167 arch/powerpc/crypto/sha256-spe-glue.c 	dst[1] = cpu_to_be32(sctx->state[1]);
sctx              168 arch/powerpc/crypto/sha256-spe-glue.c 	dst[2] = cpu_to_be32(sctx->state[2]);
sctx              169 arch/powerpc/crypto/sha256-spe-glue.c 	dst[3] = cpu_to_be32(sctx->state[3]);
sctx              170 arch/powerpc/crypto/sha256-spe-glue.c 	dst[4] = cpu_to_be32(sctx->state[4]);
sctx              171 arch/powerpc/crypto/sha256-spe-glue.c 	dst[5] = cpu_to_be32(sctx->state[5]);
sctx              172 arch/powerpc/crypto/sha256-spe-glue.c 	dst[6] = cpu_to_be32(sctx->state[6]);
sctx              173 arch/powerpc/crypto/sha256-spe-glue.c 	dst[7] = cpu_to_be32(sctx->state[7]);
sctx              175 arch/powerpc/crypto/sha256-spe-glue.c 	ppc_sha256_clear_context(sctx);
sctx              202 arch/powerpc/crypto/sha256-spe-glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              204 arch/powerpc/crypto/sha256-spe-glue.c 	memcpy(out, sctx, sizeof(*sctx));
sctx              210 arch/powerpc/crypto/sha256-spe-glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              212 arch/powerpc/crypto/sha256-spe-glue.c 	memcpy(sctx, in, sizeof(*sctx));
sctx              194 arch/powerpc/kernel/signal_32.c 	struct sigcontext sctx;		/* the sigcontext */
sctx             1379 arch/powerpc/kernel/signal_32.c 	sc = (struct sigcontext __user *) &frame->sctx;
sctx             1470 arch/powerpc/kernel/signal_32.c 	sc = &sf->sctx;
sctx              349 arch/powerpc/perf/callchain.c 	struct sigcontext32	sctx;
sctx              391 arch/powerpc/perf/callchain.c 	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
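
Note: in these last two matches, sctx is not a crypto state at all; it names the struct sigcontext member embedded in the powerpc 32-bit signal frame, and the compat sigcontext32 mirror of it that the perf callchain code walks. The crypto matches resume below with s390.
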
sctx               74 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx               77 arch/s390/crypto/aes_s390.c 	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
sctx               78 arch/s390/crypto/aes_s390.c 	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
sctx               81 arch/s390/crypto/aes_s390.c 	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
sctx               84 arch/s390/crypto/aes_s390.c 		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
sctx               93 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx              102 arch/s390/crypto/aes_s390.c 	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
sctx              103 arch/s390/crypto/aes_s390.c 	if (!sctx->fc)
sctx              106 arch/s390/crypto/aes_s390.c 	sctx->key_len = key_len;
sctx              107 arch/s390/crypto/aes_s390.c 	memcpy(sctx->key, in_key, key_len);
sctx              113 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx              115 arch/s390/crypto/aes_s390.c 	if (unlikely(!sctx->fc)) {
sctx              116 arch/s390/crypto/aes_s390.c 		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
sctx              119 arch/s390/crypto/aes_s390.c 	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
sctx              124 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx              126 arch/s390/crypto/aes_s390.c 	if (unlikely(!sctx->fc)) {
sctx              127 arch/s390/crypto/aes_s390.c 		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
sctx              130 arch/s390/crypto/aes_s390.c 	cpacf_km(sctx->fc | CPACF_DECRYPT,
sctx              131 arch/s390/crypto/aes_s390.c 		 &sctx->key, out, in, AES_BLOCK_SIZE);
sctx              137 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx              139 arch/s390/crypto/aes_s390.c 	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
sctx              142 arch/s390/crypto/aes_s390.c 	if (IS_ERR(sctx->fallback.cip)) {
sctx              145 arch/s390/crypto/aes_s390.c 		return PTR_ERR(sctx->fallback.cip);
sctx              153 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx              155 arch/s390/crypto/aes_s390.c 	crypto_free_cipher(sctx->fallback.cip);
sctx              156 arch/s390/crypto/aes_s390.c 	sctx->fallback.cip = NULL;
sctx              184 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx              187 arch/s390/crypto/aes_s390.c 	crypto_sync_skcipher_clear_flags(sctx->fallback.blk,
sctx              189 arch/s390/crypto/aes_s390.c 	crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
sctx              192 arch/s390/crypto/aes_s390.c 	ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len);
sctx              195 arch/s390/crypto/aes_s390.c 	tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) &
sctx              207 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
sctx              208 arch/s390/crypto/aes_s390.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
sctx              210 arch/s390/crypto/aes_s390.c 	skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
sctx              226 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
sctx              227 arch/s390/crypto/aes_s390.c 	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
sctx              229 arch/s390/crypto/aes_s390.c 	skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
sctx              240 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx              249 arch/s390/crypto/aes_s390.c 	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
sctx              250 arch/s390/crypto/aes_s390.c 	if (!sctx->fc)
sctx              253 arch/s390/crypto/aes_s390.c 	sctx->key_len = key_len;
sctx              254 arch/s390/crypto/aes_s390.c 	memcpy(sctx->key, in_key, key_len);
sctx              261 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
sctx              269 arch/s390/crypto/aes_s390.c 		cpacf_km(sctx->fc | modifier, sctx->key,
sctx              281 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
sctx              284 arch/s390/crypto/aes_s390.c 	if (unlikely(!sctx->fc))
sctx              295 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
sctx              298 arch/s390/crypto/aes_s390.c 	if (unlikely(!sctx->fc))
sctx              308 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx              310 arch/s390/crypto/aes_s390.c 	sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
sctx              313 arch/s390/crypto/aes_s390.c 	if (IS_ERR(sctx->fallback.blk)) {
sctx              316 arch/s390/crypto/aes_s390.c 		return PTR_ERR(sctx->fallback.blk);
sctx              324 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx              326 arch/s390/crypto/aes_s390.c 	crypto_free_sync_skcipher(sctx->fallback.blk);
sctx              355 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx              364 arch/s390/crypto/aes_s390.c 	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
sctx              365 arch/s390/crypto/aes_s390.c 	if (!sctx->fc)
sctx              368 arch/s390/crypto/aes_s390.c 	sctx->key_len = key_len;
sctx              369 arch/s390/crypto/aes_s390.c 	memcpy(sctx->key, in_key, key_len);
sctx              376 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
sctx              386 arch/s390/crypto/aes_s390.c 	memcpy(param.key, sctx->key, sctx->key_len);
sctx              390 arch/s390/crypto/aes_s390.c 		cpacf_kmc(sctx->fc | modifier, &param,
sctx              402 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
sctx              405 arch/s390/crypto/aes_s390.c 	if (unlikely(!sctx->fc))
sctx              416 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
sctx              419 arch/s390/crypto/aes_s390.c 	if (unlikely(!sctx->fc))
sctx              666 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx              675 arch/s390/crypto/aes_s390.c 	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
sctx              676 arch/s390/crypto/aes_s390.c 	if (!sctx->fc)
sctx              679 arch/s390/crypto/aes_s390.c 	sctx->key_len = key_len;
sctx              680 arch/s390/crypto/aes_s390.c 	memcpy(sctx->key, in_key, key_len);
sctx              702 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
sctx              715 arch/s390/crypto/aes_s390.c 		cpacf_kmctr(sctx->fc | modifier, sctx->key,
sctx              730 arch/s390/crypto/aes_s390.c 		cpacf_kmctr(sctx->fc | modifier, sctx->key,
sctx              745 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
sctx              748 arch/s390/crypto/aes_s390.c 	if (unlikely(!sctx->fc))
sctx              759 arch/s390/crypto/aes_s390.c 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
sctx              762 arch/s390/crypto/aes_s390.c 	if (unlikely(!sctx->fc))
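
aes_s390.c probes for hardware support at setkey time: a CPACF function code is derived from the key length and kept only if cpacf_test_func() confirms the machine implements it; sctx->fc == 0 then steers every operation to the software fallback allocated at init time. A standalone sketch of the pattern with a stubbed probe; the function-code values are placeholders, not the real CPACF constants:

	#include <stdbool.h>
	#include <stdio.h>

	static bool cpacf_probe_stub(unsigned int fc)	/* stand-in for cpacf_test_func() */
	{
		return fc == 18 || fc == 19 || fc == 20;	/* pretend all AES sizes exist */
	}

	static unsigned int pick_fc(unsigned int key_len)
	{
		unsigned int fc;

		switch (key_len) {
		case 16: fc = 18; break;	/* placeholder codes */
		case 24: fc = 19; break;
		case 32: fc = 20; break;
		default: fc = 0; break;
		}
		return (fc && cpacf_probe_stub(fc)) ? fc : 0;	/* 0 => use fallback */
	}

	int main(void)
	{
		printf("%u %u\n", pick_fc(16), pick_fc(17));	/* 18 0 */
		return 0;
	}
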
sctx               32 arch/s390/crypto/sha1_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               34 arch/s390/crypto/sha1_s390.c 	sctx->state[0] = SHA1_H0;
sctx               35 arch/s390/crypto/sha1_s390.c 	sctx->state[1] = SHA1_H1;
sctx               36 arch/s390/crypto/sha1_s390.c 	sctx->state[2] = SHA1_H2;
sctx               37 arch/s390/crypto/sha1_s390.c 	sctx->state[3] = SHA1_H3;
sctx               38 arch/s390/crypto/sha1_s390.c 	sctx->state[4] = SHA1_H4;
sctx               39 arch/s390/crypto/sha1_s390.c 	sctx->count = 0;
sctx               40 arch/s390/crypto/sha1_s390.c 	sctx->func = CPACF_KIMD_SHA_1;
sctx               47 arch/s390/crypto/sha1_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               50 arch/s390/crypto/sha1_s390.c 	octx->count = sctx->count;
sctx               51 arch/s390/crypto/sha1_s390.c 	memcpy(octx->state, sctx->state, sizeof(octx->state));
sctx               52 arch/s390/crypto/sha1_s390.c 	memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer));
sctx               58 arch/s390/crypto/sha1_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               61 arch/s390/crypto/sha1_s390.c 	sctx->count = ictx->count;
sctx               62 arch/s390/crypto/sha1_s390.c 	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
sctx               63 arch/s390/crypto/sha1_s390.c 	memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer));
sctx               64 arch/s390/crypto/sha1_s390.c 	sctx->func = CPACF_KIMD_SHA_1;
sctx               22 arch/s390/crypto/sha256_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               24 arch/s390/crypto/sha256_s390.c 	sctx->state[0] = SHA256_H0;
sctx               25 arch/s390/crypto/sha256_s390.c 	sctx->state[1] = SHA256_H1;
sctx               26 arch/s390/crypto/sha256_s390.c 	sctx->state[2] = SHA256_H2;
sctx               27 arch/s390/crypto/sha256_s390.c 	sctx->state[3] = SHA256_H3;
sctx               28 arch/s390/crypto/sha256_s390.c 	sctx->state[4] = SHA256_H4;
sctx               29 arch/s390/crypto/sha256_s390.c 	sctx->state[5] = SHA256_H5;
sctx               30 arch/s390/crypto/sha256_s390.c 	sctx->state[6] = SHA256_H6;
sctx               31 arch/s390/crypto/sha256_s390.c 	sctx->state[7] = SHA256_H7;
sctx               32 arch/s390/crypto/sha256_s390.c 	sctx->count = 0;
sctx               33 arch/s390/crypto/sha256_s390.c 	sctx->func = CPACF_KIMD_SHA_256;
sctx               40 arch/s390/crypto/sha256_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               43 arch/s390/crypto/sha256_s390.c 	octx->count = sctx->count;
sctx               44 arch/s390/crypto/sha256_s390.c 	memcpy(octx->state, sctx->state, sizeof(octx->state));
sctx               45 arch/s390/crypto/sha256_s390.c 	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
sctx               51 arch/s390/crypto/sha256_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               54 arch/s390/crypto/sha256_s390.c 	sctx->count = ictx->count;
sctx               55 arch/s390/crypto/sha256_s390.c 	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
sctx               56 arch/s390/crypto/sha256_s390.c 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx               57 arch/s390/crypto/sha256_s390.c 	sctx->func = CPACF_KIMD_SHA_256;
sctx               81 arch/s390/crypto/sha256_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               83 arch/s390/crypto/sha256_s390.c 	sctx->state[0] = SHA224_H0;
sctx               84 arch/s390/crypto/sha256_s390.c 	sctx->state[1] = SHA224_H1;
sctx               85 arch/s390/crypto/sha256_s390.c 	sctx->state[2] = SHA224_H2;
sctx               86 arch/s390/crypto/sha256_s390.c 	sctx->state[3] = SHA224_H3;
sctx               87 arch/s390/crypto/sha256_s390.c 	sctx->state[4] = SHA224_H4;
sctx               88 arch/s390/crypto/sha256_s390.c 	sctx->state[5] = SHA224_H5;
sctx               89 arch/s390/crypto/sha256_s390.c 	sctx->state[6] = SHA224_H6;
sctx               90 arch/s390/crypto/sha256_s390.c 	sctx->state[7] = SHA224_H7;
sctx               91 arch/s390/crypto/sha256_s390.c 	sctx->count = 0;
sctx               92 arch/s390/crypto/sha256_s390.c 	sctx->func = CPACF_KIMD_SHA_256;
sctx               23 arch/s390/crypto/sha3_256_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               25 arch/s390/crypto/sha3_256_s390.c 	memset(sctx->state, 0, sizeof(sctx->state));
sctx               26 arch/s390/crypto/sha3_256_s390.c 	sctx->count = 0;
sctx               27 arch/s390/crypto/sha3_256_s390.c 	sctx->func = CPACF_KIMD_SHA3_256;
sctx               34 arch/s390/crypto/sha3_256_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               37 arch/s390/crypto/sha3_256_s390.c 	octx->rsiz = sctx->count;
sctx               38 arch/s390/crypto/sha3_256_s390.c 	memcpy(octx->st, sctx->state, sizeof(octx->st));
sctx               39 arch/s390/crypto/sha3_256_s390.c 	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
sctx               46 arch/s390/crypto/sha3_256_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               49 arch/s390/crypto/sha3_256_s390.c 	sctx->count = ictx->rsiz;
sctx               50 arch/s390/crypto/sha3_256_s390.c 	memcpy(sctx->state, ictx->st, sizeof(ictx->st));
sctx               51 arch/s390/crypto/sha3_256_s390.c 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx               52 arch/s390/crypto/sha3_256_s390.c 	sctx->func = CPACF_KIMD_SHA3_256;
sctx               59 arch/s390/crypto/sha3_256_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               62 arch/s390/crypto/sha3_256_s390.c 	sctx->count = ictx->rsiz;
sctx               63 arch/s390/crypto/sha3_256_s390.c 	memcpy(sctx->state, ictx->st, sizeof(ictx->st));
sctx               64 arch/s390/crypto/sha3_256_s390.c 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx               65 arch/s390/crypto/sha3_256_s390.c 	sctx->func = CPACF_KIMD_SHA3_224;
sctx               90 arch/s390/crypto/sha3_256_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               92 arch/s390/crypto/sha3_256_s390.c 	memset(sctx->state, 0, sizeof(sctx->state));
sctx               93 arch/s390/crypto/sha3_256_s390.c 	sctx->count = 0;
sctx               94 arch/s390/crypto/sha3_256_s390.c 	sctx->func = CPACF_KIMD_SHA3_224;
sctx               22 arch/s390/crypto/sha3_512_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               24 arch/s390/crypto/sha3_512_s390.c 	memset(sctx->state, 0, sizeof(sctx->state));
sctx               25 arch/s390/crypto/sha3_512_s390.c 	sctx->count = 0;
sctx               26 arch/s390/crypto/sha3_512_s390.c 	sctx->func = CPACF_KIMD_SHA3_512;
sctx               33 arch/s390/crypto/sha3_512_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               36 arch/s390/crypto/sha3_512_s390.c 	octx->rsiz = sctx->count;
sctx               37 arch/s390/crypto/sha3_512_s390.c 	octx->rsizw = sctx->count >> 32;
sctx               39 arch/s390/crypto/sha3_512_s390.c 	memcpy(octx->st, sctx->state, sizeof(octx->st));
sctx               40 arch/s390/crypto/sha3_512_s390.c 	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
sctx               47 arch/s390/crypto/sha3_512_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               52 arch/s390/crypto/sha3_512_s390.c 	sctx->count = ictx->rsiz;
sctx               54 arch/s390/crypto/sha3_512_s390.c 	memcpy(sctx->state, ictx->st, sizeof(ictx->st));
sctx               55 arch/s390/crypto/sha3_512_s390.c 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx               56 arch/s390/crypto/sha3_512_s390.c 	sctx->func = CPACF_KIMD_SHA3_512;
sctx               63 arch/s390/crypto/sha3_512_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               68 arch/s390/crypto/sha3_512_s390.c 	sctx->count = ictx->rsiz;
sctx               70 arch/s390/crypto/sha3_512_s390.c 	memcpy(sctx->state, ictx->st, sizeof(ictx->st));
sctx               71 arch/s390/crypto/sha3_512_s390.c 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx               72 arch/s390/crypto/sha3_512_s390.c 	sctx->func = CPACF_KIMD_SHA3_384;
sctx               99 arch/s390/crypto/sha3_512_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx              101 arch/s390/crypto/sha3_512_s390.c 	memset(sctx->state, 0, sizeof(sctx->state));
sctx              102 arch/s390/crypto/sha3_512_s390.c 	sctx->count = 0;
sctx              103 arch/s390/crypto/sha3_512_s390.c 	sctx->func = CPACF_KIMD_SHA3_384;
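
The s390 SHA3 drivers have no count field in the generic state they export to, so they stash the byte count in the export's rsiz field, and sha3_512 additionally stores the high 32 bits in rsizw, as the lines above show. A standalone check of that split:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t count = 0x123456789abcdef0ULL;
		uint32_t rsiz  = (uint32_t)count;	/* octx->rsiz  = sctx->count */
		uint32_t rsizw = count >> 32;		/* octx->rsizw = sctx->count >> 32 */

		uint64_t back = (uint64_t)rsizw << 32 | rsiz;
		printf("%d\n", back == count);		/* 1 */
		return 0;
	}
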
sctx               41 arch/s390/crypto/sha512_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               44 arch/s390/crypto/sha512_s390.c 	octx->count[0] = sctx->count;
sctx               46 arch/s390/crypto/sha512_s390.c 	memcpy(octx->state, sctx->state, sizeof(octx->state));
sctx               47 arch/s390/crypto/sha512_s390.c 	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
sctx               53 arch/s390/crypto/sha512_s390.c 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
sctx               58 arch/s390/crypto/sha512_s390.c 	sctx->count = ictx->count[0];
sctx               60 arch/s390/crypto/sha512_s390.c 	memcpy(sctx->state, ictx->state, sizeof(ictx->state));
sctx               61 arch/s390/crypto/sha512_s390.c 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx               62 arch/s390/crypto/sha512_s390.c 	sctx->func = CPACF_KIMD_SHA_512;
sctx               19 arch/s390/purgatory/purgatory.c 	struct sha256_state sctx;
sctx               21 arch/s390/purgatory/purgatory.c 	sha256_init(&sctx);
sctx               25 arch/s390/purgatory/purgatory.c 		sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
sctx               27 arch/s390/purgatory/purgatory.c 	sha256_final(&sctx, digest);
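
The purgatory fragment above is the plain init/update/final usage of the kernel's sha256 library, hashing a list of memory regions into one digest. A standalone analog using OpenSSL's legacy SHA256_* API in place of the kernel helpers; struct region is illustrative. Compile with -lcrypto:

	#include <openssl/sha.h>
	#include <stdio.h>

	struct region { const void *start; size_t len; };

	int main(void)
	{
		struct region regions[] = {
			{ "hello ", 6 },
			{ "world", 5 },
		};
		unsigned char digest[SHA256_DIGEST_LENGTH];
		SHA256_CTX sctx;

		SHA256_Init(&sctx);
		for (size_t i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
			SHA256_Update(&sctx, regions[i].start, regions[i].len);
		SHA256_Final(digest, &sctx);

		for (int i = 0; i < SHA256_DIGEST_LENGTH; i++)
			printf("%02x", digest[i]);
		printf("\n");
		return 0;
	}
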
sctx               46 arch/sparc/crypto/md5_glue.c static void __md5_sparc64_update(struct md5_state *sctx, const u8 *data,
sctx               51 arch/sparc/crypto/md5_glue.c 	sctx->byte_count += len;
sctx               54 arch/sparc/crypto/md5_glue.c 		memcpy((u8 *)sctx->block + partial, data, done);
sctx               55 arch/sparc/crypto/md5_glue.c 		md5_sparc64_transform(sctx->hash, (u8 *)sctx->block, 1);
sctx               60 arch/sparc/crypto/md5_glue.c 		md5_sparc64_transform(sctx->hash, data + done, rounds);
sctx               64 arch/sparc/crypto/md5_glue.c 	memcpy(sctx->block, data + done, len - done);
sctx               70 arch/sparc/crypto/md5_glue.c 	struct md5_state *sctx = shash_desc_ctx(desc);
sctx               71 arch/sparc/crypto/md5_glue.c 	unsigned int partial = sctx->byte_count % MD5_HMAC_BLOCK_SIZE;
sctx               75 arch/sparc/crypto/md5_glue.c 		sctx->byte_count += len;
sctx               76 arch/sparc/crypto/md5_glue.c 		memcpy((u8 *)sctx->block + partial, data, len);
sctx               78 arch/sparc/crypto/md5_glue.c 		__md5_sparc64_update(sctx, data, len, partial);
sctx               86 arch/sparc/crypto/md5_glue.c 	struct md5_state *sctx = shash_desc_ctx(desc);
sctx               92 arch/sparc/crypto/md5_glue.c 	bits = cpu_to_le64(sctx->byte_count << 3);
sctx               95 arch/sparc/crypto/md5_glue.c 	index = sctx->byte_count % MD5_HMAC_BLOCK_SIZE;
sctx              100 arch/sparc/crypto/md5_glue.c 		sctx->byte_count += padlen;
sctx              101 arch/sparc/crypto/md5_glue.c 		memcpy((u8 *)sctx->block + index, padding, padlen);
sctx              103 arch/sparc/crypto/md5_glue.c 		__md5_sparc64_update(sctx, padding, padlen, index);
sctx              105 arch/sparc/crypto/md5_glue.c 	__md5_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
sctx              109 arch/sparc/crypto/md5_glue.c 		dst[i] = sctx->hash[i];
sctx              112 arch/sparc/crypto/md5_glue.c 	memset(sctx, 0, sizeof(*sctx));
sctx              119 arch/sparc/crypto/md5_glue.c 	struct md5_state *sctx = shash_desc_ctx(desc);
sctx              121 arch/sparc/crypto/md5_glue.c 	memcpy(out, sctx, sizeof(*sctx));
sctx              128 arch/sparc/crypto/md5_glue.c 	struct md5_state *sctx = shash_desc_ctx(desc);
sctx              130 arch/sparc/crypto/md5_glue.c 	memcpy(sctx, in, sizeof(*sctx));
sctx               32 arch/sparc/crypto/sha1_glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               34 arch/sparc/crypto/sha1_glue.c 	*sctx = (struct sha1_state){
sctx               41 arch/sparc/crypto/sha1_glue.c static void __sha1_sparc64_update(struct sha1_state *sctx, const u8 *data,
sctx               46 arch/sparc/crypto/sha1_glue.c 	sctx->count += len;
sctx               49 arch/sparc/crypto/sha1_glue.c 		memcpy(sctx->buffer + partial, data, done);
sctx               50 arch/sparc/crypto/sha1_glue.c 		sha1_sparc64_transform(sctx->state, sctx->buffer, 1);
sctx               55 arch/sparc/crypto/sha1_glue.c 		sha1_sparc64_transform(sctx->state, data + done, rounds);
sctx               59 arch/sparc/crypto/sha1_glue.c 	memcpy(sctx->buffer, data + done, len - done);
sctx               65 arch/sparc/crypto/sha1_glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               66 arch/sparc/crypto/sha1_glue.c 	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
sctx               70 arch/sparc/crypto/sha1_glue.c 		sctx->count += len;
sctx               71 arch/sparc/crypto/sha1_glue.c 		memcpy(sctx->buffer + partial, data, len);
sctx               73 arch/sparc/crypto/sha1_glue.c 		__sha1_sparc64_update(sctx, data, len, partial);
sctx               81 arch/sparc/crypto/sha1_glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               87 arch/sparc/crypto/sha1_glue.c 	bits = cpu_to_be64(sctx->count << 3);
sctx               90 arch/sparc/crypto/sha1_glue.c 	index = sctx->count % SHA1_BLOCK_SIZE;
sctx               95 arch/sparc/crypto/sha1_glue.c 		sctx->count += padlen;
sctx               96 arch/sparc/crypto/sha1_glue.c 		memcpy(sctx->buffer + index, padding, padlen);
sctx               98 arch/sparc/crypto/sha1_glue.c 		__sha1_sparc64_update(sctx, padding, padlen, index);
sctx              100 arch/sparc/crypto/sha1_glue.c 	__sha1_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
sctx              104 arch/sparc/crypto/sha1_glue.c 		dst[i] = cpu_to_be32(sctx->state[i]);
sctx              107 arch/sparc/crypto/sha1_glue.c 	memset(sctx, 0, sizeof(*sctx));
sctx              114 arch/sparc/crypto/sha1_glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              116 arch/sparc/crypto/sha1_glue.c 	memcpy(out, sctx, sizeof(*sctx));
sctx              123 arch/sparc/crypto/sha1_glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              125 arch/sparc/crypto/sha1_glue.c 	memcpy(sctx, in, sizeof(*sctx));
sctx               32 arch/sparc/crypto/sha256_glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               33 arch/sparc/crypto/sha256_glue.c 	sctx->state[0] = SHA224_H0;
sctx               34 arch/sparc/crypto/sha256_glue.c 	sctx->state[1] = SHA224_H1;
sctx               35 arch/sparc/crypto/sha256_glue.c 	sctx->state[2] = SHA224_H2;
sctx               36 arch/sparc/crypto/sha256_glue.c 	sctx->state[3] = SHA224_H3;
sctx               37 arch/sparc/crypto/sha256_glue.c 	sctx->state[4] = SHA224_H4;
sctx               38 arch/sparc/crypto/sha256_glue.c 	sctx->state[5] = SHA224_H5;
sctx               39 arch/sparc/crypto/sha256_glue.c 	sctx->state[6] = SHA224_H6;
sctx               40 arch/sparc/crypto/sha256_glue.c 	sctx->state[7] = SHA224_H7;
sctx               41 arch/sparc/crypto/sha256_glue.c 	sctx->count = 0;
sctx               48 arch/sparc/crypto/sha256_glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               49 arch/sparc/crypto/sha256_glue.c 	sctx->state[0] = SHA256_H0;
sctx               50 arch/sparc/crypto/sha256_glue.c 	sctx->state[1] = SHA256_H1;
sctx               51 arch/sparc/crypto/sha256_glue.c 	sctx->state[2] = SHA256_H2;
sctx               52 arch/sparc/crypto/sha256_glue.c 	sctx->state[3] = SHA256_H3;
sctx               53 arch/sparc/crypto/sha256_glue.c 	sctx->state[4] = SHA256_H4;
sctx               54 arch/sparc/crypto/sha256_glue.c 	sctx->state[5] = SHA256_H5;
sctx               55 arch/sparc/crypto/sha256_glue.c 	sctx->state[6] = SHA256_H6;
sctx               56 arch/sparc/crypto/sha256_glue.c 	sctx->state[7] = SHA256_H7;
sctx               57 arch/sparc/crypto/sha256_glue.c 	sctx->count = 0;
sctx               62 arch/sparc/crypto/sha256_glue.c static void __sha256_sparc64_update(struct sha256_state *sctx, const u8 *data,
sctx               67 arch/sparc/crypto/sha256_glue.c 	sctx->count += len;
sctx               70 arch/sparc/crypto/sha256_glue.c 		memcpy(sctx->buf + partial, data, done);
sctx               71 arch/sparc/crypto/sha256_glue.c 		sha256_sparc64_transform(sctx->state, sctx->buf, 1);
sctx               76 arch/sparc/crypto/sha256_glue.c 		sha256_sparc64_transform(sctx->state, data + done, rounds);
sctx               80 arch/sparc/crypto/sha256_glue.c 	memcpy(sctx->buf, data + done, len - done);
sctx               86 arch/sparc/crypto/sha256_glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               87 arch/sparc/crypto/sha256_glue.c 	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
sctx               91 arch/sparc/crypto/sha256_glue.c 		sctx->count += len;
sctx               92 arch/sparc/crypto/sha256_glue.c 		memcpy(sctx->buf + partial, data, len);
sctx               94 arch/sparc/crypto/sha256_glue.c 		__sha256_sparc64_update(sctx, data, len, partial);
sctx              101 arch/sparc/crypto/sha256_glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              107 arch/sparc/crypto/sha256_glue.c 	bits = cpu_to_be64(sctx->count << 3);
sctx              110 arch/sparc/crypto/sha256_glue.c 	index = sctx->count % SHA256_BLOCK_SIZE;
sctx              115 arch/sparc/crypto/sha256_glue.c 		sctx->count += padlen;
sctx              116 arch/sparc/crypto/sha256_glue.c 		memcpy(sctx->buf + index, padding, padlen);
sctx              118 arch/sparc/crypto/sha256_glue.c 		__sha256_sparc64_update(sctx, padding, padlen, index);
sctx              120 arch/sparc/crypto/sha256_glue.c 	__sha256_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 56);
sctx              124 arch/sparc/crypto/sha256_glue.c 		dst[i] = cpu_to_be32(sctx->state[i]);
sctx              127 arch/sparc/crypto/sha256_glue.c 	memset(sctx, 0, sizeof(*sctx));
sctx              146 arch/sparc/crypto/sha256_glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              148 arch/sparc/crypto/sha256_glue.c 	memcpy(out, sctx, sizeof(*sctx));
sctx              154 arch/sparc/crypto/sha256_glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              156 arch/sparc/crypto/sha256_glue.c 	memcpy(sctx, in, sizeof(*sctx));
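
The sha256_glue final hits show standard Merkle-Damgård finalization: pad with 0x80 then zeros until the data ends 8 bytes short of a block boundary (offset 56 of 64), then append the big-endian bit count. The padlen computation itself sits in elided lines, so the formula below is the standard scheme rather than a quote of the driver:

#include <stdint.h>

#define BLK 64

/* Pad bytes needed to move 'count' to 56 mod 64, so that the trailing
 * 8-byte big-endian bit count ends the message exactly on a block
 * boundary.  The padding itself is 0x80 followed by zeros. */
static unsigned int pad_len(uint64_t count)
{
	unsigned int index = count % BLK;

	return (index < 56) ? (56 - index) : ((BLK + 56) - index);
}
/* e.g. count = 0 -> 56, count = 55 -> 1, count = 56 -> 64, count = 64 -> 56 */
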
sctx               31 arch/sparc/crypto/sha512_glue.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx               32 arch/sparc/crypto/sha512_glue.c 	sctx->state[0] = SHA512_H0;
sctx               33 arch/sparc/crypto/sha512_glue.c 	sctx->state[1] = SHA512_H1;
sctx               34 arch/sparc/crypto/sha512_glue.c 	sctx->state[2] = SHA512_H2;
sctx               35 arch/sparc/crypto/sha512_glue.c 	sctx->state[3] = SHA512_H3;
sctx               36 arch/sparc/crypto/sha512_glue.c 	sctx->state[4] = SHA512_H4;
sctx               37 arch/sparc/crypto/sha512_glue.c 	sctx->state[5] = SHA512_H5;
sctx               38 arch/sparc/crypto/sha512_glue.c 	sctx->state[6] = SHA512_H6;
sctx               39 arch/sparc/crypto/sha512_glue.c 	sctx->state[7] = SHA512_H7;
sctx               40 arch/sparc/crypto/sha512_glue.c 	sctx->count[0] = sctx->count[1] = 0;
sctx               47 arch/sparc/crypto/sha512_glue.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx               48 arch/sparc/crypto/sha512_glue.c 	sctx->state[0] = SHA384_H0;
sctx               49 arch/sparc/crypto/sha512_glue.c 	sctx->state[1] = SHA384_H1;
sctx               50 arch/sparc/crypto/sha512_glue.c 	sctx->state[2] = SHA384_H2;
sctx               51 arch/sparc/crypto/sha512_glue.c 	sctx->state[3] = SHA384_H3;
sctx               52 arch/sparc/crypto/sha512_glue.c 	sctx->state[4] = SHA384_H4;
sctx               53 arch/sparc/crypto/sha512_glue.c 	sctx->state[5] = SHA384_H5;
sctx               54 arch/sparc/crypto/sha512_glue.c 	sctx->state[6] = SHA384_H6;
sctx               55 arch/sparc/crypto/sha512_glue.c 	sctx->state[7] = SHA384_H7;
sctx               56 arch/sparc/crypto/sha512_glue.c 	sctx->count[0] = sctx->count[1] = 0;
sctx               61 arch/sparc/crypto/sha512_glue.c static void __sha512_sparc64_update(struct sha512_state *sctx, const u8 *data,
sctx               66 arch/sparc/crypto/sha512_glue.c 	if ((sctx->count[0] += len) < len)
sctx               67 arch/sparc/crypto/sha512_glue.c 		sctx->count[1]++;
sctx               70 arch/sparc/crypto/sha512_glue.c 		memcpy(sctx->buf + partial, data, done);
sctx               71 arch/sparc/crypto/sha512_glue.c 		sha512_sparc64_transform(sctx->state, sctx->buf, 1);
sctx               76 arch/sparc/crypto/sha512_glue.c 		sha512_sparc64_transform(sctx->state, data + done, rounds);
sctx               80 arch/sparc/crypto/sha512_glue.c 	memcpy(sctx->buf, data + done, len - done);
sctx               86 arch/sparc/crypto/sha512_glue.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx               87 arch/sparc/crypto/sha512_glue.c 	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
sctx               91 arch/sparc/crypto/sha512_glue.c 		if ((sctx->count[0] += len) < len)
sctx               92 arch/sparc/crypto/sha512_glue.c 			sctx->count[1]++;
sctx               93 arch/sparc/crypto/sha512_glue.c 		memcpy(sctx->buf + partial, data, len);
sctx               95 arch/sparc/crypto/sha512_glue.c 		__sha512_sparc64_update(sctx, data, len, partial);
sctx              102 arch/sparc/crypto/sha512_glue.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx              109 arch/sparc/crypto/sha512_glue.c 	bits[1] = cpu_to_be64(sctx->count[0] << 3);
sctx              110 arch/sparc/crypto/sha512_glue.c 	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
sctx              113 arch/sparc/crypto/sha512_glue.c 	index = sctx->count[0] % SHA512_BLOCK_SIZE;
sctx              118 arch/sparc/crypto/sha512_glue.c 		if ((sctx->count[0] += padlen) < padlen)
sctx              119 arch/sparc/crypto/sha512_glue.c 			sctx->count[1]++;
sctx              120 arch/sparc/crypto/sha512_glue.c 		memcpy(sctx->buf + index, padding, padlen);
sctx              122 arch/sparc/crypto/sha512_glue.c 		__sha512_sparc64_update(sctx, padding, padlen, index);
sctx              124 arch/sparc/crypto/sha512_glue.c 	__sha512_sparc64_update(sctx, (const u8 *)&bits, sizeof(bits), 112);
sctx              128 arch/sparc/crypto/sha512_glue.c 		dst[i] = cpu_to_be64(sctx->state[i]);
sctx              131 arch/sparc/crypto/sha512_glue.c 	memset(sctx, 0, sizeof(*sctx));
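
sha512_glue keeps the message length as a 128-bit byte counter split across count[0] (low) and count[1] (high): the `(count[0] += len) < len` test detects 64-bit wraparound, and finalization converts bytes to bits by shifting the whole 128-bit value left by 3, as in the bits[0]/bits[1] lines above (the kernel additionally byte-swaps the two words with cpu_to_be64 before storing them). A self-contained sketch of just the arithmetic:

#include <stdint.h>

struct len128 { uint64_t lo, hi; };	/* 128-bit byte count */

static void add_bytes(struct len128 *c, uint64_t len)
{
	c->lo += len;
	if (c->lo < len)	/* wrapped -> carry into the high word */
		c->hi++;
}

/* bits = bytes << 3 across the 64-bit word boundary: the high word
 * picks up the 3 bits shifted out of the low word. */
static void to_bits(const struct len128 *c, uint64_t *bits_hi,
		    uint64_t *bits_lo)
{
	*bits_lo = c->lo << 3;
	*bits_hi = (c->hi << 3) | (c->lo >> 61);
}
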
sctx               42 arch/x86/crypto/poly1305_glue.c 	struct poly1305_simd_desc_ctx *sctx = shash_desc_ctx(desc);
sctx               44 arch/x86/crypto/poly1305_glue.c 	sctx->uset = false;
sctx               46 arch/x86/crypto/poly1305_glue.c 	sctx->wset = false;
sctx               66 arch/x86/crypto/poly1305_glue.c 	struct poly1305_simd_desc_ctx *sctx;
sctx               70 arch/x86/crypto/poly1305_glue.c 	sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base);
sctx               80 arch/x86/crypto/poly1305_glue.c 		if (unlikely(!sctx->wset)) {
sctx               81 arch/x86/crypto/poly1305_glue.c 			if (!sctx->uset) {
sctx               82 arch/x86/crypto/poly1305_glue.c 				memcpy(sctx->u, dctx->r.r, sizeof(sctx->u));
sctx               83 arch/x86/crypto/poly1305_glue.c 				poly1305_simd_mult(sctx->u, dctx->r.r);
sctx               84 arch/x86/crypto/poly1305_glue.c 				sctx->uset = true;
sctx               86 arch/x86/crypto/poly1305_glue.c 			memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u));
sctx               87 arch/x86/crypto/poly1305_glue.c 			poly1305_simd_mult(sctx->u + 5, dctx->r.r);
sctx               88 arch/x86/crypto/poly1305_glue.c 			memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u));
sctx               89 arch/x86/crypto/poly1305_glue.c 			poly1305_simd_mult(sctx->u + 10, dctx->r.r);
sctx               90 arch/x86/crypto/poly1305_glue.c 			sctx->wset = true;
sctx               94 arch/x86/crypto/poly1305_glue.c 				     sctx->u);
sctx              100 arch/x86/crypto/poly1305_glue.c 		if (unlikely(!sctx->uset)) {
sctx              101 arch/x86/crypto/poly1305_glue.c 			memcpy(sctx->u, dctx->r.r, sizeof(sctx->u));
sctx              102 arch/x86/crypto/poly1305_glue.c 			poly1305_simd_mult(sctx->u, dctx->r.r);
sctx              103 arch/x86/crypto/poly1305_glue.c 			sctx->uset = true;
sctx              107 arch/x86/crypto/poly1305_glue.c 				     sctx->u);
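
The poly1305_glue hits show lazy key-power caching: the SIMD path wants r^2 (and the wider path r^3 and r^4 as well), so they are derived from r on first use and the uset/wset flags remember that the work is done. A sketch of that memoization pattern under assumed types -- the 5-limb layout and mult() below are placeholders, not the real field multiply:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct pctx {
	uint32_t r[5];		/* key r (limb layout assumed) */
	uint32_t u[15];		/* cached r^2, r^3, r^4 */
	bool uset, wset;
};

/* Placeholder for the real modular multiply a *= b (mod 2^130 - 5). */
static void mult(uint32_t a[5], const uint32_t b[5]) { (void)a; (void)b; }

static void ensure_powers(struct pctx *c, bool wide)
{
	if (!c->uset) {
		memcpy(c->u, c->r, sizeof(c->r));
		mult(c->u, c->r);		/* u[0..4]   = r^2 */
		c->uset = true;
	}
	if (wide && !c->wset) {
		memcpy(c->u + 5, c->u, sizeof(c->r));
		mult(c->u + 5, c->r);		/* u[5..9]   = r^3 */
		memcpy(c->u + 10, c->u + 5, sizeof(c->r));
		mult(c->u + 10, c->r);		/* u[10..14] = r^4 */
		c->wset = true;
	}
}
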
sctx               36 arch/x86/crypto/sha1_ssse3_glue.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               39 arch/x86/crypto/sha1_ssse3_glue.c 	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
sctx               51 arch/x86/crypto/sha256_ssse3_glue.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               54 arch/x86/crypto/sha256_ssse3_glue.c 	    (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
sctx               50 arch/x86/crypto/sha512_ssse3_glue.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx               53 arch/x86/crypto/sha512_ssse3_glue.c 	    (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
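
All three x86 ssse3 glue hits gate their fast path on the same predicate: if the bytes already buffered plus the new input still fall short of one block, the update degenerates to a memcpy, and the surrounding (elided) lines take the generic fallback instead of touching SIMD state. The predicate in isolation:

#include <stdbool.h>
#include <stdint.h>

#define BLK 64

/* True when buffered + new data stays below one block, i.e. the update
 * only needs buffering and the SIMD transform can be skipped entirely. */
static bool fits_in_partial(uint64_t count, unsigned int len)
{
	return (count % BLK) + len < BLK;
}
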
sctx               43 arch/x86/purgatory/purgatory.c 	struct sha256_state sctx;
sctx               45 arch/x86/purgatory/purgatory.c 	sha256_init(&sctx);
sctx               49 arch/x86/purgatory/purgatory.c 		sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
sctx               51 arch/x86/purgatory/purgatory.c 	sha256_final(&sctx, digest);
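
purgatory.c is a plain consumer of the API: one init, one update per memory region, one final. A sketch of that call shape -- the prototypes and the sha256_state layout are repeated here as assumptions so the fragment stands alone, and the region type is hypothetical:

#include <stddef.h>
#include <stdint.h>

/* Assumed declarations; the real ones live in the kernel's sha256 code. */
struct sha256_state { uint32_t state[8]; uint64_t count; uint8_t buf[64]; };
void sha256_init(struct sha256_state *sctx);
void sha256_update(struct sha256_state *sctx, const uint8_t *data,
		   unsigned int len);
void sha256_final(struct sha256_state *sctx, uint8_t *digest);

struct region { unsigned long start; unsigned long len; };

static void digest_regions(const struct region *r, size_t n, uint8_t *digest)
{
	struct sha256_state sctx;
	size_t i;

	sha256_init(&sctx);
	for (i = 0; i < n; i++)
		sha256_update(&sctx, (const uint8_t *)(r[i].start), r[i].len);
	sha256_final(&sctx, digest);
}
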
sctx              163 crypto/sha3_generic.c 	struct sha3_state *sctx = shash_desc_ctx(desc);
sctx              166 crypto/sha3_generic.c 	sctx->rsiz = 200 - 2 * digest_size;
sctx              167 crypto/sha3_generic.c 	sctx->rsizw = sctx->rsiz / 8;
sctx              168 crypto/sha3_generic.c 	sctx->partial = 0;
sctx              170 crypto/sha3_generic.c 	memset(sctx->st, 0, sizeof(sctx->st));
sctx              178 crypto/sha3_generic.c 	struct sha3_state *sctx = shash_desc_ctx(desc);
sctx              185 crypto/sha3_generic.c 	if ((sctx->partial + len) > (sctx->rsiz - 1)) {
sctx              186 crypto/sha3_generic.c 		if (sctx->partial) {
sctx              187 crypto/sha3_generic.c 			done = -sctx->partial;
sctx              188 crypto/sha3_generic.c 			memcpy(sctx->buf + sctx->partial, data,
sctx              189 crypto/sha3_generic.c 			       done + sctx->rsiz);
sctx              190 crypto/sha3_generic.c 			src = sctx->buf;
sctx              196 crypto/sha3_generic.c 			for (i = 0; i < sctx->rsizw; i++)
sctx              197 crypto/sha3_generic.c 				sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
sctx              198 crypto/sha3_generic.c 			keccakf(sctx->st);
sctx              200 crypto/sha3_generic.c 			done += sctx->rsiz;
sctx              202 crypto/sha3_generic.c 		} while (done + (sctx->rsiz - 1) < len);
sctx              204 crypto/sha3_generic.c 		sctx->partial = 0;
sctx              206 crypto/sha3_generic.c 	memcpy(sctx->buf + sctx->partial, src, len - done);
sctx              207 crypto/sha3_generic.c 	sctx->partial += (len - done);
sctx              215 crypto/sha3_generic.c 	struct sha3_state *sctx = shash_desc_ctx(desc);
sctx              216 crypto/sha3_generic.c 	unsigned int i, inlen = sctx->partial;
sctx              220 crypto/sha3_generic.c 	sctx->buf[inlen++] = 0x06;
sctx              221 crypto/sha3_generic.c 	memset(sctx->buf + inlen, 0, sctx->rsiz - inlen);
sctx              222 crypto/sha3_generic.c 	sctx->buf[sctx->rsiz - 1] |= 0x80;
sctx              224 crypto/sha3_generic.c 	for (i = 0; i < sctx->rsizw; i++)
sctx              225 crypto/sha3_generic.c 		sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);
sctx              227 crypto/sha3_generic.c 	keccakf(sctx->st);
sctx              230 crypto/sha3_generic.c 		put_unaligned_le64(sctx->st[i], digest++);
sctx              233 crypto/sha3_generic.c 		put_unaligned_le32(sctx->st[i], (__le32 *)digest);
sctx              235 crypto/sha3_generic.c 	memset(sctx, 0, sizeof(*sctx));
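
The sha3_generic hits spell out the sponge construction: rsiz = 200 - 2 * digest_size bytes of rate (rsizw = rsiz / 8 lanes) are absorbed by XORing 64-bit lanes into the state and permuting with keccakf, and finalization pads the last partial block with 0x06 ... 0x80 inside the rate. A sketch of those two steps, with the permutation stubbed and the unaligned little-endian load open-coded:

#include <stddef.h>
#include <stdint.h>

/* Placeholder for the 24-round Keccak-f[1600] permutation. */
static void keccakf(uint64_t st[25]) { (void)st; }

static uint64_t le64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

/* Absorb one rate-sized block: XOR rsizw lanes into the state, permute. */
static void absorb_block(uint64_t st[25], const uint8_t *src,
			 unsigned int rsizw)
{
	unsigned int i;

	for (i = 0; i < rsizw; i++)
		st[i] ^= le64(src + 8 * i);
	keccakf(st);
}

/* Pad the final partial block: domain byte 0x06 (SHA-3), zero fill, and
 * 0x80 at the end of the rate.  When the block is one byte short of
 * full, both pad bytes share that byte (0x86). */
static void pad_final(uint8_t *buf, unsigned int partial, unsigned int rsiz)
{
	unsigned int i;

	buf[partial++] = 0x06;
	for (i = partial; i < rsiz; i++)
		buf[i] = 0;
	buf[rsiz - 1] |= 0x80;
}
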
sctx              158 drivers/crypto/nx/nx-aes-xcbc.c 	struct xcbc_state *sctx = shash_desc_ctx(desc);
sctx              160 drivers/crypto/nx/nx-aes-xcbc.c 	memset(sctx, 0, sizeof *sctx);
sctx              169 drivers/crypto/nx/nx-aes-xcbc.c 	struct xcbc_state *sctx = shash_desc_ctx(desc);
sctx              183 drivers/crypto/nx/nx-aes-xcbc.c 	total = sctx->count + len;
sctx              190 drivers/crypto/nx/nx-aes-xcbc.c 		memcpy(sctx->buffer + sctx->count, data, len);
sctx              191 drivers/crypto/nx/nx-aes-xcbc.c 		sctx->count += len;
sctx              202 drivers/crypto/nx/nx-aes-xcbc.c 	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
sctx              228 drivers/crypto/nx/nx-aes-xcbc.c 		if (sctx->count) {
sctx              229 drivers/crypto/nx/nx-aes-xcbc.c 			data_len = sctx->count;
sctx              231 drivers/crypto/nx/nx-aes-xcbc.c 						(u8 *) sctx->buffer,
sctx              234 drivers/crypto/nx/nx-aes-xcbc.c 			if (data_len != sctx->count) {
sctx              240 drivers/crypto/nx/nx-aes-xcbc.c 		data_len = to_process - sctx->count;
sctx              246 drivers/crypto/nx/nx-aes-xcbc.c 		if (data_len != to_process - sctx->count) {
sctx              278 drivers/crypto/nx/nx-aes-xcbc.c 		data += to_process - sctx->count;
sctx              279 drivers/crypto/nx/nx-aes-xcbc.c 		sctx->count = 0;
sctx              284 drivers/crypto/nx/nx-aes-xcbc.c 	memcpy(sctx->buffer, data, leftover);
sctx              285 drivers/crypto/nx/nx-aes-xcbc.c 	sctx->count = leftover;
sctx              294 drivers/crypto/nx/nx-aes-xcbc.c 	struct xcbc_state *sctx = shash_desc_ctx(desc);
sctx              309 drivers/crypto/nx/nx-aes-xcbc.c 	} else if (sctx->count == 0) {
sctx              323 drivers/crypto/nx/nx-aes-xcbc.c 	len = sctx->count;
sctx              324 drivers/crypto/nx/nx-aes-xcbc.c 	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
sctx              327 drivers/crypto/nx/nx-aes-xcbc.c 	if (len != sctx->count) {
sctx               39 drivers/crypto/nx/nx-sha256.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               41 drivers/crypto/nx/nx-sha256.c 	memset(sctx, 0, sizeof *sctx);
sctx               43 drivers/crypto/nx/nx-sha256.c 	sctx->state[0] = __cpu_to_be32(SHA256_H0);
sctx               44 drivers/crypto/nx/nx-sha256.c 	sctx->state[1] = __cpu_to_be32(SHA256_H1);
sctx               45 drivers/crypto/nx/nx-sha256.c 	sctx->state[2] = __cpu_to_be32(SHA256_H2);
sctx               46 drivers/crypto/nx/nx-sha256.c 	sctx->state[3] = __cpu_to_be32(SHA256_H3);
sctx               47 drivers/crypto/nx/nx-sha256.c 	sctx->state[4] = __cpu_to_be32(SHA256_H4);
sctx               48 drivers/crypto/nx/nx-sha256.c 	sctx->state[5] = __cpu_to_be32(SHA256_H5);
sctx               49 drivers/crypto/nx/nx-sha256.c 	sctx->state[6] = __cpu_to_be32(SHA256_H6);
sctx               50 drivers/crypto/nx/nx-sha256.c 	sctx->state[7] = __cpu_to_be32(SHA256_H7);
sctx               51 drivers/crypto/nx/nx-sha256.c 	sctx->count = 0;
sctx               59 drivers/crypto/nx/nx-sha256.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               68 drivers/crypto/nx/nx-sha256.c 	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
sctx               76 drivers/crypto/nx/nx-sha256.c 	total = (sctx->count % SHA256_BLOCK_SIZE) + len;
sctx               78 drivers/crypto/nx/nx-sha256.c 		memcpy(sctx->buf + buf_len, data, len);
sctx               79 drivers/crypto/nx/nx-sha256.c 		sctx->count += len;
sctx               83 drivers/crypto/nx/nx-sha256.c 	memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
sctx               93 drivers/crypto/nx/nx-sha256.c 	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
sctx              109 drivers/crypto/nx/nx-sha256.c 						 (u8 *) sctx->buf,
sctx              167 drivers/crypto/nx/nx-sha256.c 		memcpy(sctx->buf, data, leftover);
sctx              169 drivers/crypto/nx/nx-sha256.c 	sctx->count += len;
sctx              170 drivers/crypto/nx/nx-sha256.c 	memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
sctx              178 drivers/crypto/nx/nx-sha256.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              196 drivers/crypto/nx/nx-sha256.c 	if (sctx->count >= SHA256_BLOCK_SIZE) {
sctx              199 drivers/crypto/nx/nx-sha256.c 		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
sctx              207 drivers/crypto/nx/nx-sha256.c 	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
sctx              209 drivers/crypto/nx/nx-sha256.c 	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
sctx              210 drivers/crypto/nx/nx-sha256.c 	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
sctx              213 drivers/crypto/nx/nx-sha256.c 	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
sctx              239 drivers/crypto/nx/nx-sha256.c 	atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
sctx              248 drivers/crypto/nx/nx-sha256.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              250 drivers/crypto/nx/nx-sha256.c 	memcpy(out, sctx, sizeof(*sctx));
sctx              257 drivers/crypto/nx/nx-sha256.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              259 drivers/crypto/nx/nx-sha256.c 	memcpy(sctx, in, sizeof(*sctx));
sctx               39 drivers/crypto/nx/nx-sha512.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx               41 drivers/crypto/nx/nx-sha512.c 	memset(sctx, 0, sizeof *sctx);
sctx               43 drivers/crypto/nx/nx-sha512.c 	sctx->state[0] = __cpu_to_be64(SHA512_H0);
sctx               44 drivers/crypto/nx/nx-sha512.c 	sctx->state[1] = __cpu_to_be64(SHA512_H1);
sctx               45 drivers/crypto/nx/nx-sha512.c 	sctx->state[2] = __cpu_to_be64(SHA512_H2);
sctx               46 drivers/crypto/nx/nx-sha512.c 	sctx->state[3] = __cpu_to_be64(SHA512_H3);
sctx               47 drivers/crypto/nx/nx-sha512.c 	sctx->state[4] = __cpu_to_be64(SHA512_H4);
sctx               48 drivers/crypto/nx/nx-sha512.c 	sctx->state[5] = __cpu_to_be64(SHA512_H5);
sctx               49 drivers/crypto/nx/nx-sha512.c 	sctx->state[6] = __cpu_to_be64(SHA512_H6);
sctx               50 drivers/crypto/nx/nx-sha512.c 	sctx->state[7] = __cpu_to_be64(SHA512_H7);
sctx               51 drivers/crypto/nx/nx-sha512.c 	sctx->count[0] = 0;
sctx               59 drivers/crypto/nx/nx-sha512.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx               68 drivers/crypto/nx/nx-sha512.c 	u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
sctx               76 drivers/crypto/nx/nx-sha512.c 	total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
sctx               78 drivers/crypto/nx/nx-sha512.c 		memcpy(sctx->buf + buf_len, data, len);
sctx               79 drivers/crypto/nx/nx-sha512.c 		sctx->count[0] += len;
sctx               83 drivers/crypto/nx/nx-sha512.c 	memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
sctx               93 drivers/crypto/nx/nx-sha512.c 	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
sctx              109 drivers/crypto/nx/nx-sha512.c 						 (u8 *) sctx->buf,
sctx              171 drivers/crypto/nx/nx-sha512.c 		memcpy(sctx->buf, data, leftover);
sctx              172 drivers/crypto/nx/nx-sha512.c 	sctx->count[0] += len;
sctx              173 drivers/crypto/nx/nx-sha512.c 	memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
sctx              181 drivers/crypto/nx/nx-sha512.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx              200 drivers/crypto/nx/nx-sha512.c 	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
sctx              203 drivers/crypto/nx/nx-sha512.c 		memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
sctx              214 drivers/crypto/nx/nx-sha512.c 	count0 = sctx->count[0] * 8;
sctx              218 drivers/crypto/nx/nx-sha512.c 	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
sctx              219 drivers/crypto/nx/nx-sha512.c 	in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
sctx              222 drivers/crypto/nx/nx-sha512.c 	if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
sctx              244 drivers/crypto/nx/nx-sha512.c 	atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
sctx              254 drivers/crypto/nx/nx-sha512.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx              256 drivers/crypto/nx/nx-sha512.c 	memcpy(out, sctx, sizeof(*sctx));
sctx              263 drivers/crypto/nx/nx-sha512.c 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx              265 drivers/crypto/nx/nx-sha512.c 	memcpy(sctx, in, sizeof(*sctx));
sctx              276 drivers/crypto/padlock-sha.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              278 drivers/crypto/padlock-sha.c 	*sctx = (struct sha1_state){
sctx              288 drivers/crypto/padlock-sha.c 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              296 drivers/crypto/padlock-sha.c 	partial = sctx->count & 0x3f;
sctx              297 drivers/crypto/padlock-sha.c 	sctx->count += len;
sctx              300 drivers/crypto/padlock-sha.c 	memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);
sctx              307 drivers/crypto/padlock-sha.c 			memcpy(sctx->buffer + partial, data,
sctx              309 drivers/crypto/padlock-sha.c 			src = sctx->buffer;
sctx              328 drivers/crypto/padlock-sha.c 	memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
sctx              329 drivers/crypto/padlock-sha.c 	memcpy(sctx->buffer + partial, src, len - done);
sctx              359 drivers/crypto/padlock-sha.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              361 drivers/crypto/padlock-sha.c 	*sctx = (struct sha256_state){
sctx              372 drivers/crypto/padlock-sha.c 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              380 drivers/crypto/padlock-sha.c 	partial = sctx->count & 0x3f;
sctx              381 drivers/crypto/padlock-sha.c 	sctx->count += len;
sctx              384 drivers/crypto/padlock-sha.c 	memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);
sctx              391 drivers/crypto/padlock-sha.c 			memcpy(sctx->buf + partial, data,
sctx              393 drivers/crypto/padlock-sha.c 			src = sctx->buf;
sctx              412 drivers/crypto/padlock-sha.c 	memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
sctx              413 drivers/crypto/padlock-sha.c 	memcpy(sctx->buf + partial, src, len - done);
sctx              446 drivers/crypto/padlock-sha.c 	void *sctx = shash_desc_ctx(desc);
sctx              448 drivers/crypto/padlock-sha.c 	memcpy(out, sctx, statesize);
sctx              456 drivers/crypto/padlock-sha.c 	void *sctx = shash_desc_ctx(desc);
sctx              458 drivers/crypto/padlock-sha.c 	memcpy(sctx, in, statesize);
sctx               62 drivers/md/dm-switch.c 	struct switch_ctx *sctx;
sctx               64 drivers/md/dm-switch.c 	sctx = kzalloc(struct_size(sctx, path_list, nr_paths), GFP_KERNEL);
sctx               65 drivers/md/dm-switch.c 	if (!sctx)
sctx               68 drivers/md/dm-switch.c 	sctx->ti = ti;
sctx               69 drivers/md/dm-switch.c 	sctx->region_size = region_size;
sctx               71 drivers/md/dm-switch.c 	ti->private = sctx;
sctx               73 drivers/md/dm-switch.c 	return sctx;
sctx               78 drivers/md/dm-switch.c 	struct switch_ctx *sctx = ti->private;
sctx               82 drivers/md/dm-switch.c 	if (!(sctx->region_size & (sctx->region_size - 1)))
sctx               83 drivers/md/dm-switch.c 		sctx->region_size_bits = __ffs(sctx->region_size);
sctx               85 drivers/md/dm-switch.c 		sctx->region_size_bits = -1;
sctx               87 drivers/md/dm-switch.c 	sctx->region_table_entry_bits = 1;
sctx               88 drivers/md/dm-switch.c 	while (sctx->region_table_entry_bits < sizeof(region_table_slot_t) * 8 &&
sctx               89 drivers/md/dm-switch.c 	       (region_table_slot_t)1 << sctx->region_table_entry_bits < nr_paths)
sctx               90 drivers/md/dm-switch.c 		sctx->region_table_entry_bits++;
sctx               92 drivers/md/dm-switch.c 	sctx->region_entries_per_slot = (sizeof(region_table_slot_t) * 8) / sctx->region_table_entry_bits;
sctx               93 drivers/md/dm-switch.c 	if (!(sctx->region_entries_per_slot & (sctx->region_entries_per_slot - 1)))
sctx               94 drivers/md/dm-switch.c 		sctx->region_entries_per_slot_bits = __ffs(sctx->region_entries_per_slot);
sctx               96 drivers/md/dm-switch.c 		sctx->region_entries_per_slot_bits = -1;
sctx               98 drivers/md/dm-switch.c 	if (sector_div(nr_regions, sctx->region_size))
sctx              105 drivers/md/dm-switch.c 	sctx->nr_regions = nr_regions;
sctx              108 drivers/md/dm-switch.c 	if (sector_div(nr_slots, sctx->region_entries_per_slot))
sctx              116 drivers/md/dm-switch.c 	sctx->region_table = vmalloc(array_size(nr_slots,
sctx              118 drivers/md/dm-switch.c 	if (!sctx->region_table) {
sctx              126 drivers/md/dm-switch.c static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr,
sctx              129 drivers/md/dm-switch.c 	if (sctx->region_entries_per_slot_bits >= 0) {
sctx              130 drivers/md/dm-switch.c 		*region_index = region_nr >> sctx->region_entries_per_slot_bits;
sctx              131 drivers/md/dm-switch.c 		*bit = region_nr & (sctx->region_entries_per_slot - 1);
sctx              133 drivers/md/dm-switch.c 		*region_index = region_nr / sctx->region_entries_per_slot;
sctx              134 drivers/md/dm-switch.c 		*bit = region_nr % sctx->region_entries_per_slot;
sctx              137 drivers/md/dm-switch.c 	*bit *= sctx->region_table_entry_bits;
sctx              140 drivers/md/dm-switch.c static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr)
sctx              145 drivers/md/dm-switch.c 	switch_get_position(sctx, region_nr, &region_index, &bit);
sctx              147 drivers/md/dm-switch.c 	return (READ_ONCE(sctx->region_table[region_index]) >> bit) &
sctx              148 drivers/md/dm-switch.c 		((1 << sctx->region_table_entry_bits) - 1);
sctx              154 drivers/md/dm-switch.c static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
sctx              160 drivers/md/dm-switch.c 	if (sctx->region_size_bits >= 0)
sctx              161 drivers/md/dm-switch.c 		p >>= sctx->region_size_bits;
sctx              163 drivers/md/dm-switch.c 		sector_div(p, sctx->region_size);
sctx              165 drivers/md/dm-switch.c 	path_nr = switch_region_table_read(sctx, p);
sctx              168 drivers/md/dm-switch.c 	if (unlikely(path_nr >= sctx->nr_paths))
sctx              174 drivers/md/dm-switch.c static void switch_region_table_write(struct switch_ctx *sctx, unsigned long region_nr,
sctx              181 drivers/md/dm-switch.c 	switch_get_position(sctx, region_nr, &region_index, &bit);
sctx              183 drivers/md/dm-switch.c 	pte = sctx->region_table[region_index];
sctx              184 drivers/md/dm-switch.c 	pte &= ~((((region_table_slot_t)1 << sctx->region_table_entry_bits) - 1) << bit);
sctx              186 drivers/md/dm-switch.c 	sctx->region_table[region_index] = pte;
sctx              192 drivers/md/dm-switch.c static void initialise_region_table(struct switch_ctx *sctx)
sctx              197 drivers/md/dm-switch.c 	for (region_nr = 0; region_nr < sctx->nr_regions; region_nr++) {
sctx              198 drivers/md/dm-switch.c 		switch_region_table_write(sctx, region_nr, path_nr);
sctx              199 drivers/md/dm-switch.c 		if (++path_nr >= sctx->nr_paths)
sctx              206 drivers/md/dm-switch.c 	struct switch_ctx *sctx = ti->private;
sctx              211 drivers/md/dm-switch.c 			  &sctx->path_list[sctx->nr_paths].dmdev);
sctx              219 drivers/md/dm-switch.c 		dm_put_device(ti, sctx->path_list[sctx->nr_paths].dmdev);
sctx              223 drivers/md/dm-switch.c 	sctx->path_list[sctx->nr_paths].start = start;
sctx              225 drivers/md/dm-switch.c 	sctx->nr_paths++;
sctx              235 drivers/md/dm-switch.c 	struct switch_ctx *sctx = ti->private;
sctx              237 drivers/md/dm-switch.c 	while (sctx->nr_paths--)
sctx              238 drivers/md/dm-switch.c 		dm_put_device(ti, sctx->path_list[sctx->nr_paths].dmdev);
sctx              240 drivers/md/dm-switch.c 	vfree(sctx->region_table);
sctx              241 drivers/md/dm-switch.c 	kfree(sctx);
sctx              260 drivers/md/dm-switch.c 	struct switch_ctx *sctx;
sctx              286 drivers/md/dm-switch.c 	sctx = alloc_switch_ctx(ti, nr_paths, region_size);
sctx              287 drivers/md/dm-switch.c 	if (!sctx) {
sctx              306 drivers/md/dm-switch.c 	initialise_region_table(sctx);
sctx              321 drivers/md/dm-switch.c 	struct switch_ctx *sctx = ti->private;
sctx              323 drivers/md/dm-switch.c 	unsigned path_nr = switch_get_path_nr(sctx, offset);
sctx              325 drivers/md/dm-switch.c 	bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
sctx              326 drivers/md/dm-switch.c 	bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
sctx              373 drivers/md/dm-switch.c static int process_set_region_mappings(struct switch_ctx *sctx,
sctx              413 drivers/md/dm-switch.c 			    unlikely(region_index + num_write >= sctx->nr_regions)) {
sctx              415 drivers/md/dm-switch.c 				       region_index, num_write, sctx->nr_regions);
sctx              421 drivers/md/dm-switch.c 				path_nr = switch_region_table_read(sctx, region_index - cycle_length);
sctx              422 drivers/md/dm-switch.c 				switch_region_table_write(sctx, region_index, path_nr);
sctx              449 drivers/md/dm-switch.c 		if (unlikely(region_index >= sctx->nr_regions)) {
sctx              450 drivers/md/dm-switch.c 			DMWARN("invalid set_region_mappings region number: %lu >= %lu", region_index, sctx->nr_regions);
sctx              453 drivers/md/dm-switch.c 		if (unlikely(path_nr >= sctx->nr_paths)) {
sctx              454 drivers/md/dm-switch.c 			DMWARN("invalid set_region_mappings device: %lu >= %u", path_nr, sctx->nr_paths);
sctx              458 drivers/md/dm-switch.c 		switch_region_table_write(sctx, region_index, path_nr);
sctx              474 drivers/md/dm-switch.c 	struct switch_ctx *sctx = ti->private;
sctx              480 drivers/md/dm-switch.c 		r = process_set_region_mappings(sctx, argc, argv);
sctx              492 drivers/md/dm-switch.c 	struct switch_ctx *sctx = ti->private;
sctx              502 drivers/md/dm-switch.c 		DMEMIT("%u %u 0", sctx->nr_paths, sctx->region_size);
sctx              503 drivers/md/dm-switch.c 		for (path_nr = 0; path_nr < sctx->nr_paths; path_nr++)
sctx              504 drivers/md/dm-switch.c 			DMEMIT(" %s %llu", sctx->path_list[path_nr].dmdev->name,
sctx              505 drivers/md/dm-switch.c 			       (unsigned long long)sctx->path_list[path_nr].start);
sctx              517 drivers/md/dm-switch.c 	struct switch_ctx *sctx = ti->private;
sctx              520 drivers/md/dm-switch.c 	path_nr = switch_get_path_nr(sctx, 0);
sctx              522 drivers/md/dm-switch.c 	*bdev = sctx->path_list[path_nr].dmdev->bdev;
sctx              527 drivers/md/dm-switch.c 	if (ti->len + sctx->path_list[path_nr].start !=
sctx              536 drivers/md/dm-switch.c 	struct switch_ctx *sctx = ti->private;
sctx              540 drivers/md/dm-switch.c 	for (path_nr = 0; path_nr < sctx->nr_paths; path_nr++) {
sctx              541 drivers/md/dm-switch.c 		r = fn(ti, sctx->path_list[path_nr].dmdev,
sctx              542 drivers/md/dm-switch.c 			 sctx->path_list[path_nr].start, ti->len, data);
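
The dm-switch hits document a bit-packed lookup table: one small path index per region, packed into 64-bit region_table slots, with region_table_entry_bits chosen as the smallest width that can represent nr_paths and region_entries_per_slot entries sharing each slot. The driver additionally caches __ffs() shift counts (region_size_bits, region_entries_per_slot_bits) to replace div/mod with shifts when the divisors are powers of two; the sketch below keeps the plain div/mod form:

#include <stdint.h>

typedef uint64_t slot_t;

struct table {
	slot_t *slots;
	unsigned int entry_bits;	/* bits per packed entry */
	unsigned int entries_per_slot;	/* 64 / entry_bits */
};

static void get_position(const struct table *t, unsigned long nr,
			 unsigned long *slot, unsigned int *bit)
{
	*slot = nr / t->entries_per_slot;
	*bit  = (nr % t->entries_per_slot) * t->entry_bits;
}

static unsigned int table_read(const struct table *t, unsigned long nr)
{
	unsigned long slot;
	unsigned int bit;

	get_position(t, nr, &slot, &bit);
	return (unsigned int)((t->slots[slot] >> bit) &
			      (((slot_t)1 << t->entry_bits) - 1));
}

static void table_write(struct table *t, unsigned long nr, unsigned int value)
{
	unsigned long slot;
	unsigned int bit;
	slot_t pte;

	get_position(t, nr, &slot, &bit);
	pte = t->slots[slot];
	pte &= ~((((slot_t)1 << t->entry_bits) - 1) << bit);	/* clear */
	pte |= (slot_t)value << bit;				/* set   */
	t->slots[slot] = pte;
}
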
sctx              197 drivers/misc/fastrpc.c 	struct fastrpc_session_ctx *sctx;
sctx              288 drivers/misc/fastrpc.c 	if (fl->sctx && fl->sctx->sid)
sctx              289 drivers/misc/fastrpc.c 		buf->phys += ((u64)fl->sctx->sid << 32);
sctx              596 drivers/misc/fastrpc.c 	struct fastrpc_session_ctx *sess = fl->sctx;
sctx              630 drivers/misc/fastrpc.c 	map->phys += ((u64)fl->sctx->sid << 32);
sctx              714 drivers/misc/fastrpc.c 	struct device *dev = ctx->fl->sctx->dev;
sctx              739 drivers/misc/fastrpc.c 	struct device *dev = ctx->fl->sctx->dev;
sctx              882 drivers/misc/fastrpc.c static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
sctx              914 drivers/misc/fastrpc.c 	if (!fl->sctx)
sctx              933 drivers/misc/fastrpc.c 	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
sctx              964 drivers/misc/fastrpc.c 		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
sctx             1019 drivers/misc/fastrpc.c 	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
sctx             1155 drivers/misc/fastrpc.c 	fastrpc_session_free(cctx, fl->sctx);
sctx             1187 drivers/misc/fastrpc.c 	fl->sctx = fastrpc_session_alloc(cctx);
sctx             1188 drivers/misc/fastrpc.c 	if (!fl->sctx) {
sctx             1213 drivers/misc/fastrpc.c 	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
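
The fastrpc hits show buffer and map addresses being tagged with the session id: phys += ((u64)sid << 32) folds the per-session SMMU id into the upper half of the 64-bit device address. As a one-liner, assuming (as the driver's scheme implies) that the physical address itself fits in the low 32 bits:

#include <stdint.h>

/* Fold a per-session id into the top half of a 64-bit device address,
 * leaving the low half carrying the physical address. */
static uint64_t tag_with_session(uint64_t phys, uint32_t sid)
{
	return phys + ((uint64_t)sid << 32);
}
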
sctx              237 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	struct submit_ctx sctx;
sctx              242 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	rtw_sctx_init(&sctx, timeout_ms);
sctx              243 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 	pxmitbuf->sctx = &sctx;
sctx              248 drivers/staging/rtl8188eu/core/rtw_mlme_ext.c 		ret = rtw_sctx_wait(&sctx);
sctx             1153 drivers/staging/rtl8188eu/core/rtw_xmit.c 		if (pxmitbuf->sctx) {
sctx             1155 drivers/staging/rtl8188eu/core/rtw_xmit.c 			rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
sctx             1196 drivers/staging/rtl8188eu/core/rtw_xmit.c 		if (pxmitbuf->sctx) {
sctx             1198 drivers/staging/rtl8188eu/core/rtw_xmit.c 			rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
sctx             1214 drivers/staging/rtl8188eu/core/rtw_xmit.c 	if (pxmitbuf->sctx) {
sctx             1216 drivers/staging/rtl8188eu/core/rtw_xmit.c 		rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE);
sctx             2007 drivers/staging/rtl8188eu/core/rtw_xmit.c void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
sctx             2009 drivers/staging/rtl8188eu/core/rtw_xmit.c 	sctx->timeout_ms = timeout_ms;
sctx             2010 drivers/staging/rtl8188eu/core/rtw_xmit.c 	sctx->submit_time = jiffies;
sctx             2011 drivers/staging/rtl8188eu/core/rtw_xmit.c 	init_completion(&sctx->done);
sctx             2012 drivers/staging/rtl8188eu/core/rtw_xmit.c 	sctx->status = RTW_SCTX_SUBMITTED;
sctx             2015 drivers/staging/rtl8188eu/core/rtw_xmit.c int rtw_sctx_wait(struct submit_ctx *sctx)
sctx             2021 drivers/staging/rtl8188eu/core/rtw_xmit.c 	expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) : MAX_SCHEDULE_TIMEOUT;
sctx             2022 drivers/staging/rtl8188eu/core/rtw_xmit.c 	if (!wait_for_completion_timeout(&sctx->done, expire)) {
sctx             2027 drivers/staging/rtl8188eu/core/rtw_xmit.c 		status = sctx->status;
sctx             2051 drivers/staging/rtl8188eu/core/rtw_xmit.c void rtw_sctx_done_err(struct submit_ctx **sctx, int status)
sctx             2053 drivers/staging/rtl8188eu/core/rtw_xmit.c 	if (*sctx) {
sctx             2056 drivers/staging/rtl8188eu/core/rtw_xmit.c 		(*sctx)->status = status;
sctx             2057 drivers/staging/rtl8188eu/core/rtw_xmit.c 		complete(&((*sctx)->done));
sctx             2058 drivers/staging/rtl8188eu/core/rtw_xmit.c 		*sctx = NULL;
sctx              392 drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c 		rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);
sctx              185 drivers/staging/rtl8188eu/include/rtw_xmit.h void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms);
sctx              186 drivers/staging/rtl8188eu/include/rtw_xmit.h int rtw_sctx_wait(struct submit_ctx *sctx);
sctx              187 drivers/staging/rtl8188eu/include/rtw_xmit.h void rtw_sctx_done_err(struct submit_ctx **sctx, int status);
sctx              199 drivers/staging/rtl8188eu/include/rtw_xmit.h 	struct submit_ctx *sctx;
sctx              647 drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c 	rtw_sctx_done_err(&pxmitbuf->sctx,
sctx              675 drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c 		rtw_sctx_done_err(&xmitbuf->sctx, RTW_SCTX_DONE_TX_DENY);
sctx              721 drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c 		rtw_sctx_done_err(&xmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
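
The rtw submit_ctx hits describe a one-shot completion handshake: the submitter initializes the context (timeout, timestamp, completion, SUBMITTED status), hands the buffer off with a pointer to it, and waits; the completion side records a status, fires the completion, and NULLs the pointer through a double pointer so the context is signalled at most once even if several error paths run. A userspace analogue of the same handshake using pthreads (names and status values hypothetical; the kernel version uses struct completion and honours timeout_ms, which this sketch omits):

#include <pthread.h>
#include <stdbool.h>

enum { SCTX_SUBMITTED = -1, SCTX_DONE_SUCCESS = 0 };

struct submit_ctx {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	bool            completed;
	int             status;
};

static void sctx_init(struct submit_ctx *s)
{
	pthread_mutex_init(&s->lock, NULL);
	pthread_cond_init(&s->done, NULL);
	s->completed = false;
	s->status = SCTX_SUBMITTED;
}

/* Completer side: report a status once, then forget the context so a
 * later error path cannot signal it again (mirrors the **sctx idiom). */
static void sctx_done_err(struct submit_ctx **sp, int status)
{
	struct submit_ctx *s = *sp;

	if (!s)
		return;
	pthread_mutex_lock(&s->lock);
	s->status = status;
	s->completed = true;
	pthread_cond_signal(&s->done);
	pthread_mutex_unlock(&s->lock);
	*sp = NULL;
}

static int sctx_wait(struct submit_ctx *s)
{
	pthread_mutex_lock(&s->lock);
	while (!s->completed)
		pthread_cond_wait(&s->done, &s->lock);
	pthread_mutex_unlock(&s->lock);
	return s->status;
}
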
sctx              496 drivers/staging/rtl8723bs/core/rtw_cmd.c 			if (pcmd->sctx) {
sctx              501 drivers/staging/rtl8723bs/core/rtw_cmd.c 					rtw_sctx_done(&pcmd->sctx);
sctx              503 drivers/staging/rtl8723bs/core/rtw_cmd.c 					rtw_sctx_done_err(&pcmd->sctx, RTW_SCTX_DONE_CMD_ERROR);
sctx              726 drivers/staging/rtl8723bs/core/rtw_cmd.c 	struct submit_ctx sctx;
sctx              748 drivers/staging/rtl8723bs/core/rtw_cmd.c 			pcmd->sctx = &sctx;
sctx              749 drivers/staging/rtl8723bs/core/rtw_cmd.c 			rtw_sctx_init(&sctx, 2000);
sctx              755 drivers/staging/rtl8723bs/core/rtw_cmd.c 			rtw_sctx_wait(&sctx, __func__);
sctx              757 drivers/staging/rtl8723bs/core/rtw_cmd.c 				if (sctx.status == RTW_SCTX_SUBMITTED)
sctx              758 drivers/staging/rtl8723bs/core/rtw_cmd.c 					pcmd->sctx = NULL;
sctx             2348 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	struct submit_ctx sctx;
sctx             2357 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	rtw_sctx_init(&sctx, timeout_ms);
sctx             2358 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	pxmitbuf->sctx = &sctx;
sctx             2363 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 		ret = rtw_sctx_wait(&sctx, __func__);
sctx             2366 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c 	pxmitbuf->sctx = NULL;
sctx             1631 drivers/staging/rtl8723bs/core/rtw_xmit.c 		if (pxmitbuf->sctx) {
sctx             1633 drivers/staging/rtl8723bs/core/rtw_xmit.c 			rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
sctx             1707 drivers/staging/rtl8723bs/core/rtw_xmit.c 		if (pxmitbuf->sctx) {
sctx             1709 drivers/staging/rtl8723bs/core/rtw_xmit.c 			rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
sctx             1780 drivers/staging/rtl8723bs/core/rtw_xmit.c 		if (pxmitbuf->sctx) {
sctx             1782 drivers/staging/rtl8723bs/core/rtw_xmit.c 			rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
sctx             1805 drivers/staging/rtl8723bs/core/rtw_xmit.c 	if (pxmitbuf->sctx) {
sctx             1807 drivers/staging/rtl8723bs/core/rtw_xmit.c 		rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE);
sctx             3002 drivers/staging/rtl8723bs/core/rtw_xmit.c void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
sctx             3004 drivers/staging/rtl8723bs/core/rtw_xmit.c 	sctx->timeout_ms = timeout_ms;
sctx             3005 drivers/staging/rtl8723bs/core/rtw_xmit.c 	sctx->submit_time = jiffies;
sctx             3006 drivers/staging/rtl8723bs/core/rtw_xmit.c 	init_completion(&sctx->done);
sctx             3007 drivers/staging/rtl8723bs/core/rtw_xmit.c 	sctx->status = RTW_SCTX_SUBMITTED;
sctx             3010 drivers/staging/rtl8723bs/core/rtw_xmit.c int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg)
sctx             3016 drivers/staging/rtl8723bs/core/rtw_xmit.c 	expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) : MAX_SCHEDULE_TIMEOUT;
sctx             3017 drivers/staging/rtl8723bs/core/rtw_xmit.c 	if (!wait_for_completion_timeout(&sctx->done, expire)) {
sctx             3022 drivers/staging/rtl8723bs/core/rtw_xmit.c 		status = sctx->status;
sctx             3047 drivers/staging/rtl8723bs/core/rtw_xmit.c void rtw_sctx_done_err(struct submit_ctx **sctx, int status)
sctx             3049 drivers/staging/rtl8723bs/core/rtw_xmit.c 	if (*sctx) {
sctx             3052 drivers/staging/rtl8723bs/core/rtw_xmit.c 		(*sctx)->status = status;
sctx             3053 drivers/staging/rtl8723bs/core/rtw_xmit.c 		complete(&((*sctx)->done));
sctx             3054 drivers/staging/rtl8723bs/core/rtw_xmit.c 		*sctx = NULL;
sctx             3058 drivers/staging/rtl8723bs/core/rtw_xmit.c void rtw_sctx_done(struct submit_ctx **sctx)
sctx             3060 drivers/staging/rtl8723bs/core/rtw_xmit.c 	rtw_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS);
sctx              537 drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c 			rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
sctx              498 drivers/staging/rtl8723bs/hal/sdio_ops.c 		&xmitbuf->sctx,
sctx               30 drivers/staging/rtl8723bs/include/rtw_cmd.h 		struct submit_ctx *sctx;
sctx              244 drivers/staging/rtl8723bs/include/rtw_xmit.h void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms);
sctx              245 drivers/staging/rtl8723bs/include/rtw_xmit.h int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg);
sctx              246 drivers/staging/rtl8723bs/include/rtw_xmit.h void rtw_sctx_done_err(struct submit_ctx **sctx, int status);
sctx              247 drivers/staging/rtl8723bs/include/rtw_xmit.h void rtw_sctx_done(struct submit_ctx **sctx);
sctx              267 drivers/staging/rtl8723bs/include/rtw_xmit.h 	struct submit_ctx *sctx;
sctx               85 fs/btrfs/scrub.c 	struct scrub_ctx	*sctx;
sctx              106 fs/btrfs/scrub.c 	struct scrub_ctx	*sctx;
sctx              123 fs/btrfs/scrub.c 	struct scrub_ctx	*sctx;
sctx              209 fs/btrfs/scrub.c static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
sctx              210 fs/btrfs/scrub.c static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
sctx              235 fs/btrfs/scrub.c static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
sctx              237 fs/btrfs/scrub.c static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
sctx              249 fs/btrfs/scrub.c static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
sctx              251 fs/btrfs/scrub.c static void scrub_wr_submit(struct scrub_ctx *sctx);
sctx              256 fs/btrfs/scrub.c static void scrub_put_ctx(struct scrub_ctx *sctx);
sctx              264 fs/btrfs/scrub.c static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
sctx              266 fs/btrfs/scrub.c 	refcount_inc(&sctx->refs);
sctx              267 fs/btrfs/scrub.c 	atomic_inc(&sctx->bios_in_flight);
sctx              270 fs/btrfs/scrub.c static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
sctx              272 fs/btrfs/scrub.c 	atomic_dec(&sctx->bios_in_flight);
sctx              273 fs/btrfs/scrub.c 	wake_up(&sctx->list_wait);
sctx              274 fs/btrfs/scrub.c 	scrub_put_ctx(sctx);
sctx              527 fs/btrfs/scrub.c static void scrub_free_csums(struct scrub_ctx *sctx)
sctx              529 fs/btrfs/scrub.c 	while (!list_empty(&sctx->csum_list)) {
sctx              531 fs/btrfs/scrub.c 		sum = list_first_entry(&sctx->csum_list,
sctx              538 fs/btrfs/scrub.c static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
sctx              542 fs/btrfs/scrub.c 	if (!sctx)
sctx              546 fs/btrfs/scrub.c 	if (sctx->curr != -1) {
sctx              547 fs/btrfs/scrub.c 		struct scrub_bio *sbio = sctx->bios[sctx->curr];
sctx              557 fs/btrfs/scrub.c 		struct scrub_bio *sbio = sctx->bios[i];
sctx              564 fs/btrfs/scrub.c 	kfree(sctx->wr_curr_bio);
sctx              565 fs/btrfs/scrub.c 	scrub_free_csums(sctx);
sctx              566 fs/btrfs/scrub.c 	kfree(sctx);
sctx              569 fs/btrfs/scrub.c static void scrub_put_ctx(struct scrub_ctx *sctx)
sctx              571 fs/btrfs/scrub.c 	if (refcount_dec_and_test(&sctx->refs))
sctx              572 fs/btrfs/scrub.c 		scrub_free_ctx(sctx);
sctx              578 fs/btrfs/scrub.c 	struct scrub_ctx *sctx;
sctx              581 fs/btrfs/scrub.c 	sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
sctx              582 fs/btrfs/scrub.c 	if (!sctx)
sctx              584 fs/btrfs/scrub.c 	refcount_set(&sctx->refs, 1);
sctx              585 fs/btrfs/scrub.c 	sctx->is_dev_replace = is_dev_replace;
sctx              586 fs/btrfs/scrub.c 	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
sctx              587 fs/btrfs/scrub.c 	sctx->curr = -1;
sctx              588 fs/btrfs/scrub.c 	sctx->fs_info = fs_info;
sctx              589 fs/btrfs/scrub.c 	INIT_LIST_HEAD(&sctx->csum_list);
sctx              596 fs/btrfs/scrub.c 		sctx->bios[i] = sbio;
sctx              599 fs/btrfs/scrub.c 		sbio->sctx = sctx;
sctx              605 fs/btrfs/scrub.c 			sctx->bios[i]->next_free = i + 1;
sctx              607 fs/btrfs/scrub.c 			sctx->bios[i]->next_free = -1;
sctx              609 fs/btrfs/scrub.c 	sctx->first_free = 0;
sctx              610 fs/btrfs/scrub.c 	atomic_set(&sctx->bios_in_flight, 0);
sctx              611 fs/btrfs/scrub.c 	atomic_set(&sctx->workers_pending, 0);
sctx              612 fs/btrfs/scrub.c 	atomic_set(&sctx->cancel_req, 0);
sctx              613 fs/btrfs/scrub.c 	sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
sctx              615 fs/btrfs/scrub.c 	spin_lock_init(&sctx->list_lock);
sctx              616 fs/btrfs/scrub.c 	spin_lock_init(&sctx->stat_lock);
sctx              617 fs/btrfs/scrub.c 	init_waitqueue_head(&sctx->list_wait);
sctx              619 fs/btrfs/scrub.c 	WARN_ON(sctx->wr_curr_bio != NULL);
sctx              620 fs/btrfs/scrub.c 	mutex_init(&sctx->wr_lock);
sctx              621 fs/btrfs/scrub.c 	sctx->wr_curr_bio = NULL;
sctx              624 fs/btrfs/scrub.c 		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
sctx              625 fs/btrfs/scrub.c 		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
sctx              626 fs/btrfs/scrub.c 		sctx->flush_all_writes = false;
sctx              629 fs/btrfs/scrub.c 	return sctx;
sctx              632 fs/btrfs/scrub.c 	scrub_free_ctx(sctx);
sctx              748 fs/btrfs/scrub.c 	fs_info = sblock->sctx->fs_info;
sctx              824 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sblock_to_check->sctx;
sctx              843 fs/btrfs/scrub.c 	fs_info = sctx->fs_info;
sctx              850 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx              851 fs/btrfs/scrub.c 		++sctx->stat.super_errors;
sctx              852 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx              883 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx              885 fs/btrfs/scrub.c 			sctx->stat.malloc_errors++;
sctx              886 fs/btrfs/scrub.c 		sctx->stat.read_errors++;
sctx              887 fs/btrfs/scrub.c 		sctx->stat.uncorrectable_errors++;
sctx              888 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx              924 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx              925 fs/btrfs/scrub.c 		sctx->stat.malloc_errors++;
sctx              926 fs/btrfs/scrub.c 		sctx->stat.read_errors++;
sctx              927 fs/btrfs/scrub.c 		sctx->stat.uncorrectable_errors++;
sctx              928 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx              936 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx              937 fs/btrfs/scrub.c 		sctx->stat.read_errors++;
sctx              938 fs/btrfs/scrub.c 		sctx->stat.uncorrectable_errors++;
sctx              939 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx              959 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx              960 fs/btrfs/scrub.c 		sctx->stat.unverified_errors++;
sctx              962 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx              964 fs/btrfs/scrub.c 		if (sctx->is_dev_replace)
sctx              970 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx              971 fs/btrfs/scrub.c 		sctx->stat.read_errors++;
sctx              972 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx              977 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx              978 fs/btrfs/scrub.c 		sctx->stat.csum_errors++;
sctx              979 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx              985 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx              986 fs/btrfs/scrub.c 		sctx->stat.verify_errors++;
sctx              987 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx              999 fs/btrfs/scrub.c 	if (sctx->readonly) {
sctx             1000 fs/btrfs/scrub.c 		ASSERT(!sctx->is_dev_replace);
sctx             1054 fs/btrfs/scrub.c 			if (sctx->is_dev_replace) {
sctx             1066 fs/btrfs/scrub.c 	if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
sctx             1100 fs/btrfs/scrub.c 		if (!page_bad->io_error && !sctx->is_dev_replace)
sctx             1129 fs/btrfs/scrub.c 		if (sctx->is_dev_replace) {
sctx             1157 fs/btrfs/scrub.c 	if (success && !sctx->is_dev_replace) {
sctx             1177 fs/btrfs/scrub.c 			spin_lock(&sctx->stat_lock);
sctx             1178 fs/btrfs/scrub.c 			sctx->stat.corrected_errors++;
sctx             1180 fs/btrfs/scrub.c 			spin_unlock(&sctx->stat_lock);
sctx             1187 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx             1188 fs/btrfs/scrub.c 		sctx->stat.uncorrectable_errors++;
sctx             1189 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx             1269 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = original_sblock->sctx;
sctx             1270 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sctx->fs_info;
sctx             1332 fs/btrfs/scrub.c 			sblock->sctx = sctx;
sctx             1337 fs/btrfs/scrub.c 				spin_lock(&sctx->stat_lock);
sctx             1338 fs/btrfs/scrub.c 				sctx->stat.malloc_errors++;
sctx             1339 fs/btrfs/scrub.c 				spin_unlock(&sctx->stat_lock);
sctx             1353 fs/btrfs/scrub.c 				       sctx->csum_size);
sctx             1554 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
sctx             1595 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
sctx             1627 fs/btrfs/scrub.c 	return scrub_add_page_to_wr_bio(sblock->sctx, spage);
sctx             1630 fs/btrfs/scrub.c static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
sctx             1636 fs/btrfs/scrub.c 	mutex_lock(&sctx->wr_lock);
sctx             1638 fs/btrfs/scrub.c 	if (!sctx->wr_curr_bio) {
sctx             1639 fs/btrfs/scrub.c 		sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
sctx             1641 fs/btrfs/scrub.c 		if (!sctx->wr_curr_bio) {
sctx             1642 fs/btrfs/scrub.c 			mutex_unlock(&sctx->wr_lock);
sctx             1645 fs/btrfs/scrub.c 		sctx->wr_curr_bio->sctx = sctx;
sctx             1646 fs/btrfs/scrub.c 		sctx->wr_curr_bio->page_count = 0;
sctx             1648 fs/btrfs/scrub.c 	sbio = sctx->wr_curr_bio;
sctx             1654 fs/btrfs/scrub.c 		sbio->dev = sctx->wr_tgtdev;
sctx             1657 fs/btrfs/scrub.c 			bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
sctx             1671 fs/btrfs/scrub.c 		scrub_wr_submit(sctx);
sctx             1680 fs/btrfs/scrub.c 			mutex_unlock(&sctx->wr_lock);
sctx             1683 fs/btrfs/scrub.c 		scrub_wr_submit(sctx);
sctx             1690 fs/btrfs/scrub.c 	if (sbio->page_count == sctx->pages_per_wr_bio)
sctx             1691 fs/btrfs/scrub.c 		scrub_wr_submit(sctx);
sctx             1692 fs/btrfs/scrub.c 	mutex_unlock(&sctx->wr_lock);
sctx             1697 fs/btrfs/scrub.c static void scrub_wr_submit(struct scrub_ctx *sctx)
sctx             1701 fs/btrfs/scrub.c 	if (!sctx->wr_curr_bio)
sctx             1704 fs/btrfs/scrub.c 	sbio = sctx->wr_curr_bio;
sctx             1705 fs/btrfs/scrub.c 	sctx->wr_curr_bio = NULL;
sctx             1707 fs/btrfs/scrub.c 	scrub_pending_bio_inc(sctx);
sctx             1730 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sbio->sctx;
sctx             1736 fs/btrfs/scrub.c 			&sbio->sctx->fs_info->dev_replace;
sctx             1751 fs/btrfs/scrub.c 	scrub_pending_bio_dec(sctx);
sctx             1790 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sblock->sctx;
sctx             1791 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sctx->fs_info;
sctx             1811 fs/btrfs/scrub.c 	len = sctx->fs_info->sectorsize;
sctx             1829 fs/btrfs/scrub.c 	if (memcmp(csum, on_disk_csum, sctx->csum_size))
sctx             1837 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sblock->sctx;
sctx             1839 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sctx->fs_info;
sctx             1857 fs/btrfs/scrub.c 	memcpy(on_disk_csum, h->csum, sctx->csum_size);
sctx             1879 fs/btrfs/scrub.c 	len = sctx->fs_info->nodesize - BTRFS_CSUM_SIZE;
sctx             1901 fs/btrfs/scrub.c 	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
sctx             1910 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sblock->sctx;
sctx             1911 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sctx->fs_info;
sctx             1931 fs/btrfs/scrub.c 	memcpy(on_disk_csum, s->csum, sctx->csum_size);
sctx             1964 fs/btrfs/scrub.c 	if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
sctx             1973 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx             1974 fs/btrfs/scrub.c 		++sctx->stat.super_errors;
sctx             1975 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx             2020 fs/btrfs/scrub.c static void scrub_submit(struct scrub_ctx *sctx)
sctx             2024 fs/btrfs/scrub.c 	if (sctx->curr == -1)
sctx             2027 fs/btrfs/scrub.c 	sbio = sctx->bios[sctx->curr];
sctx             2028 fs/btrfs/scrub.c 	sctx->curr = -1;
sctx             2029 fs/btrfs/scrub.c 	scrub_pending_bio_inc(sctx);
sctx             2033 fs/btrfs/scrub.c static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
sctx             2044 fs/btrfs/scrub.c 	while (sctx->curr == -1) {
sctx             2045 fs/btrfs/scrub.c 		spin_lock(&sctx->list_lock);
sctx             2046 fs/btrfs/scrub.c 		sctx->curr = sctx->first_free;
sctx             2047 fs/btrfs/scrub.c 		if (sctx->curr != -1) {
sctx             2048 fs/btrfs/scrub.c 			sctx->first_free = sctx->bios[sctx->curr]->next_free;
sctx             2049 fs/btrfs/scrub.c 			sctx->bios[sctx->curr]->next_free = -1;
sctx             2050 fs/btrfs/scrub.c 			sctx->bios[sctx->curr]->page_count = 0;
sctx             2051 fs/btrfs/scrub.c 			spin_unlock(&sctx->list_lock);
sctx             2053 fs/btrfs/scrub.c 			spin_unlock(&sctx->list_lock);
sctx             2054 fs/btrfs/scrub.c 			wait_event(sctx->list_wait, sctx->first_free != -1);
sctx             2057 fs/btrfs/scrub.c 	sbio = sctx->bios[sctx->curr];
sctx             2066 fs/btrfs/scrub.c 			bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
sctx             2081 fs/btrfs/scrub.c 		scrub_submit(sctx);
sctx             2093 fs/btrfs/scrub.c 		scrub_submit(sctx);
sctx             2100 fs/btrfs/scrub.c 	if (sbio->page_count == sctx->pages_per_rd_bio)
sctx             2101 fs/btrfs/scrub.c 		scrub_submit(sctx);
sctx             2109 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
sctx             2122 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sblock->sctx;
sctx             2123 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sctx->fs_info;
sctx             2134 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx             2135 fs/btrfs/scrub.c 		sctx->stat.read_errors++;
sctx             2136 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx             2141 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx             2142 fs/btrfs/scrub.c 		sctx->stat.uncorrectable_errors++;
sctx             2143 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx             2151 fs/btrfs/scrub.c 	if (sctx->is_dev_replace && sctx->flush_all_writes) {
sctx             2152 fs/btrfs/scrub.c 		mutex_lock(&sctx->wr_lock);
sctx             2153 fs/btrfs/scrub.c 		scrub_wr_submit(sctx);
sctx             2154 fs/btrfs/scrub.c 		mutex_unlock(&sctx->wr_lock);
sctx             2158 fs/btrfs/scrub.c 	scrub_pending_bio_dec(sctx);
sctx             2163 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sblock->sctx;
sctx             2164 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sctx->fs_info;
sctx             2179 fs/btrfs/scrub.c 	if (WARN_ON(!sctx->is_dev_replace ||
sctx             2207 fs/btrfs/scrub.c 	scrub_pending_bio_inc(sctx);
sctx             2216 fs/btrfs/scrub.c 	spin_lock(&sctx->stat_lock);
sctx             2217 fs/btrfs/scrub.c 	sctx->stat.malloc_errors++;
sctx             2218 fs/btrfs/scrub.c 	spin_unlock(&sctx->stat_lock);
sctx             2221 fs/btrfs/scrub.c static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
sctx             2231 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx             2232 fs/btrfs/scrub.c 		sctx->stat.malloc_errors++;
sctx             2233 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx             2240 fs/btrfs/scrub.c 	sblock->sctx = sctx;
sctx             2250 fs/btrfs/scrub.c 			spin_lock(&sctx->stat_lock);
sctx             2251 fs/btrfs/scrub.c 			sctx->stat.malloc_errors++;
sctx             2252 fs/btrfs/scrub.c 			spin_unlock(&sctx->stat_lock);
sctx             2269 fs/btrfs/scrub.c 			memcpy(spage->csum, csum, sctx->csum_size);
sctx             2295 fs/btrfs/scrub.c 			ret = scrub_add_page_to_rd_bio(sctx, spage);
sctx             2303 fs/btrfs/scrub.c 			scrub_submit(sctx);
sctx             2325 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sbio->sctx;
sctx             2350 fs/btrfs/scrub.c 	spin_lock(&sctx->list_lock);
sctx             2351 fs/btrfs/scrub.c 	sbio->next_free = sctx->first_free;
sctx             2352 fs/btrfs/scrub.c 	sctx->first_free = sbio->index;
sctx             2353 fs/btrfs/scrub.c 	spin_unlock(&sctx->list_lock);
sctx             2355 fs/btrfs/scrub.c 	if (sctx->is_dev_replace && sctx->flush_all_writes) {
sctx             2356 fs/btrfs/scrub.c 		mutex_lock(&sctx->wr_lock);
sctx             2357 fs/btrfs/scrub.c 		scrub_wr_submit(sctx);
sctx             2358 fs/btrfs/scrub.c 		mutex_unlock(&sctx->wr_lock);
sctx             2361 fs/btrfs/scrub.c 	scrub_pending_bio_dec(sctx);
sctx             2371 fs/btrfs/scrub.c 	int sectorsize = sparity->sctx->fs_info->sectorsize;
sctx             2421 fs/btrfs/scrub.c 		if (!corrupted && sblock->sctx->is_dev_replace)
sctx             2435 fs/btrfs/scrub.c static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
sctx             2441 fs/btrfs/scrub.c 	while (!list_empty(&sctx->csum_list)) {
sctx             2442 fs/btrfs/scrub.c 		sum = list_first_entry(&sctx->csum_list,
sctx             2449 fs/btrfs/scrub.c 		++sctx->stat.csum_discards;
sctx             2457 fs/btrfs/scrub.c 	index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
sctx             2460 fs/btrfs/scrub.c 	num_sectors = sum->len / sctx->fs_info->sectorsize;
sctx             2461 fs/btrfs/scrub.c 	memcpy(csum, sum->sums + index * sctx->csum_size, sctx->csum_size);
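
The scrub_find_csum lines above locate a checksum by pure arithmetic once the right cached sum is found: sector index into sum->sums, csum_size bytes per sector. A self-contained sketch of that lookup (struct ordered_sum is a simplified stand-in for the kernel's btrfs_ordered_sum):

#include <stdint.h>
#include <string.h>

struct ordered_sum {
	uint64_t bytenr;   /* start of the byte range this sum covers */
	uint64_t len;      /* length of the range, in bytes */
	uint8_t *sums;     /* csum_size bytes per sector, back to back */
};

/* Copy the csum for 'logical' into 'csum'; returns 1 if found, 0 if not. */
static int find_csum(const struct ordered_sum *sum, uint64_t logical,
		     uint32_t sectorsize, uint32_t csum_size, uint8_t *csum)
{
	uint64_t index, num_sectors;

	if (logical < sum->bytenr || logical >= sum->bytenr + sum->len)
		return 0;

	index = (logical - sum->bytenr) / sectorsize;
	num_sectors = sum->len / sectorsize;
	if (index >= num_sectors)
		return 0;

	memcpy(csum, sum->sums + index * csum_size, csum_size);
	return 1;
}
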
sctx             2470 fs/btrfs/scrub.c static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
sctx             2483 fs/btrfs/scrub.c 			blocksize = sctx->fs_info->sectorsize;
sctx             2484 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx             2485 fs/btrfs/scrub.c 		sctx->stat.data_extents_scrubbed++;
sctx             2486 fs/btrfs/scrub.c 		sctx->stat.data_bytes_scrubbed += len;
sctx             2487 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx             2492 fs/btrfs/scrub.c 			blocksize = sctx->fs_info->nodesize;
sctx             2493 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx             2494 fs/btrfs/scrub.c 		sctx->stat.tree_extents_scrubbed++;
sctx             2495 fs/btrfs/scrub.c 		sctx->stat.tree_bytes_scrubbed += len;
sctx             2496 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx             2498 fs/btrfs/scrub.c 		blocksize = sctx->fs_info->sectorsize;
sctx             2508 fs/btrfs/scrub.c 			have_csum = scrub_find_csum(sctx, logical, csum);
sctx             2510 fs/btrfs/scrub.c 				++sctx->stat.no_csum;
sctx             2512 fs/btrfs/scrub.c 		ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
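
scrub_extent above picks a blocksize per extent type (sectorsize for data, nodesize for metadata, with per-type stats) and walks the extent in blocksize steps, fetching a checksum for each block before handing it on to scrub_pages. A sketch of that chunking loop; scrub_one_block is a hypothetical callback, not a kernel function:

#include <stdint.h>

typedef int (*scrub_block_fn)(uint64_t logical, uint64_t len, int have_csum);

static int scrub_extent_sketch(uint64_t logical, uint64_t len,
			       uint32_t blocksize,
			       scrub_block_fn scrub_one_block)
{
	while (len) {
		uint64_t l = len < blocksize ? len : blocksize;
		int have_csum = 0;   /* would call scrub_find_csum() here */
		int ret = scrub_one_block(logical, l, have_csum);

		if (ret)
			return ret;
		len -= l;
		logical += l;
	}
	return 0;
}
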
sctx             2530 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sparity->sctx;
sctx             2536 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx             2537 fs/btrfs/scrub.c 		sctx->stat.malloc_errors++;
sctx             2538 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx             2545 fs/btrfs/scrub.c 	sblock->sctx = sctx;
sctx             2557 fs/btrfs/scrub.c 			spin_lock(&sctx->stat_lock);
sctx             2558 fs/btrfs/scrub.c 			sctx->stat.malloc_errors++;
sctx             2559 fs/btrfs/scrub.c 			spin_unlock(&sctx->stat_lock);
sctx             2579 fs/btrfs/scrub.c 			memcpy(spage->csum, csum, sctx->csum_size);
sctx             2597 fs/btrfs/scrub.c 		ret = scrub_add_page_to_rd_bio(sctx, spage);
sctx             2614 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sparity->sctx;
sctx             2629 fs/btrfs/scrub.c 		blocksize = sctx->fs_info->sectorsize;
sctx             2639 fs/btrfs/scrub.c 			have_csum = scrub_find_csum(sctx, logical, csum);
sctx             2702 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sparity->sctx;
sctx             2708 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx             2709 fs/btrfs/scrub.c 		sctx->stat.read_errors += nbits;
sctx             2710 fs/btrfs/scrub.c 		sctx->stat.uncorrectable_errors += nbits;
sctx             2711 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx             2726 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sparity->sctx;
sctx             2729 fs/btrfs/scrub.c 	scrub_pending_bio_dec(sctx);
sctx             2735 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
sctx             2750 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = sparity->sctx;
sctx             2751 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sctx->fs_info;
sctx             2782 fs/btrfs/scrub.c 	scrub_pending_bio_inc(sctx);
sctx             2793 fs/btrfs/scrub.c 	spin_lock(&sctx->stat_lock);
sctx             2794 fs/btrfs/scrub.c 	sctx->stat.malloc_errors++;
sctx             2795 fs/btrfs/scrub.c 	spin_unlock(&sctx->stat_lock);
sctx             2818 fs/btrfs/scrub.c static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
sctx             2825 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sctx->fs_info;
sctx             2852 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx             2853 fs/btrfs/scrub.c 		sctx->stat.malloc_errors++;
sctx             2854 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx             2860 fs/btrfs/scrub.c 	sparity->sctx = sctx;
sctx             2945 fs/btrfs/scrub.c 				spin_lock(&sctx->stat_lock);
sctx             2946 fs/btrfs/scrub.c 				sctx->stat.uncorrectable_errors++;
sctx             2947 fs/btrfs/scrub.c 				spin_unlock(&sctx->stat_lock);
sctx             2988 fs/btrfs/scrub.c 						&sctx->csum_list, 1);
sctx             2999 fs/btrfs/scrub.c 			scrub_free_csums(sctx);
sctx             3034 fs/btrfs/scrub.c 	scrub_submit(sctx);
sctx             3035 fs/btrfs/scrub.c 	mutex_lock(&sctx->wr_lock);
sctx             3036 fs/btrfs/scrub.c 	scrub_wr_submit(sctx);
sctx             3037 fs/btrfs/scrub.c 	mutex_unlock(&sctx->wr_lock);
sctx             3043 fs/btrfs/scrub.c static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
sctx             3049 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sctx->fs_info;
sctx             3141 fs/btrfs/scrub.c 	wait_event(sctx->list_wait,
sctx             3142 fs/btrfs/scrub.c 		   atomic_read(&sctx->bios_in_flight) == 0);
sctx             3183 fs/btrfs/scrub.c 		    atomic_read(&sctx->cancel_req)) {
sctx             3192 fs/btrfs/scrub.c 			sctx->flush_all_writes = true;
sctx             3193 fs/btrfs/scrub.c 			scrub_submit(sctx);
sctx             3194 fs/btrfs/scrub.c 			mutex_lock(&sctx->wr_lock);
sctx             3195 fs/btrfs/scrub.c 			scrub_wr_submit(sctx);
sctx             3196 fs/btrfs/scrub.c 			mutex_unlock(&sctx->wr_lock);
sctx             3197 fs/btrfs/scrub.c 			wait_event(sctx->list_wait,
sctx             3198 fs/btrfs/scrub.c 				   atomic_read(&sctx->bios_in_flight) == 0);
sctx             3199 fs/btrfs/scrub.c 			sctx->flush_all_writes = false;
sctx             3212 fs/btrfs/scrub.c 				ret = scrub_raid56_parity(sctx, map, scrub_dev,
sctx             3296 fs/btrfs/scrub.c 				spin_lock(&sctx->stat_lock);
sctx             3297 fs/btrfs/scrub.c 				sctx->stat.uncorrectable_errors++;
sctx             3298 fs/btrfs/scrub.c 				spin_unlock(&sctx->stat_lock);
sctx             3322 fs/btrfs/scrub.c 			if (sctx->is_dev_replace)
sctx             3332 fs/btrfs/scrub.c 						       &sctx->csum_list, 1);
sctx             3336 fs/btrfs/scrub.c 			ret = scrub_extent(sctx, map, extent_logical, extent_len,
sctx             3341 fs/btrfs/scrub.c 			scrub_free_csums(sctx);
sctx             3364 fs/btrfs/scrub.c 						ret = scrub_raid56_parity(sctx,
sctx             3393 fs/btrfs/scrub.c 		spin_lock(&sctx->stat_lock);
sctx             3395 fs/btrfs/scrub.c 			sctx->stat.last_physical = map->stripes[num].physical +
sctx             3398 fs/btrfs/scrub.c 			sctx->stat.last_physical = physical;
sctx             3399 fs/btrfs/scrub.c 		spin_unlock(&sctx->stat_lock);
sctx             3405 fs/btrfs/scrub.c 	scrub_submit(sctx);
sctx             3406 fs/btrfs/scrub.c 	mutex_lock(&sctx->wr_lock);
sctx             3407 fs/btrfs/scrub.c 	scrub_wr_submit(sctx);
sctx             3408 fs/btrfs/scrub.c 	mutex_unlock(&sctx->wr_lock);
sctx             3416 fs/btrfs/scrub.c static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
sctx             3422 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sctx->fs_info;
sctx             3456 fs/btrfs/scrub.c 			ret = scrub_stripe(sctx, map, scrub_dev, i,
sctx             3469 fs/btrfs/scrub.c int scrub_enumerate_chunks(struct scrub_ctx *sctx,
sctx             3474 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sctx->fs_info;
sctx             3564 fs/btrfs/scrub.c 		if (!ret && sctx->is_dev_replace) {
sctx             3629 fs/btrfs/scrub.c 		ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
sctx             3642 fs/btrfs/scrub.c 		sctx->flush_all_writes = true;
sctx             3643 fs/btrfs/scrub.c 		scrub_submit(sctx);
sctx             3644 fs/btrfs/scrub.c 		mutex_lock(&sctx->wr_lock);
sctx             3645 fs/btrfs/scrub.c 		scrub_wr_submit(sctx);
sctx             3646 fs/btrfs/scrub.c 		mutex_unlock(&sctx->wr_lock);
sctx             3648 fs/btrfs/scrub.c 		wait_event(sctx->list_wait,
sctx             3649 fs/btrfs/scrub.c 			   atomic_read(&sctx->bios_in_flight) == 0);
sctx             3658 fs/btrfs/scrub.c 		wait_event(sctx->list_wait,
sctx             3659 fs/btrfs/scrub.c 			   atomic_read(&sctx->workers_pending) == 0);
sctx             3660 fs/btrfs/scrub.c 		sctx->flush_all_writes = false;
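
scrub_stripe and scrub_enumerate_chunks above both quiesce I/O the same way: set flush_all_writes, submit everything queued, then wait until bios_in_flight and workers_pending drain before clearing the flag. A sketch using C11 atomics and a condition variable in place of the kernel's atomic_t and wait_event; submit_all() is a hypothetical stand-in for the scrub_submit + scrub_wr_submit pair:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct quiesce_ctx {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	atomic_int bios_in_flight;
	atomic_int workers_pending;
	bool flush_all_writes;
};

/* The side that decrements a counter must broadcast q->wait. */
static void wait_for_zero(struct quiesce_ctx *q, atomic_int *counter)
{
	pthread_mutex_lock(&q->lock);
	while (atomic_load(counter) != 0)
		pthread_cond_wait(&q->wait, &q->lock);
	pthread_mutex_unlock(&q->lock);
}

static void quiesce(struct quiesce_ctx *q, void (*submit_all)(void))
{
	q->flush_all_writes = true;   /* end_io paths now flush writes too */
	submit_all();
	wait_for_zero(q, &q->bios_in_flight);
	wait_for_zero(q, &q->workers_pending);
	q->flush_all_writes = false;
}
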
sctx             3691 fs/btrfs/scrub.c 		if (sctx->is_dev_replace &&
sctx             3696 fs/btrfs/scrub.c 		if (sctx->stat.malloc_errors > 0) {
sctx             3710 fs/btrfs/scrub.c static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
sctx             3717 fs/btrfs/scrub.c 	struct btrfs_fs_info *fs_info = sctx->fs_info;
sctx             3734 fs/btrfs/scrub.c 		ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
sctx             3740 fs/btrfs/scrub.c 	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
sctx             3795 fs/btrfs/scrub.c 	struct scrub_ctx *sctx;
sctx             3844 fs/btrfs/scrub.c 	sctx = scrub_setup_ctx(fs_info, is_dev_replace);
sctx             3845 fs/btrfs/scrub.c 	if (IS_ERR(sctx))
sctx             3846 fs/btrfs/scrub.c 		return PTR_ERR(sctx);
sctx             3894 fs/btrfs/scrub.c 	sctx->readonly = readonly;
sctx             3895 fs/btrfs/scrub.c 	dev->scrub_ctx = sctx;
sctx             3923 fs/btrfs/scrub.c 		ret = scrub_supers(sctx, dev);
sctx             3928 fs/btrfs/scrub.c 		ret = scrub_enumerate_chunks(sctx, dev, start, end);
sctx             3931 fs/btrfs/scrub.c 	wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
sctx             3935 fs/btrfs/scrub.c 	wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
sctx             3938 fs/btrfs/scrub.c 		memcpy(progress, &sctx->stat, sizeof(*progress));
sctx             3960 fs/btrfs/scrub.c 	scrub_put_ctx(sctx);
sctx             3965 fs/btrfs/scrub.c 	scrub_free_ctx(sctx);
sctx             4015 fs/btrfs/scrub.c 	struct scrub_ctx *sctx;
sctx             4018 fs/btrfs/scrub.c 	sctx = dev->scrub_ctx;
sctx             4019 fs/btrfs/scrub.c 	if (!sctx) {
sctx             4023 fs/btrfs/scrub.c 	atomic_inc(&sctx->cancel_req);
sctx             4039 fs/btrfs/scrub.c 	struct scrub_ctx *sctx = NULL;
sctx             4044 fs/btrfs/scrub.c 		sctx = dev->scrub_ctx;
sctx             4045 fs/btrfs/scrub.c 	if (sctx)
sctx             4046 fs/btrfs/scrub.c 		memcpy(progress, &sctx->stat, sizeof(*progress));
sctx             4049 fs/btrfs/scrub.c 	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
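
The scrub.c portion of the listing ends with the progress-query path: copy the live statistics out, and encode "unknown device" versus "device not currently scrubbing" in the return value, exactly as the ternary on the last line above does. A simplified sketch with stand-in types:

#include <errno.h>
#include <string.h>

struct scrub_progress { unsigned long long data_bytes_scrubbed; /* ... */ };
struct scrub_ctx_s    { struct scrub_progress stat; };
struct device_s       { struct scrub_ctx_s *scrub_ctx; };

static int scrub_progress_sketch(struct device_s *dev,
				 struct scrub_progress *progress)
{
	struct scrub_ctx_s *sctx = dev ? dev->scrub_ctx : NULL;

	if (sctx)
		memcpy(progress, &sctx->stat, sizeof(*progress));

	return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
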
sctx              287 fs/btrfs/send.c static void inconsistent_snapshot_error(struct send_ctx *sctx,
sctx              312 fs/btrfs/send.c 	btrfs_err(sctx->send_root->fs_info,
sctx              314 fs/btrfs/send.c 		  result_string, what, sctx->cmp_key->objectid,
sctx              315 fs/btrfs/send.c 		  sctx->send_root->root_key.objectid,
sctx              316 fs/btrfs/send.c 		  (sctx->parent_root ?
sctx              317 fs/btrfs/send.c 		   sctx->parent_root->root_key.objectid : 0));
sctx              320 fs/btrfs/send.c static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
sctx              323 fs/btrfs/send.c get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
sctx              325 fs/btrfs/send.c static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
sctx              327 fs/btrfs/send.c static int need_send_hole(struct send_ctx *sctx)
sctx              329 fs/btrfs/send.c 	return (sctx->parent_root && !sctx->cur_inode_new &&
sctx              330 fs/btrfs/send.c 		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
sctx              331 fs/btrfs/send.c 		S_ISREG(sctx->cur_inode_mode));
sctx              576 fs/btrfs/send.c static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
sctx              580 fs/btrfs/send.c 	int left = sctx->send_max_size - sctx->send_size;
sctx              585 fs/btrfs/send.c 	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
sctx              589 fs/btrfs/send.c 	sctx->send_size += total_len;
sctx              595 fs/btrfs/send.c 	static int tlv_put_u##bits(struct send_ctx *sctx,	 	\
sctx              599 fs/btrfs/send.c 		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
sctx              604 fs/btrfs/send.c static int tlv_put_string(struct send_ctx *sctx, u16 attr,
sctx              609 fs/btrfs/send.c 	return tlv_put(sctx, attr, str, len);
sctx              612 fs/btrfs/send.c static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
sctx              615 fs/btrfs/send.c 	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
sctx              618 fs/btrfs/send.c static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
sctx              624 fs/btrfs/send.c 	return tlv_put(sctx, attr, &bts, sizeof(bts));
sctx              628 fs/btrfs/send.c #define TLV_PUT(sctx, attrtype, data, attrlen) \
sctx              630 fs/btrfs/send.c 		ret = tlv_put(sctx, attrtype, data, attrlen); \
sctx              635 fs/btrfs/send.c #define TLV_PUT_INT(sctx, attrtype, bits, value) \
sctx              637 fs/btrfs/send.c 		ret = tlv_put_u##bits(sctx, attrtype, value); \
sctx              642 fs/btrfs/send.c #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
sctx              643 fs/btrfs/send.c #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
sctx              644 fs/btrfs/send.c #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
sctx              645 fs/btrfs/send.c #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
sctx              646 fs/btrfs/send.c #define TLV_PUT_STRING(sctx, attrtype, str, len) \
sctx              648 fs/btrfs/send.c 		ret = tlv_put_string(sctx, attrtype, str, len); \
sctx              652 fs/btrfs/send.c #define TLV_PUT_PATH(sctx, attrtype, p) \
sctx              654 fs/btrfs/send.c 		ret = tlv_put_string(sctx, attrtype, p->start, \
sctx              659 fs/btrfs/send.c #define TLV_PUT_UUID(sctx, attrtype, uuid) \
sctx              661 fs/btrfs/send.c 		ret = tlv_put_uuid(sctx, attrtype, uuid); \
sctx              665 fs/btrfs/send.c #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
sctx              667 fs/btrfs/send.c 		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
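
tlv_put and the TLV_PUT_* macros above append attributes to sctx->send_buf as (type, length, payload) records, bounded by send_max_size. A userspace sketch of the append step; the header layout here is illustrative, not the btrfs send wire format:

#include <errno.h>
#include <stdint.h>
#include <string.h>

struct tlv_hdr { uint16_t type; uint16_t len; } __attribute__((packed));

struct send_buf {
	uint8_t *buf;
	size_t max_size;
	size_t size;
};

static int tlv_put_sketch(struct send_buf *s, uint16_t attr,
			  const void *data, uint16_t len)
{
	size_t total = sizeof(struct tlv_hdr) + len;
	struct tlv_hdr *hdr;

	if (s->max_size - s->size < total)
		return -EOVERFLOW;   /* same error the kernel returns */

	hdr = (struct tlv_hdr *)(s->buf + s->size);
	hdr->type = attr;
	hdr->len = len;
	memcpy(hdr + 1, data, len);
	s->size += total;
	return 0;
}
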
sctx              672 fs/btrfs/send.c static int send_header(struct send_ctx *sctx)
sctx              679 fs/btrfs/send.c 	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
sctx              680 fs/btrfs/send.c 					&sctx->send_off);
sctx              686 fs/btrfs/send.c static int begin_cmd(struct send_ctx *sctx, int cmd)
sctx              690 fs/btrfs/send.c 	if (WARN_ON(!sctx->send_buf))
sctx              693 fs/btrfs/send.c 	BUG_ON(sctx->send_size);
sctx              695 fs/btrfs/send.c 	sctx->send_size += sizeof(*hdr);
sctx              696 fs/btrfs/send.c 	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
sctx              702 fs/btrfs/send.c static int send_cmd(struct send_ctx *sctx)
sctx              708 fs/btrfs/send.c 	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
sctx              709 fs/btrfs/send.c 	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
sctx              712 fs/btrfs/send.c 	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
sctx              715 fs/btrfs/send.c 	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
sctx              716 fs/btrfs/send.c 					&sctx->send_off);
sctx              718 fs/btrfs/send.c 	sctx->total_send_size += sctx->send_size;
sctx              719 fs/btrfs/send.c 	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
sctx              720 fs/btrfs/send.c 	sctx->send_size = 0;
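
send_cmd above finalizes a command: back-patch the header length, compute a crc32c over the whole buffer, write it out, and reset send_size for the next command. A sketch, assuming the kernel's convention of zeroing the crc field before computing it; crc32c() is assumed to be provided elsewhere:

#include <stdint.h>
#include <stdio.h>

struct cmd_hdr {
	uint32_t len;   /* payload length, excluding this header */
	uint16_t cmd;
	uint32_t crc;
} __attribute__((packed));

extern uint32_t crc32c(uint32_t seed, const void *data, size_t len);

static int send_cmd_sketch(FILE *out, uint8_t *send_buf, size_t *send_size)
{
	struct cmd_hdr *hdr = (struct cmd_hdr *)send_buf;

	hdr->len = (uint32_t)(*send_size - sizeof(*hdr));
	hdr->crc = 0;                          /* crc covers a zeroed field */
	hdr->crc = crc32c(0, send_buf, *send_size);

	if (fwrite(send_buf, 1, *send_size, out) != *send_size)
		return -1;

	*send_size = 0;   /* ready to build the next command */
	return 0;
}
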
sctx              728 fs/btrfs/send.c static int send_rename(struct send_ctx *sctx,
sctx              731 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
sctx              736 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
sctx              740 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
sctx              741 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
sctx              743 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx              753 fs/btrfs/send.c static int send_link(struct send_ctx *sctx,
sctx              756 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
sctx              761 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
sctx              765 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
sctx              766 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
sctx              768 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx              778 fs/btrfs/send.c static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
sctx              780 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
sctx              785 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
sctx              789 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
sctx              791 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx              801 fs/btrfs/send.c static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
sctx              803 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
sctx              808 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
sctx              812 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
sctx              814 fs/btrfs/send.c 	ret = send_cmd(sctx);
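
send_rename, send_link, send_unlink, and send_rmdir all share one shape: begin_cmd, a few TLV_PUT_PATH calls, send_cmd, with every TLV failure bailing to a common label (which is what the TLV_PUT* macros hide behind their do/while). A sketch of that shape; begin_cmd, tlv_put_path, send_cmd, and the enum values are hypothetical stand-ins:

#include <stddef.h>

struct sctx_s;
extern int begin_cmd(struct sctx_s *s, int cmd);
extern int tlv_put_path(struct sctx_s *s, int attr, const char *path);
extern int send_cmd(struct sctx_s *s);

enum { C_RENAME = 1, A_PATH = 1, A_PATH_TO = 2 };

static int send_rename_sketch(struct sctx_s *s,
			      const char *from, const char *to)
{
	int ret;

	ret = begin_cmd(s, C_RENAME);
	if (ret < 0)
		goto out;

	ret = tlv_put_path(s, A_PATH, from);      /* TLV_PUT_PATH(...) */
	if (ret < 0)
		goto tlv_put_failure;
	ret = tlv_put_path(s, A_PATH_TO, to);
	if (ret < 0)
		goto tlv_put_failure;

	ret = send_cmd(s);

tlv_put_failure:
out:
	return ret;
}
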
sctx             1184 fs/btrfs/send.c 	struct send_ctx *sctx;
sctx             1240 fs/btrfs/send.c 	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
sctx             1241 fs/btrfs/send.c 			bctx->sctx->clone_roots_cnt,
sctx             1247 fs/btrfs/send.c 	if (found->root == bctx->sctx->send_root &&
sctx             1257 fs/btrfs/send.c 	if (found->root == bctx->sctx->send_root) {
sctx             1293 fs/btrfs/send.c static int find_extent_clone(struct send_ctx *sctx,
sctx             1299 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
sctx             1387 fs/btrfs/send.c 	for (i = 0; i < sctx->clone_roots_cnt; i++) {
sctx             1388 fs/btrfs/send.c 		cur_clone_root = sctx->clone_roots + i;
sctx             1394 fs/btrfs/send.c 	backref_ctx->sctx = sctx;
sctx             1453 fs/btrfs/send.c 	for (i = 0; i < sctx->clone_roots_cnt; i++) {
sctx             1454 fs/btrfs/send.c 		if (sctx->clone_roots[i].found_refs) {
sctx             1456 fs/btrfs/send.c 				cur_clone_root = sctx->clone_roots + i;
sctx             1457 fs/btrfs/send.c 			else if (sctx->clone_roots[i].root == sctx->send_root)
sctx             1459 fs/btrfs/send.c 				cur_clone_root = sctx->clone_roots + i;
sctx             1537 fs/btrfs/send.c static int gen_unique_name(struct send_ctx *sctx,
sctx             1557 fs/btrfs/send.c 		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
sctx             1571 fs/btrfs/send.c 		if (!sctx->parent_root) {
sctx             1577 fs/btrfs/send.c 		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
sctx             1609 fs/btrfs/send.c static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
sctx             1617 fs/btrfs/send.c 	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
sctx             1623 fs/btrfs/send.c 	if (!sctx->parent_root) {
sctx             1626 fs/btrfs/send.c 		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
sctx             1637 fs/btrfs/send.c 			if (ino < sctx->send_progress)
sctx             1642 fs/btrfs/send.c 			if (ino < sctx->send_progress)
sctx             1651 fs/btrfs/send.c 			if (ino < sctx->send_progress)
sctx             1660 fs/btrfs/send.c 			if (ino < sctx->send_progress)
sctx             1675 fs/btrfs/send.c static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
sctx             1682 fs/btrfs/send.c 	ret = get_cur_inode_state(sctx, ino, gen);
sctx             1842 fs/btrfs/send.c static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
sctx             1851 fs/btrfs/send.c 	if (!sctx->parent_root)
sctx             1854 fs/btrfs/send.c 	ret = is_inode_existent(sctx, dir, dir_gen);
sctx             1863 fs/btrfs/send.c 	if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
sctx             1864 fs/btrfs/send.c 		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
sctx             1876 fs/btrfs/send.c 	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
sctx             1890 fs/btrfs/send.c 	if (other_inode > sctx->send_progress ||
sctx             1891 fs/btrfs/send.c 	    is_waiting_for_move(sctx, other_inode)) {
sctx             1892 fs/btrfs/send.c 		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
sctx             1914 fs/btrfs/send.c static int did_overwrite_ref(struct send_ctx *sctx,
sctx             1924 fs/btrfs/send.c 	if (!sctx->parent_root)
sctx             1927 fs/btrfs/send.c 	ret = is_inode_existent(sctx, dir, dir_gen);
sctx             1932 fs/btrfs/send.c 		ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
sctx             1945 fs/btrfs/send.c 	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
sctx             1955 fs/btrfs/send.c 	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
sctx             1971 fs/btrfs/send.c 	if ((ow_inode < sctx->send_progress) ||
sctx             1972 fs/btrfs/send.c 	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
sctx             1973 fs/btrfs/send.c 	     gen == sctx->cur_inode_gen))
sctx             1987 fs/btrfs/send.c static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
sctx             1994 fs/btrfs/send.c 	if (!sctx->parent_root)
sctx             2001 fs/btrfs/send.c 	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
sctx             2005 fs/btrfs/send.c 	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
sctx             2019 fs/btrfs/send.c static int name_cache_insert(struct send_ctx *sctx,
sctx             2025 fs/btrfs/send.c 	nce_head = radix_tree_lookup(&sctx->name_cache,
sctx             2035 fs/btrfs/send.c 		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
sctx             2043 fs/btrfs/send.c 	list_add_tail(&nce->list, &sctx->name_cache_list);
sctx             2044 fs/btrfs/send.c 	sctx->name_cache_size++;
sctx             2049 fs/btrfs/send.c static void name_cache_delete(struct send_ctx *sctx,
sctx             2054 fs/btrfs/send.c 	nce_head = radix_tree_lookup(&sctx->name_cache,
sctx             2057 fs/btrfs/send.c 		btrfs_err(sctx->send_root->fs_info,
sctx             2059 fs/btrfs/send.c 			nce->ino, sctx->name_cache_size);
sctx             2064 fs/btrfs/send.c 	sctx->name_cache_size--;
sctx             2070 fs/btrfs/send.c 		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
sctx             2075 fs/btrfs/send.c static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
sctx             2081 fs/btrfs/send.c 	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
sctx             2096 fs/btrfs/send.c static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
sctx             2099 fs/btrfs/send.c 	list_add_tail(&nce->list, &sctx->name_cache_list);
sctx             2105 fs/btrfs/send.c static void name_cache_clean_unused(struct send_ctx *sctx)
sctx             2109 fs/btrfs/send.c 	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
sctx             2112 fs/btrfs/send.c 	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
sctx             2113 fs/btrfs/send.c 		nce = list_entry(sctx->name_cache_list.next,
sctx             2115 fs/btrfs/send.c 		name_cache_delete(sctx, nce);
sctx             2120 fs/btrfs/send.c static void name_cache_free(struct send_ctx *sctx)
sctx             2124 fs/btrfs/send.c 	while (!list_empty(&sctx->name_cache_list)) {
sctx             2125 fs/btrfs/send.c 		nce = list_entry(sctx->name_cache_list.next,
sctx             2127 fs/btrfs/send.c 		name_cache_delete(sctx, nce);
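
The name_cache_* block above implements a bounded LRU: entries sit on name_cache_list in least-recently-used order, a hit re-queues the entry at the tail, and clean_unused evicts from the head once the cache passes a high-water mark. A sketch of the aging policy; the radix-tree index is omitted and the size constants are illustrative:

#include <stdlib.h>

#define CACHE_CLEAN_SIZE 1024   /* start evicting above this */
#define CACHE_MAX_SIZE    256   /* evict down to this */

struct nce {
	struct nce *prev, *next;  /* LRU list linkage */
	unsigned long ino;
};

struct name_cache {
	struct nce head;   /* sentinel; head.next is the oldest entry */
	int size;          /* an insert does list_add_tail + size++ */
};

static void cache_init(struct name_cache *c)
{
	c->head.prev = c->head.next = &c->head;
	c->size = 0;
}

static void list_del_entry(struct nce *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail_entry(struct name_cache *c, struct nce *e)
{
	e->prev = c->head.prev;
	e->next = &c->head;
	c->head.prev->next = e;
	c->head.prev = e;
}

/* A cache hit: re-queue the entry as most-recently used. */
static void cache_used(struct name_cache *c, struct nce *e)
{
	list_del_entry(e);
	list_add_tail_entry(c, e);
}

/* Evict the oldest entries once the cache has grown past the mark. */
static void cache_clean_unused(struct name_cache *c)
{
	if (c->size < CACHE_CLEAN_SIZE)
		return;
	while (c->size > CACHE_MAX_SIZE) {
		struct nce *oldest = c->head.next;

		list_del_entry(oldest);
		free(oldest);
		c->size--;
	}
}
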
sctx             2140 fs/btrfs/send.c static int __get_cur_name_and_parent(struct send_ctx *sctx,
sctx             2155 fs/btrfs/send.c 	nce = name_cache_search(sctx, ino, gen);
sctx             2157 fs/btrfs/send.c 		if (ino < sctx->send_progress && nce->need_later_update) {
sctx             2158 fs/btrfs/send.c 			name_cache_delete(sctx, nce);
sctx             2162 fs/btrfs/send.c 			name_cache_used(sctx, nce);
sctx             2178 fs/btrfs/send.c 	ret = is_inode_existent(sctx, ino, gen);
sctx             2183 fs/btrfs/send.c 		ret = gen_unique_name(sctx, ino, gen, dest);
sctx             2194 fs/btrfs/send.c 	if (ino < sctx->send_progress)
sctx             2195 fs/btrfs/send.c 		ret = get_first_ref(sctx->send_root, ino,
sctx             2198 fs/btrfs/send.c 		ret = get_first_ref(sctx->parent_root, ino,
sctx             2207 fs/btrfs/send.c 	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
sctx             2213 fs/btrfs/send.c 		ret = gen_unique_name(sctx, ino, gen, dest);
sctx             2237 fs/btrfs/send.c 	if (ino < sctx->send_progress)
sctx             2242 fs/btrfs/send.c 	nce_ret = name_cache_insert(sctx, nce);
sctx             2245 fs/btrfs/send.c 	name_cache_clean_unused(sctx);
sctx             2276 fs/btrfs/send.c static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
sctx             2299 fs/btrfs/send.c 		if (is_waiting_for_rm(sctx, ino)) {
sctx             2300 fs/btrfs/send.c 			ret = gen_unique_name(sctx, ino, gen, name);
sctx             2307 fs/btrfs/send.c 		wdm = get_waiting_dir_move(sctx, ino);
sctx             2309 fs/btrfs/send.c 			ret = gen_unique_name(sctx, ino, gen, name);
sctx             2312 fs/btrfs/send.c 			ret = get_first_ref(sctx->parent_root, ino,
sctx             2315 fs/btrfs/send.c 			ret = __get_cur_name_and_parent(sctx, ino, gen,
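
get_cur_path above builds a path bottom-up: resolve (name, parent) for the current inode, prepend the component, and repeat until the subvolume root is reached. A rough userspace sketch under that assumption; resolve_parent() is a hypothetical stand-in for __get_cur_name_and_parent, and the waiting-for-rm/waiting-for-move special cases are omitted:

#include <stdio.h>
#include <string.h>

#define ROOT_INO 256ULL   /* BTRFS_FIRST_FREE_OBJECTID */

/* Fill 'name' for ino and return its parent's ino; hypothetical. */
typedef unsigned long long (*resolve_parent_fn)(unsigned long long ino,
						char *name, size_t name_sz);

static int get_cur_path_sketch(unsigned long long ino,
			       resolve_parent_fn resolve_parent,
			       char *dest, size_t dest_sz)
{
	char path[4096] = "";

	while (ino != ROOT_INO) {
		char name[256];
		char tmp[4096];
		int n;

		ino = resolve_parent(ino, name, sizeof(name));

		/* prepend "/name" to everything built so far */
		n = snprintf(tmp, sizeof(tmp), "/%s%s", name, path);
		if (n < 0 || (size_t)n >= sizeof(tmp))
			return -1;   /* path would overflow */
		strcpy(path, tmp);
	}
	snprintf(dest, dest_sz, "%s", path[0] ? path : "/");
	return 0;
}
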
sctx             2343 fs/btrfs/send.c static int send_subvol_begin(struct send_ctx *sctx)
sctx             2346 fs/btrfs/send.c 	struct btrfs_root *send_root = sctx->send_root;
sctx             2347 fs/btrfs/send.c 	struct btrfs_root *parent_root = sctx->parent_root;
sctx             2391 fs/btrfs/send.c 		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
sctx             2395 fs/btrfs/send.c 		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
sctx             2400 fs/btrfs/send.c 	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
sctx             2402 fs/btrfs/send.c 	if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
sctx             2403 fs/btrfs/send.c 		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
sctx             2404 fs/btrfs/send.c 			    sctx->send_root->root_item.received_uuid);
sctx             2406 fs/btrfs/send.c 		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
sctx             2407 fs/btrfs/send.c 			    sctx->send_root->root_item.uuid);
sctx             2409 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
sctx             2410 fs/btrfs/send.c 		    le64_to_cpu(sctx->send_root->root_item.ctransid));
sctx             2413 fs/btrfs/send.c 			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
sctx             2416 fs/btrfs/send.c 			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
sctx             2418 fs/btrfs/send.c 		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
sctx             2419 fs/btrfs/send.c 			    le64_to_cpu(sctx->parent_root->root_item.ctransid));
sctx             2422 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx             2431 fs/btrfs/send.c static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
sctx             2433 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
sctx             2443 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
sctx             2447 fs/btrfs/send.c 	ret = get_cur_path(sctx, ino, gen, p);
sctx             2450 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
sctx             2451 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
sctx             2453 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx             2461 fs/btrfs/send.c static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
sctx             2463 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
sctx             2473 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
sctx             2477 fs/btrfs/send.c 	ret = get_cur_path(sctx, ino, gen, p);
sctx             2480 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
sctx             2481 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
sctx             2483 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx             2491 fs/btrfs/send.c static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
sctx             2493 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
sctx             2504 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
sctx             2508 fs/btrfs/send.c 	ret = get_cur_path(sctx, ino, gen, p);
sctx             2511 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
sctx             2512 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
sctx             2513 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
sctx             2515 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx             2523 fs/btrfs/send.c static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
sctx             2525 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
sctx             2549 fs/btrfs/send.c 	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
sctx             2559 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
sctx             2563 fs/btrfs/send.c 	ret = get_cur_path(sctx, ino, gen, p);
sctx             2566 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
sctx             2567 fs/btrfs/send.c 	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
sctx             2568 fs/btrfs/send.c 	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
sctx             2569 fs/btrfs/send.c 	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
sctx             2572 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx             2586 fs/btrfs/send.c static int send_create_inode(struct send_ctx *sctx, u64 ino)
sctx             2588 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
sctx             2602 fs/btrfs/send.c 	if (ino != sctx->cur_ino) {
sctx             2603 fs/btrfs/send.c 		ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
sctx             2608 fs/btrfs/send.c 		gen = sctx->cur_inode_gen;
sctx             2609 fs/btrfs/send.c 		mode = sctx->cur_inode_mode;
sctx             2610 fs/btrfs/send.c 		rdev = sctx->cur_inode_rdev;
sctx             2626 fs/btrfs/send.c 		btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
sctx             2632 fs/btrfs/send.c 	ret = begin_cmd(sctx, cmd);
sctx             2636 fs/btrfs/send.c 	ret = gen_unique_name(sctx, ino, gen, p);
sctx             2640 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
sctx             2641 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
sctx             2645 fs/btrfs/send.c 		ret = read_symlink(sctx->send_root, ino, p);
sctx             2648 fs/btrfs/send.c 		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
sctx             2651 fs/btrfs/send.c 		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
sctx             2652 fs/btrfs/send.c 		TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
sctx             2655 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx             2671 fs/btrfs/send.c static int did_create_dir(struct send_ctx *sctx, u64 dir)
sctx             2691 fs/btrfs/send.c 	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
sctx             2699 fs/btrfs/send.c 			ret = btrfs_next_leaf(sctx->send_root, path);
sctx             2720 fs/btrfs/send.c 		    di_key.objectid < sctx->send_progress) {
sctx             2739 fs/btrfs/send.c static int send_create_inode_if_needed(struct send_ctx *sctx)
sctx             2743 fs/btrfs/send.c 	if (S_ISDIR(sctx->cur_inode_mode)) {
sctx             2744 fs/btrfs/send.c 		ret = did_create_dir(sctx, sctx->cur_ino);
sctx             2753 fs/btrfs/send.c 	ret = send_create_inode(sctx, sctx->cur_ino);
sctx             2826 fs/btrfs/send.c static void free_recorded_refs(struct send_ctx *sctx)
sctx             2828 fs/btrfs/send.c 	__free_recorded_refs(&sctx->new_refs);
sctx             2829 fs/btrfs/send.c 	__free_recorded_refs(&sctx->deleted_refs);
sctx             2837 fs/btrfs/send.c static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
sctx             2847 fs/btrfs/send.c 	ret = gen_unique_name(sctx, ino, gen, orphan);
sctx             2851 fs/btrfs/send.c 	ret = send_rename(sctx, path, orphan);
sctx             2859 fs/btrfs/send.c add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
sctx             2861 fs/btrfs/send.c 	struct rb_node **p = &sctx->orphan_dirs.rb_node;
sctx             2885 fs/btrfs/send.c 	rb_insert_color(&odi->node, &sctx->orphan_dirs);
sctx             2890 fs/btrfs/send.c get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
sctx             2892 fs/btrfs/send.c 	struct rb_node *n = sctx->orphan_dirs.rb_node;
sctx             2907 fs/btrfs/send.c static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
sctx             2909 fs/btrfs/send.c 	struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
sctx             2914 fs/btrfs/send.c static void free_orphan_dir_info(struct send_ctx *sctx,
sctx             2919 fs/btrfs/send.c 	rb_erase(&odi->node, &sctx->orphan_dirs);
sctx             2928 fs/btrfs/send.c static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
sctx             2932 fs/btrfs/send.c 	struct btrfs_root *root = sctx->parent_root;
sctx             2954 fs/btrfs/send.c 	odi = get_orphan_dir_info(sctx, dir);
sctx             2983 fs/btrfs/send.c 		dm = get_waiting_dir_move(sctx, loc.objectid);
sctx             2985 fs/btrfs/send.c 			odi = add_orphan_dir_info(sctx, dir);
sctx             2998 fs/btrfs/send.c 			odi = add_orphan_dir_info(sctx, dir);
sctx             3011 fs/btrfs/send.c 	free_orphan_dir_info(sctx, odi);
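
add_orphan_dir_info and get_orphan_dir_info above keep per-directory state in a tree keyed by inode number, and is_waiting_for_rm is just a membership test. A sketch with a plain unbalanced BST standing in for the kernel's rb-tree; the insert mirrors the parent/link walk in add_orphan_dir_info:

#include <stdlib.h>

struct odi {
	struct odi *left, *right;
	unsigned long long ino;
};

static struct odi *odi_lookup(struct odi *root, unsigned long long ino)
{
	while (root) {
		if (ino < root->ino)
			root = root->left;
		else if (ino > root->ino)
			root = root->right;
		else
			return root;
	}
	return NULL;
}

static struct odi *odi_add(struct odi **rootp, unsigned long long ino)
{
	struct odi **p = rootp;

	while (*p) {
		if (ino < (*p)->ino)
			p = &(*p)->left;
		else if (ino > (*p)->ino)
			p = &(*p)->right;
		else
			return *p;   /* already tracked */
	}
	*p = calloc(1, sizeof(**p));
	if (*p)
		(*p)->ino = ino;
	return *p;
}

static int is_waiting_for_rm_sketch(struct odi *root, unsigned long long ino)
{
	return odi_lookup(root, ino) != NULL;
}
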
sctx             3020 fs/btrfs/send.c static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
sctx             3022 fs/btrfs/send.c 	struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
sctx             3027 fs/btrfs/send.c static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
sctx             3029 fs/btrfs/send.c 	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
sctx             3054 fs/btrfs/send.c 	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
sctx             3059 fs/btrfs/send.c get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
sctx             3061 fs/btrfs/send.c 	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
sctx             3076 fs/btrfs/send.c static void free_waiting_dir_move(struct send_ctx *sctx,
sctx             3081 fs/btrfs/send.c 	rb_erase(&dm->node, &sctx->waiting_dir_moves);
sctx             3085 fs/btrfs/send.c static int add_pending_dir_move(struct send_ctx *sctx,
sctx             3093 fs/btrfs/send.c 	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
sctx             3134 fs/btrfs/send.c 	ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
sctx             3142 fs/btrfs/send.c 		rb_insert_color(&pm->node, &sctx->pending_dir_moves);
sctx             3153 fs/btrfs/send.c static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
sctx             3156 fs/btrfs/send.c 	struct rb_node *n = sctx->pending_dir_moves.rb_node;
sctx             3171 fs/btrfs/send.c static int path_loop(struct send_ctx *sctx, struct fs_path *name,
sctx             3183 fs/btrfs/send.c 		if (is_waiting_for_rm(sctx, ino))
sctx             3185 fs/btrfs/send.c 		if (is_waiting_for_move(sctx, ino)) {
sctx             3188 fs/btrfs/send.c 			ret = get_first_ref(sctx->parent_root, ino,
sctx             3191 fs/btrfs/send.c 			ret = __get_cur_name_and_parent(sctx, ino, gen,
sctx             3213 fs/btrfs/send.c static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
sctx             3218 fs/btrfs/send.c 	u64 orig_progress = sctx->send_progress;
sctx             3234 fs/btrfs/send.c 	dm = get_waiting_dir_move(sctx, pm->ino);
sctx             3238 fs/btrfs/send.c 	free_waiting_dir_move(sctx, dm);
sctx             3241 fs/btrfs/send.c 		ret = gen_unique_name(sctx, pm->ino,
sctx             3244 fs/btrfs/send.c 		ret = get_first_ref(sctx->parent_root, pm->ino,
sctx             3248 fs/btrfs/send.c 		ret = get_cur_path(sctx, parent_ino, parent_gen,
sctx             3257 fs/btrfs/send.c 	sctx->send_progress = sctx->cur_ino + 1;
sctx             3258 fs/btrfs/send.c 	ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
sctx             3264 fs/btrfs/send.c 		ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
sctx             3270 fs/btrfs/send.c 			dm = get_waiting_dir_move(sctx, pm->ino);
sctx             3279 fs/btrfs/send.c 	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
sctx             3283 fs/btrfs/send.c 	ret = send_rename(sctx, from_path, to_path);
sctx             3291 fs/btrfs/send.c 		odi = get_orphan_dir_info(sctx, rmdir_ino);
sctx             3298 fs/btrfs/send.c 		ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
sctx             3309 fs/btrfs/send.c 		ret = get_cur_path(sctx, rmdir_ino, gen, name);
sctx             3312 fs/btrfs/send.c 		ret = send_rmdir(sctx, name);
sctx             3318 fs/btrfs/send.c 	ret = send_utimes(sctx, pm->ino, pm->gen);
sctx             3330 fs/btrfs/send.c 		ret = get_inode_info(sctx->send_root, cur->dir, NULL,
sctx             3339 fs/btrfs/send.c 		ret = send_utimes(sctx, cur->dir, cur->dir_gen);
sctx             3348 fs/btrfs/send.c 	sctx->send_progress = orig_progress;
sctx             3353 fs/btrfs/send.c static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
sctx             3358 fs/btrfs/send.c 		rb_erase(&m->node, &sctx->pending_dir_moves);
sctx             3363 fs/btrfs/send.c static void tail_append_pending_moves(struct send_ctx *sctx,
sctx             3376 fs/btrfs/send.c 		rb_erase(&moves->node, &sctx->pending_dir_moves);
sctx             3381 fs/btrfs/send.c static int apply_children_dir_moves(struct send_ctx *sctx)
sctx             3385 fs/btrfs/send.c 	u64 parent_ino = sctx->cur_ino;
sctx             3388 fs/btrfs/send.c 	pm = get_pending_dir_moves(sctx, parent_ino);
sctx             3393 fs/btrfs/send.c 	tail_append_pending_moves(sctx, pm, &stack);
sctx             3398 fs/btrfs/send.c 		ret = apply_dir_move(sctx, pm);
sctx             3399 fs/btrfs/send.c 		free_pending_move(sctx, pm);
sctx             3402 fs/btrfs/send.c 		pm = get_pending_dir_moves(sctx, parent_ino);
sctx             3404 fs/btrfs/send.c 			tail_append_pending_moves(sctx, pm, &stack);
sctx             3411 fs/btrfs/send.c 		free_pending_move(sctx, pm);
sctx             3452 fs/btrfs/send.c static int wait_for_dest_dir_move(struct send_ctx *sctx,
sctx             3456 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
sctx             3466 fs/btrfs/send.c 	if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
sctx             3477 fs/btrfs/send.c 	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
sctx             3505 fs/btrfs/send.c 	ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
sctx             3509 fs/btrfs/send.c 	ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
sctx             3523 fs/btrfs/send.c 	wdm = get_waiting_dir_move(sctx, di_key.objectid);
sctx             3525 fs/btrfs/send.c 		ret = add_pending_dir_move(sctx,
sctx             3526 fs/btrfs/send.c 					   sctx->cur_ino,
sctx             3527 fs/btrfs/send.c 					   sctx->cur_inode_gen,
sctx             3529 fs/btrfs/send.c 					   &sctx->new_refs,
sctx             3530 fs/btrfs/send.c 					   &sctx->deleted_refs,
sctx             3672 fs/btrfs/send.c static int wait_for_parent_move(struct send_ctx *sctx,
sctx             3701 fs/btrfs/send.c 		if (is_waiting_for_move(sctx, ino)) {
sctx             3712 fs/btrfs/send.c 			ret = is_ancestor(sctx->parent_root,
sctx             3713 fs/btrfs/send.c 					  sctx->cur_ino, sctx->cur_inode_gen,
sctx             3722 fs/btrfs/send.c 		ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
sctx             3726 fs/btrfs/send.c 		ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
sctx             3737 fs/btrfs/send.c 		if (ino > sctx->cur_ino &&
sctx             3742 fs/btrfs/send.c 			ret = get_inode_info(sctx->parent_root, ino, NULL,
sctx             3761 fs/btrfs/send.c 		ret = add_pending_dir_move(sctx,
sctx             3762 fs/btrfs/send.c 					   sctx->cur_ino,
sctx             3763 fs/btrfs/send.c 					   sctx->cur_inode_gen,
sctx             3765 fs/btrfs/send.c 					   &sctx->new_refs,
sctx             3766 fs/btrfs/send.c 					   &sctx->deleted_refs,
sctx             3775 fs/btrfs/send.c static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
sctx             3788 fs/btrfs/send.c 	ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
sctx             3808 fs/btrfs/send.c static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
sctx             3810 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
sctx             3826 fs/btrfs/send.c 	btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
sctx             3832 fs/btrfs/send.c 	BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
sctx             3852 fs/btrfs/send.c 	if (!sctx->cur_inode_new) {
sctx             3853 fs/btrfs/send.c 		ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
sctx             3854 fs/btrfs/send.c 				sctx->cur_inode_gen);
sctx             3860 fs/btrfs/send.c 	if (sctx->cur_inode_new || did_overwrite) {
sctx             3861 fs/btrfs/send.c 		ret = gen_unique_name(sctx, sctx->cur_ino,
sctx             3862 fs/btrfs/send.c 				sctx->cur_inode_gen, valid_path);
sctx             3867 fs/btrfs/send.c 		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
sctx             3873 fs/btrfs/send.c 	list_for_each_entry(cur, &sctx->new_refs, list) {
sctx             3881 fs/btrfs/send.c 		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
sctx             3890 fs/btrfs/send.c 			list_for_each_entry(cur2, &sctx->new_refs, list) {
sctx             3904 fs/btrfs/send.c 				ret = did_create_dir(sctx, cur->dir);
sctx             3908 fs/btrfs/send.c 				ret = send_create_inode(sctx, cur->dir);
sctx             3920 fs/btrfs/send.c 		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
sctx             3926 fs/btrfs/send.c 			ret = is_first_ref(sctx->parent_root,
sctx             3935 fs/btrfs/send.c 				ret = orphanize_inode(sctx, ow_inode, ow_gen,
sctx             3948 fs/btrfs/send.c 				if (is_waiting_for_move(sctx, ow_inode)) {
sctx             3949 fs/btrfs/send.c 					wdm = get_waiting_dir_move(sctx,
sctx             3965 fs/btrfs/send.c 				nce = name_cache_search(sctx, ow_inode, ow_gen);
sctx             3967 fs/btrfs/send.c 					name_cache_delete(sctx, nce);
sctx             3978 fs/btrfs/send.c 				ret = is_ancestor(sctx->parent_root,
sctx             3980 fs/btrfs/send.c 						  sctx->cur_ino, NULL);
sctx             3984 fs/btrfs/send.c 					ret = get_cur_path(sctx, sctx->cur_ino,
sctx             3985 fs/btrfs/send.c 							   sctx->cur_inode_gen,
sctx             3991 fs/btrfs/send.c 				ret = send_unlink(sctx, cur->full_path);
sctx             3997 fs/btrfs/send.c 		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
sctx             3998 fs/btrfs/send.c 			ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
sctx             4007 fs/btrfs/send.c 		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
sctx             4009 fs/btrfs/send.c 			ret = wait_for_parent_move(sctx, cur, is_orphan);
sctx             4024 fs/btrfs/send.c 			ret = send_rename(sctx, valid_path, cur->full_path);
sctx             4032 fs/btrfs/send.c 			if (S_ISDIR(sctx->cur_inode_mode)) {
sctx             4038 fs/btrfs/send.c 				ret = send_rename(sctx, valid_path,
sctx             4054 fs/btrfs/send.c 					ret = update_ref_path(sctx, cur);
sctx             4058 fs/btrfs/send.c 				ret = send_link(sctx, cur->full_path,
sctx             4069 fs/btrfs/send.c 	if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
sctx             4076 fs/btrfs/send.c 		ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
sctx             4077 fs/btrfs/send.c 				sctx->cur_ino);
sctx             4081 fs/btrfs/send.c 			ret = send_rmdir(sctx, valid_path);
sctx             4085 fs/btrfs/send.c 			ret = orphanize_inode(sctx, sctx->cur_ino,
sctx             4086 fs/btrfs/send.c 					sctx->cur_inode_gen, valid_path);
sctx             4092 fs/btrfs/send.c 		list_for_each_entry(cur, &sctx->deleted_refs, list) {
sctx             4097 fs/btrfs/send.c 	} else if (S_ISDIR(sctx->cur_inode_mode) &&
sctx             4098 fs/btrfs/send.c 		   !list_empty(&sctx->deleted_refs)) {
sctx             4102 fs/btrfs/send.c 		cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
sctx             4107 fs/btrfs/send.c 	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
sctx             4113 fs/btrfs/send.c 		list_for_each_entry(cur, &sctx->deleted_refs, list) {
sctx             4114 fs/btrfs/send.c 			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
sctx             4115 fs/btrfs/send.c 					sctx->cur_ino, sctx->cur_inode_gen,
sctx             4128 fs/btrfs/send.c 					ret = update_ref_path(sctx, cur);
sctx             4132 fs/btrfs/send.c 				ret = send_unlink(sctx, cur->full_path);
sctx             4149 fs/btrfs/send.c 			ret = send_unlink(sctx, valid_path);
sctx             4167 fs/btrfs/send.c 		if (cur->dir > sctx->cur_ino)
sctx             4170 fs/btrfs/send.c 		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
sctx             4177 fs/btrfs/send.c 			ret = send_utimes(sctx, cur->dir, cur->dir_gen);
sctx             4182 fs/btrfs/send.c 			ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
sctx             4183 fs/btrfs/send.c 					sctx->cur_ino);
sctx             4187 fs/btrfs/send.c 				ret = get_cur_path(sctx, cur->dir,
sctx             4191 fs/btrfs/send.c 				ret = send_rmdir(sctx, valid_path);
sctx             4203 fs/btrfs/send.c 	free_recorded_refs(sctx);
sctx             4212 fs/btrfs/send.c 	struct send_ctx *sctx = ctx;
sctx             4225 fs/btrfs/send.c 	ret = get_cur_path(sctx, dir, gen, p);
sctx             4244 fs/btrfs/send.c 	struct send_ctx *sctx = ctx;
sctx             4245 fs/btrfs/send.c 	return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
sctx             4253 fs/btrfs/send.c 	struct send_ctx *sctx = ctx;
sctx             4254 fs/btrfs/send.c 	return record_ref(sctx->parent_root, dir, name, ctx,
sctx             4255 fs/btrfs/send.c 			  &sctx->deleted_refs);
sctx             4258 fs/btrfs/send.c static int record_new_ref(struct send_ctx *sctx)
sctx             4262 fs/btrfs/send.c 	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
sctx             4263 fs/btrfs/send.c 				sctx->cmp_key, 0, __record_new_ref, sctx);
sctx             4272 fs/btrfs/send.c static int record_deleted_ref(struct send_ctx *sctx)
sctx             4276 fs/btrfs/send.c 	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
sctx             4277 fs/btrfs/send.c 				sctx->cmp_key, 0, __record_deleted_ref, sctx);
sctx             4350 fs/btrfs/send.c 	struct send_ctx *sctx = ctx;
sctx             4352 fs/btrfs/send.c 	ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
sctx             4357 fs/btrfs/send.c 	ret = find_iref(sctx->parent_root, sctx->right_path,
sctx             4358 fs/btrfs/send.c 			sctx->cmp_key, dir, dir_gen, name);
sctx             4360 fs/btrfs/send.c 		ret = __record_new_ref(num, dir, index, name, sctx);
sctx             4373 fs/btrfs/send.c 	struct send_ctx *sctx = ctx;
sctx             4375 fs/btrfs/send.c 	ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
sctx             4380 fs/btrfs/send.c 	ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
sctx             4383 fs/btrfs/send.c 		ret = __record_deleted_ref(num, dir, index, name, sctx);
sctx             4390 fs/btrfs/send.c static int record_changed_ref(struct send_ctx *sctx)
sctx             4394 fs/btrfs/send.c 	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
sctx             4395 fs/btrfs/send.c 			sctx->cmp_key, 0, __record_changed_new_ref, sctx);
sctx             4398 fs/btrfs/send.c 	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
sctx             4399 fs/btrfs/send.c 			sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
sctx             4412 fs/btrfs/send.c static int process_all_refs(struct send_ctx *sctx,
sctx             4430 fs/btrfs/send.c 		root = sctx->send_root;
sctx             4433 fs/btrfs/send.c 		root = sctx->parent_root;
sctx             4436 fs/btrfs/send.c 		btrfs_err(sctx->send_root->fs_info,
sctx             4442 fs/btrfs/send.c 	key.objectid = sctx->cmp_key->objectid;
sctx             4468 fs/btrfs/send.c 		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
sctx             4481 fs/btrfs/send.c 	ret = process_recorded_refs(sctx, &pending_move);
sctx             4487 fs/btrfs/send.c static int send_set_xattr(struct send_ctx *sctx,
sctx             4494 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
sctx             4498 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
sctx             4499 fs/btrfs/send.c 	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
sctx             4500 fs/btrfs/send.c 	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
sctx             4502 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx             4509 fs/btrfs/send.c static int send_remove_xattr(struct send_ctx *sctx,
sctx             4515 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
sctx             4519 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
sctx             4520 fs/btrfs/send.c 	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
sctx             4522 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx             4535 fs/btrfs/send.c 	struct send_ctx *sctx = ctx;
sctx             4559 fs/btrfs/send.c 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
sctx             4563 fs/btrfs/send.c 	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
sctx             4576 fs/btrfs/send.c 	struct send_ctx *sctx = ctx;
sctx             4583 fs/btrfs/send.c 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
sctx             4587 fs/btrfs/send.c 	ret = send_remove_xattr(sctx, p, name, name_len);
sctx             4594 fs/btrfs/send.c static int process_new_xattr(struct send_ctx *sctx)
sctx             4598 fs/btrfs/send.c 	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
sctx             4599 fs/btrfs/send.c 			       __process_new_xattr, sctx);
sctx             4604 fs/btrfs/send.c static int process_deleted_xattr(struct send_ctx *sctx)
sctx             4606 fs/btrfs/send.c 	return iterate_dir_item(sctx->parent_root, sctx->right_path,
sctx             4607 fs/btrfs/send.c 				__process_deleted_xattr, sctx);
sctx             4674 fs/btrfs/send.c 	struct send_ctx *sctx = ctx;
sctx             4678 fs/btrfs/send.c 	ret = find_xattr(sctx->parent_root, sctx->right_path,
sctx             4679 fs/btrfs/send.c 			 sctx->cmp_key, name, name_len, &found_data,
sctx             4704 fs/btrfs/send.c 	struct send_ctx *sctx = ctx;
sctx             4706 fs/btrfs/send.c 	ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
sctx             4717 fs/btrfs/send.c static int process_changed_xattr(struct send_ctx *sctx)
sctx             4721 fs/btrfs/send.c 	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
sctx             4722 fs/btrfs/send.c 			__process_changed_new_xattr, sctx);
sctx             4725 fs/btrfs/send.c 	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
sctx             4726 fs/btrfs/send.c 			__process_changed_deleted_xattr, sctx);
sctx             4732 fs/btrfs/send.c static int process_all_new_xattrs(struct send_ctx *sctx)
sctx             4746 fs/btrfs/send.c 	root = sctx->send_root;
sctx             4748 fs/btrfs/send.c 	key.objectid = sctx->cmp_key->objectid;
sctx             4776 fs/btrfs/send.c 		ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
sctx             4788 fs/btrfs/send.c static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
sctx             4790 fs/btrfs/send.c 	struct btrfs_root *root = sctx->send_root;
sctx             4801 fs/btrfs/send.c 	key.objectid = sctx->cur_ino;
sctx             4821 fs/btrfs/send.c 	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
sctx             4822 fs/btrfs/send.c 	file_ra_state_init(&sctx->ra, inode->i_mapping);
sctx             4830 fs/btrfs/send.c 			page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
sctx             4842 fs/btrfs/send.c 			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
sctx             4858 fs/btrfs/send.c 		memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
sctx             4876 fs/btrfs/send.c static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
sctx             4878 fs/btrfs/send.c 	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
sctx             4889 fs/btrfs/send.c 	num_read = fill_read_buf(sctx, offset, len);
sctx             4896 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
sctx             4900 fs/btrfs/send.c 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
sctx             4904 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
sctx             4905 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
sctx             4906 fs/btrfs/send.c 	TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
sctx             4908 fs/btrfs/send.c 	ret = send_cmd(sctx);
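
send_write() above is the template every command follows: begin_cmd() opens a command record, the TLV_PUT_* macros append typed attributes to sctx->send_buf, and send_cmd() flushes the record to the stream file. Below is a hedged userspace sketch of the attribute encoding only; the packed 2+2 byte little-endian header is my reading of the send-stream format in fs/btrfs/send.h, and the attribute ids here are made up.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Assumed layout: a little-endian type/length header precedes each
 * attribute payload.  This sketch is only byte-correct on a
 * little-endian host. */
struct tlv_header {
	uint16_t tlv_type;	/* attribute id, e.g. BTRFS_SEND_A_PATH */
	uint16_t tlv_len;	/* payload length in bytes */
} __attribute__((packed));

static int tlv_put(uint8_t *buf, size_t size, size_t *pos,
		   uint16_t type, const void *data, uint16_t len)
{
	struct tlv_header hdr = { .tlv_type = type, .tlv_len = len };

	if (*pos + sizeof(hdr) + len > size)
		return -1;	/* the kernel path reports an overflow here */
	memcpy(buf + *pos, &hdr, sizeof(hdr));
	memcpy(buf + *pos + sizeof(hdr), data, len);
	*pos += sizeof(hdr) + len;
	return 0;
}

int main(void)
{
	uint8_t send_buf[64];
	size_t pos = 0;
	uint64_t offset = 4096;

	tlv_put(send_buf, sizeof(send_buf), &pos, 1, "a/b", 3);   /* path */
	tlv_put(send_buf, sizeof(send_buf), &pos, 2, &offset, 8); /* u64  */
	printf("attributes use %zu bytes\n", pos);
	return 0;
}
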
sctx             4921 fs/btrfs/send.c static int send_clone(struct send_ctx *sctx,
sctx             4929 fs/btrfs/send.c 	btrfs_debug(sctx->send_root->fs_info,
sctx             4938 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
sctx             4942 fs/btrfs/send.c 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
sctx             4946 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
sctx             4947 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
sctx             4948 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
sctx             4950 fs/btrfs/send.c 	if (clone_root->root == sctx->send_root) {
sctx             4951 fs/btrfs/send.c 		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
sctx             4955 fs/btrfs/send.c 		ret = get_cur_path(sctx, clone_root->ino, gen, p);
sctx             4972 fs/btrfs/send.c 		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
sctx             4975 fs/btrfs/send.c 		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
sctx             4977 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
sctx             4979 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
sctx             4980 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
sctx             4983 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx             4994 fs/btrfs/send.c static int send_update_extent(struct send_ctx *sctx,
sctx             5004 fs/btrfs/send.c 	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
sctx             5008 fs/btrfs/send.c 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
sctx             5012 fs/btrfs/send.c 	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
sctx             5013 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
sctx             5014 fs/btrfs/send.c 	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
sctx             5016 fs/btrfs/send.c 	ret = send_cmd(sctx);
sctx             5024 fs/btrfs/send.c static int send_hole(struct send_ctx *sctx, u64 end)
sctx             5027 fs/btrfs/send.c 	u64 offset = sctx->cur_inode_last_extent;
sctx             5037 fs/btrfs/send.c 	if (offset >= sctx->cur_inode_size)
sctx             5044 fs/btrfs/send.c 	end = min_t(u64, end, sctx->cur_inode_size);
sctx             5046 fs/btrfs/send.c 	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
sctx             5047 fs/btrfs/send.c 		return send_update_extent(sctx, offset, end - offset);
sctx             5052 fs/btrfs/send.c 	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
sctx             5055 fs/btrfs/send.c 	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
sctx             5059 fs/btrfs/send.c 		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
sctx             5062 fs/btrfs/send.c 		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
sctx             5063 fs/btrfs/send.c 		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
sctx             5064 fs/btrfs/send.c 		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
sctx             5065 fs/btrfs/send.c 		ret = send_cmd(sctx);
sctx             5070 fs/btrfs/send.c 	sctx->cur_inode_next_write_offset = offset;
sctx             5076 fs/btrfs/send.c static int send_extent_data(struct send_ctx *sctx,
sctx             5082 fs/btrfs/send.c 	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
sctx             5083 fs/btrfs/send.c 		return send_update_extent(sctx, offset, len);
sctx             5091 fs/btrfs/send.c 		ret = send_write(sctx, offset + sent, size);
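
send_extent_data() above never emits one oversized write: it slices the extent into read-buffer-sized pieces and issues one write command per piece, advancing a running sent offset. A minimal sketch of that loop, assuming the historical 48K value of BTRFS_SEND_READ_SIZE (the exact constant does not change the shape):

#include <stdint.h>
#include <stdio.h>

#define READ_SIZE (48 * 1024)	/* stands in for BTRFS_SEND_READ_SIZE */

static int send_write_stub(uint64_t offset, uint32_t len)
{
	printf("WRITE offset=%llu len=%u\n",
	       (unsigned long long)offset, len);
	return 0;
}

static int send_extent_data(uint64_t offset, uint64_t len)
{
	uint64_t sent = 0;

	while (sent < len) {
		uint64_t size = len - sent;

		if (size > READ_SIZE)	/* cap each command at one buffer */
			size = READ_SIZE;
		if (send_write_stub(offset + sent, (uint32_t)size))
			return -1;
		sent += size;
	}
	return 0;
}

int main(void)
{
	return send_extent_data(0, 100 * 1024);	/* 48K + 48K + 4K */
}
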
sctx             5101 fs/btrfs/send.c static int clone_range(struct send_ctx *sctx,
sctx             5129 fs/btrfs/send.c 	    len == sctx->send_root->fs_info->sectorsize)
sctx             5130 fs/btrfs/send.c 		return send_extent_data(sctx, offset, len);
sctx             5227 fs/btrfs/send.c 			ret = send_extent_data(sctx, offset, hole_len);
sctx             5289 fs/btrfs/send.c 			    offset + clone_len < sctx->cur_inode_size) {
sctx             5295 fs/btrfs/send.c 					ret = send_clone(sctx, offset, slen,
sctx             5300 fs/btrfs/send.c 				ret = send_extent_data(sctx, offset + slen,
sctx             5303 fs/btrfs/send.c 				ret = send_clone(sctx, offset, clone_len,
sctx             5307 fs/btrfs/send.c 			ret = send_extent_data(sctx, offset, clone_len);
sctx             5324 fs/btrfs/send.c 		ret = send_extent_data(sctx, offset, len);
sctx             5332 fs/btrfs/send.c static int send_write_or_clone(struct send_ctx *sctx,
sctx             5342 fs/btrfs/send.c 	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
sctx             5359 fs/btrfs/send.c 	if (offset >= sctx->cur_inode_size) {
sctx             5363 fs/btrfs/send.c 	if (offset + len > sctx->cur_inode_size)
sctx             5364 fs/btrfs/send.c 		len = sctx->cur_inode_size - offset;
sctx             5376 fs/btrfs/send.c 		ret = clone_range(sctx, clone_root, disk_byte, data_offset,
sctx             5379 fs/btrfs/send.c 		ret = send_extent_data(sctx, offset, len);
sctx             5381 fs/btrfs/send.c 	sctx->cur_inode_next_write_offset = offset + len;
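
send_write_or_clone() above keys its decision off the destination block size bs (note the s_blocksize lookup): a clone command can only describe whole destination blocks, so ranges that do not line up fall back to plain write commands via send_extent_data(). A toy decision sketch using the kernel's IS_ALIGNED idiom; the exact predicate the kernel applies in this era is my assumption:

#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)	/* kernel idiom */

int main(void)
{
	uint64_t bs = 4096;			/* destination block size */
	uint64_t offset = 8192, len = 6000;	/* end is not block aligned */

	if (IS_ALIGNED(offset, bs) && IS_ALIGNED(offset + len, bs))
		puts("emit a clone command for the whole range");
	else
		puts("fall back to write commands for this range");
	return 0;
}
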
sctx             5386 fs/btrfs/send.c static int is_extent_unchanged(struct send_ctx *sctx,
sctx             5451 fs/btrfs/send.c 	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
sctx             5541 fs/btrfs/send.c 		ret = btrfs_next_item(sctx->parent_root, path);
sctx             5576 fs/btrfs/send.c static int get_last_extent(struct send_ctx *sctx, u64 offset)
sctx             5579 fs/btrfs/send.c 	struct btrfs_root *root = sctx->send_root;
sctx             5590 fs/btrfs/send.c 	sctx->cur_inode_last_extent = 0;
sctx             5592 fs/btrfs/send.c 	key.objectid = sctx->cur_ino;
sctx             5600 fs/btrfs/send.c 	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
sctx             5609 fs/btrfs/send.c 				   sctx->send_root->fs_info->sectorsize);
sctx             5614 fs/btrfs/send.c 	sctx->cur_inode_last_extent = extent_end;
sctx             5620 fs/btrfs/send.c static int range_is_hole_in_parent(struct send_ctx *sctx,
sctx             5626 fs/btrfs/send.c 	struct btrfs_root *root = sctx->parent_root;
sctx             5634 fs/btrfs/send.c 	key.objectid = sctx->cur_ino;
sctx             5659 fs/btrfs/send.c 		if (key.objectid < sctx->cur_ino ||
sctx             5662 fs/btrfs/send.c 		if (key.objectid > sctx->cur_ino ||
sctx             5695 fs/btrfs/send.c static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
sctx             5703 fs/btrfs/send.c 	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
sctx             5706 fs/btrfs/send.c 	if (sctx->cur_inode_last_extent == (u64)-1) {
sctx             5707 fs/btrfs/send.c 		ret = get_last_extent(sctx, key->offset - 1);
sctx             5718 fs/btrfs/send.c 				   sctx->send_root->fs_info->sectorsize);
sctx             5725 fs/btrfs/send.c 	    sctx->cur_inode_last_extent < key->offset) {
sctx             5733 fs/btrfs/send.c 		ret = get_last_extent(sctx, key->offset - 1);
sctx             5738 fs/btrfs/send.c 	if (sctx->cur_inode_last_extent < key->offset) {
sctx             5739 fs/btrfs/send.c 		ret = range_is_hole_in_parent(sctx,
sctx             5740 fs/btrfs/send.c 					      sctx->cur_inode_last_extent,
sctx             5745 fs/btrfs/send.c 			ret = send_hole(sctx, key->offset);
sctx             5749 fs/btrfs/send.c 	sctx->cur_inode_last_extent = extent_end;
sctx             5753 fs/btrfs/send.c static int process_extent(struct send_ctx *sctx,
sctx             5760 fs/btrfs/send.c 	if (S_ISLNK(sctx->cur_inode_mode))
sctx             5763 fs/btrfs/send.c 	if (sctx->parent_root && !sctx->cur_inode_new) {
sctx             5764 fs/btrfs/send.c 		ret = is_extent_unchanged(sctx, path, key);
sctx             5799 fs/btrfs/send.c 	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
sctx             5800 fs/btrfs/send.c 			sctx->cur_inode_size, &found_clone);
sctx             5804 fs/btrfs/send.c 	ret = send_write_or_clone(sctx, path, key, found_clone);
sctx             5808 fs/btrfs/send.c 	ret = maybe_send_hole(sctx, path, key);
sctx             5813 fs/btrfs/send.c static int process_all_extents(struct send_ctx *sctx)
sctx             5823 fs/btrfs/send.c 	root = sctx->send_root;
sctx             5828 fs/btrfs/send.c 	key.objectid = sctx->cmp_key->objectid;
sctx             5858 fs/btrfs/send.c 		ret = process_extent(sctx, path, &found_key);
sctx             5870 fs/btrfs/send.c static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
sctx             5876 fs/btrfs/send.c 	if (sctx->cur_ino == 0)
sctx             5878 fs/btrfs/send.c 	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
sctx             5879 fs/btrfs/send.c 	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
sctx             5881 fs/btrfs/send.c 	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
sctx             5884 fs/btrfs/send.c 	ret = process_recorded_refs(sctx, pending_move);
sctx             5893 fs/btrfs/send.c static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
sctx             5908 fs/btrfs/send.c 	if (sctx->ignore_cur_inode)
sctx             5911 fs/btrfs/send.c 	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
sctx             5929 fs/btrfs/send.c 		sctx->send_progress = sctx->cur_ino + 1;
sctx             5931 fs/btrfs/send.c 	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
sctx             5933 fs/btrfs/send.c 	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
sctx             5936 fs/btrfs/send.c 	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
sctx             5941 fs/btrfs/send.c 	if (!sctx->parent_root || sctx->cur_inode_new) {
sctx             5943 fs/btrfs/send.c 		if (!S_ISLNK(sctx->cur_inode_mode))
sctx             5945 fs/btrfs/send.c 		if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
sctx             5950 fs/btrfs/send.c 		ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
sctx             5958 fs/btrfs/send.c 		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
sctx             5960 fs/btrfs/send.c 		if ((old_size == sctx->cur_inode_size) ||
sctx             5961 fs/btrfs/send.c 		    (sctx->cur_inode_size > old_size &&
sctx             5962 fs/btrfs/send.c 		     sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
sctx             5966 fs/btrfs/send.c 	if (S_ISREG(sctx->cur_inode_mode)) {
sctx             5967 fs/btrfs/send.c 		if (need_send_hole(sctx)) {
sctx             5968 fs/btrfs/send.c 			if (sctx->cur_inode_last_extent == (u64)-1 ||
sctx             5969 fs/btrfs/send.c 			    sctx->cur_inode_last_extent <
sctx             5970 fs/btrfs/send.c 			    sctx->cur_inode_size) {
sctx             5971 fs/btrfs/send.c 				ret = get_last_extent(sctx, (u64)-1);
sctx             5975 fs/btrfs/send.c 			if (sctx->cur_inode_last_extent <
sctx             5976 fs/btrfs/send.c 			    sctx->cur_inode_size) {
sctx             5977 fs/btrfs/send.c 				ret = send_hole(sctx, sctx->cur_inode_size);
sctx             5983 fs/btrfs/send.c 			ret = send_truncate(sctx, sctx->cur_ino,
sctx             5984 fs/btrfs/send.c 					    sctx->cur_inode_gen,
sctx             5985 fs/btrfs/send.c 					    sctx->cur_inode_size);
sctx             5992 fs/btrfs/send.c 		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
sctx             5998 fs/btrfs/send.c 		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
sctx             6008 fs/btrfs/send.c 	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
sctx             6009 fs/btrfs/send.c 		ret = apply_children_dir_moves(sctx);
sctx             6019 fs/btrfs/send.c 		sctx->send_progress = sctx->cur_ino + 1;
sctx             6020 fs/btrfs/send.c 		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
sctx             6031 fs/btrfs/send.c 	struct send_ctx *sctx;
sctx             6039 fs/btrfs/send.c 	return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
sctx             6047 fs/btrfs/send.c static int btrfs_unlink_all_paths(struct send_ctx *sctx)
sctx             6059 fs/btrfs/send.c 	key.objectid = sctx->cur_ino;
sctx             6062 fs/btrfs/send.c 	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
sctx             6067 fs/btrfs/send.c 	ctx.sctx = sctx;
sctx             6074 fs/btrfs/send.c 			ret = btrfs_next_leaf(sctx->parent_root, path);
sctx             6083 fs/btrfs/send.c 		if (key.objectid != sctx->cur_ino)
sctx             6089 fs/btrfs/send.c 		ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
sctx             6101 fs/btrfs/send.c 		ret = send_unlink(sctx, ref->full_path);
sctx             6116 fs/btrfs/send.c static int changed_inode(struct send_ctx *sctx,
sctx             6120 fs/btrfs/send.c 	struct btrfs_key *key = sctx->cmp_key;
sctx             6126 fs/btrfs/send.c 	sctx->cur_ino = key->objectid;
sctx             6127 fs/btrfs/send.c 	sctx->cur_inode_new_gen = 0;
sctx             6128 fs/btrfs/send.c 	sctx->cur_inode_last_extent = (u64)-1;
sctx             6129 fs/btrfs/send.c 	sctx->cur_inode_next_write_offset = 0;
sctx             6130 fs/btrfs/send.c 	sctx->ignore_cur_inode = false;
sctx             6137 fs/btrfs/send.c 	sctx->send_progress = sctx->cur_ino;
sctx             6141 fs/btrfs/send.c 		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
sctx             6142 fs/btrfs/send.c 				sctx->left_path->slots[0],
sctx             6144 fs/btrfs/send.c 		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
sctx             6147 fs/btrfs/send.c 		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
sctx             6148 fs/btrfs/send.c 				sctx->right_path->slots[0],
sctx             6150 fs/btrfs/send.c 		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
sctx             6154 fs/btrfs/send.c 		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
sctx             6155 fs/btrfs/send.c 				sctx->right_path->slots[0],
sctx             6158 fs/btrfs/send.c 		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
sctx             6167 fs/btrfs/send.c 		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
sctx             6168 fs/btrfs/send.c 			sctx->cur_inode_new_gen = 1;
sctx             6189 fs/btrfs/send.c 		nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
sctx             6191 fs/btrfs/send.c 			sctx->ignore_cur_inode = true;
sctx             6193 fs/btrfs/send.c 				ret = btrfs_unlink_all_paths(sctx);
sctx             6199 fs/btrfs/send.c 		sctx->cur_inode_gen = left_gen;
sctx             6200 fs/btrfs/send.c 		sctx->cur_inode_new = 1;
sctx             6201 fs/btrfs/send.c 		sctx->cur_inode_deleted = 0;
sctx             6202 fs/btrfs/send.c 		sctx->cur_inode_size = btrfs_inode_size(
sctx             6203 fs/btrfs/send.c 				sctx->left_path->nodes[0], left_ii);
sctx             6204 fs/btrfs/send.c 		sctx->cur_inode_mode = btrfs_inode_mode(
sctx             6205 fs/btrfs/send.c 				sctx->left_path->nodes[0], left_ii);
sctx             6206 fs/btrfs/send.c 		sctx->cur_inode_rdev = btrfs_inode_rdev(
sctx             6207 fs/btrfs/send.c 				sctx->left_path->nodes[0], left_ii);
sctx             6208 fs/btrfs/send.c 		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
sctx             6209 fs/btrfs/send.c 			ret = send_create_inode_if_needed(sctx);
sctx             6211 fs/btrfs/send.c 		sctx->cur_inode_gen = right_gen;
sctx             6212 fs/btrfs/send.c 		sctx->cur_inode_new = 0;
sctx             6213 fs/btrfs/send.c 		sctx->cur_inode_deleted = 1;
sctx             6214 fs/btrfs/send.c 		sctx->cur_inode_size = btrfs_inode_size(
sctx             6215 fs/btrfs/send.c 				sctx->right_path->nodes[0], right_ii);
sctx             6216 fs/btrfs/send.c 		sctx->cur_inode_mode = btrfs_inode_mode(
sctx             6217 fs/btrfs/send.c 				sctx->right_path->nodes[0], right_ii);
sctx             6226 fs/btrfs/send.c 		if (sctx->cur_inode_new_gen) {
sctx             6230 fs/btrfs/send.c 			sctx->cur_inode_gen = right_gen;
sctx             6231 fs/btrfs/send.c 			sctx->cur_inode_new = 0;
sctx             6232 fs/btrfs/send.c 			sctx->cur_inode_deleted = 1;
sctx             6233 fs/btrfs/send.c 			sctx->cur_inode_size = btrfs_inode_size(
sctx             6234 fs/btrfs/send.c 					sctx->right_path->nodes[0], right_ii);
sctx             6235 fs/btrfs/send.c 			sctx->cur_inode_mode = btrfs_inode_mode(
sctx             6236 fs/btrfs/send.c 					sctx->right_path->nodes[0], right_ii);
sctx             6237 fs/btrfs/send.c 			ret = process_all_refs(sctx,
sctx             6245 fs/btrfs/send.c 			sctx->cur_inode_gen = left_gen;
sctx             6246 fs/btrfs/send.c 			sctx->cur_inode_new = 1;
sctx             6247 fs/btrfs/send.c 			sctx->cur_inode_deleted = 0;
sctx             6248 fs/btrfs/send.c 			sctx->cur_inode_size = btrfs_inode_size(
sctx             6249 fs/btrfs/send.c 					sctx->left_path->nodes[0], left_ii);
sctx             6250 fs/btrfs/send.c 			sctx->cur_inode_mode = btrfs_inode_mode(
sctx             6251 fs/btrfs/send.c 					sctx->left_path->nodes[0], left_ii);
sctx             6252 fs/btrfs/send.c 			sctx->cur_inode_rdev = btrfs_inode_rdev(
sctx             6253 fs/btrfs/send.c 					sctx->left_path->nodes[0], left_ii);
sctx             6254 fs/btrfs/send.c 			ret = send_create_inode_if_needed(sctx);
sctx             6258 fs/btrfs/send.c 			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
sctx             6265 fs/btrfs/send.c 			sctx->send_progress = sctx->cur_ino + 1;
sctx             6271 fs/btrfs/send.c 			ret = process_all_extents(sctx);
sctx             6274 fs/btrfs/send.c 			ret = process_all_new_xattrs(sctx);
sctx             6278 fs/btrfs/send.c 			sctx->cur_inode_gen = left_gen;
sctx             6279 fs/btrfs/send.c 			sctx->cur_inode_new = 0;
sctx             6280 fs/btrfs/send.c 			sctx->cur_inode_new_gen = 0;
sctx             6281 fs/btrfs/send.c 			sctx->cur_inode_deleted = 0;
sctx             6282 fs/btrfs/send.c 			sctx->cur_inode_size = btrfs_inode_size(
sctx             6283 fs/btrfs/send.c 					sctx->left_path->nodes[0], left_ii);
sctx             6284 fs/btrfs/send.c 			sctx->cur_inode_mode = btrfs_inode_mode(
sctx             6285 fs/btrfs/send.c 					sctx->left_path->nodes[0], left_ii);
sctx             6303 fs/btrfs/send.c static int changed_ref(struct send_ctx *sctx,
sctx             6308 fs/btrfs/send.c 	if (sctx->cur_ino != sctx->cmp_key->objectid) {
sctx             6309 fs/btrfs/send.c 		inconsistent_snapshot_error(sctx, result, "reference");
sctx             6313 fs/btrfs/send.c 	if (!sctx->cur_inode_new_gen &&
sctx             6314 fs/btrfs/send.c 	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
sctx             6316 fs/btrfs/send.c 			ret = record_new_ref(sctx);
sctx             6318 fs/btrfs/send.c 			ret = record_deleted_ref(sctx);
sctx             6320 fs/btrfs/send.c 			ret = record_changed_ref(sctx);
sctx             6331 fs/btrfs/send.c static int changed_xattr(struct send_ctx *sctx,
sctx             6336 fs/btrfs/send.c 	if (sctx->cur_ino != sctx->cmp_key->objectid) {
sctx             6337 fs/btrfs/send.c 		inconsistent_snapshot_error(sctx, result, "xattr");
sctx             6341 fs/btrfs/send.c 	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
sctx             6343 fs/btrfs/send.c 			ret = process_new_xattr(sctx);
sctx             6345 fs/btrfs/send.c 			ret = process_deleted_xattr(sctx);
sctx             6347 fs/btrfs/send.c 			ret = process_changed_xattr(sctx);
sctx             6358 fs/btrfs/send.c static int changed_extent(struct send_ctx *sctx,
sctx             6376 fs/btrfs/send.c 	if (sctx->cur_ino != sctx->cmp_key->objectid)
sctx             6379 fs/btrfs/send.c 	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
sctx             6381 fs/btrfs/send.c 			ret = process_extent(sctx, sctx->left_path,
sctx             6382 fs/btrfs/send.c 					sctx->cmp_key);
sctx             6388 fs/btrfs/send.c static int dir_changed(struct send_ctx *sctx, u64 dir)
sctx             6393 fs/btrfs/send.c 	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
sctx             6398 fs/btrfs/send.c 	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
sctx             6406 fs/btrfs/send.c static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
sctx             6422 fs/btrfs/send.c 		ret = dir_changed(sctx, dirid);
sctx             6437 fs/btrfs/send.c 		ret = dir_changed(sctx, dirid);
sctx             6457 fs/btrfs/send.c 	struct send_ctx *sctx = ctx;
sctx             6462 fs/btrfs/send.c 			ret = compare_refs(sctx, left_path, key);
sctx             6468 fs/btrfs/send.c 			return maybe_send_hole(sctx, left_path, key);
sctx             6476 fs/btrfs/send.c 	sctx->left_path = left_path;
sctx             6477 fs/btrfs/send.c 	sctx->right_path = right_path;
sctx             6478 fs/btrfs/send.c 	sctx->cmp_key = key;
sctx             6480 fs/btrfs/send.c 	ret = finish_inode_if_needed(sctx, 0);
sctx             6490 fs/btrfs/send.c 		ret = changed_inode(sctx, result);
sctx             6491 fs/btrfs/send.c 	} else if (!sctx->ignore_cur_inode) {
sctx             6494 fs/btrfs/send.c 			ret = changed_ref(sctx, result);
sctx             6496 fs/btrfs/send.c 			ret = changed_xattr(sctx, result);
sctx             6498 fs/btrfs/send.c 			ret = changed_extent(sctx, result);
sctx             6505 fs/btrfs/send.c static int full_send_tree(struct send_ctx *sctx)
sctx             6508 fs/btrfs/send.c 	struct btrfs_root *send_root = sctx->send_root;
sctx             6534 fs/btrfs/send.c 				 BTRFS_COMPARE_TREE_NEW, sctx);
sctx             6548 fs/btrfs/send.c 	ret = finish_inode_if_needed(sctx, 1);
sctx             6915 fs/btrfs/send.c static int send_subvol(struct send_ctx *sctx)
sctx             6919 fs/btrfs/send.c 	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
sctx             6920 fs/btrfs/send.c 		ret = send_header(sctx);
sctx             6925 fs/btrfs/send.c 	ret = send_subvol_begin(sctx);
sctx             6929 fs/btrfs/send.c 	if (sctx->parent_root) {
sctx             6930 fs/btrfs/send.c 		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
sctx             6931 fs/btrfs/send.c 				changed_cb, sctx);
sctx             6934 fs/btrfs/send.c 		ret = finish_inode_if_needed(sctx, 1);
sctx             6938 fs/btrfs/send.c 		ret = full_send_tree(sctx);
sctx             6944 fs/btrfs/send.c 	free_recorded_refs(sctx);
sctx             6961 fs/btrfs/send.c static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
sctx             6967 fs/btrfs/send.c 	if (sctx->parent_root &&
sctx             6968 fs/btrfs/send.c 	    sctx->parent_root->node != sctx->parent_root->commit_root)
sctx             6971 fs/btrfs/send.c 	for (i = 0; i < sctx->clone_roots_cnt; i++)
sctx             6972 fs/btrfs/send.c 		if (sctx->clone_roots[i].root->node !=
sctx             6973 fs/btrfs/send.c 		    sctx->clone_roots[i].root->commit_root)
sctx             6984 fs/btrfs/send.c 		trans = btrfs_join_transaction(sctx->send_root);
sctx             7001 fs/btrfs/send.c static int flush_delalloc_roots(struct send_ctx *sctx)
sctx             7003 fs/btrfs/send.c 	struct btrfs_root *root = sctx->parent_root;
sctx             7014 fs/btrfs/send.c 	for (i = 0; i < sctx->clone_roots_cnt; i++) {
sctx             7015 fs/btrfs/send.c 		root = sctx->clone_roots[i].root;
sctx             7054 fs/btrfs/send.c 	struct send_ctx *sctx = NULL;
sctx             7110 fs/btrfs/send.c 	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
sctx             7111 fs/btrfs/send.c 	if (!sctx) {
sctx             7116 fs/btrfs/send.c 	INIT_LIST_HEAD(&sctx->new_refs);
sctx             7117 fs/btrfs/send.c 	INIT_LIST_HEAD(&sctx->deleted_refs);
sctx             7118 fs/btrfs/send.c 	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
sctx             7119 fs/btrfs/send.c 	INIT_LIST_HEAD(&sctx->name_cache_list);
sctx             7121 fs/btrfs/send.c 	sctx->flags = arg->flags;
sctx             7123 fs/btrfs/send.c 	sctx->send_filp = fget(arg->send_fd);
sctx             7124 fs/btrfs/send.c 	if (!sctx->send_filp) {
sctx             7129 fs/btrfs/send.c 	sctx->send_root = send_root;
sctx             7134 fs/btrfs/send.c 	if (btrfs_root_dead(sctx->send_root)) {
sctx             7139 fs/btrfs/send.c 	sctx->clone_roots_cnt = arg->clone_sources_count;
sctx             7141 fs/btrfs/send.c 	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
sctx             7142 fs/btrfs/send.c 	sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
sctx             7143 fs/btrfs/send.c 	if (!sctx->send_buf) {
sctx             7148 fs/btrfs/send.c 	sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
sctx             7149 fs/btrfs/send.c 	if (!sctx->read_buf) {
sctx             7154 fs/btrfs/send.c 	sctx->pending_dir_moves = RB_ROOT;
sctx             7155 fs/btrfs/send.c 	sctx->waiting_dir_moves = RB_ROOT;
sctx             7156 fs/btrfs/send.c 	sctx->orphan_dirs = RB_ROOT;
sctx             7160 fs/btrfs/send.c 	sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
sctx             7161 fs/btrfs/send.c 	if (!sctx->clone_roots) {
sctx             7214 fs/btrfs/send.c 			sctx->clone_roots[i].root = clone_root;
sctx             7228 fs/btrfs/send.c 		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
sctx             7229 fs/btrfs/send.c 		if (IS_ERR(sctx->parent_root)) {
sctx             7231 fs/btrfs/send.c 			ret = PTR_ERR(sctx->parent_root);
sctx             7235 fs/btrfs/send.c 		spin_lock(&sctx->parent_root->root_item_lock);
sctx             7236 fs/btrfs/send.c 		sctx->parent_root->send_in_progress++;
sctx             7237 fs/btrfs/send.c 		if (!btrfs_root_readonly(sctx->parent_root) ||
sctx             7238 fs/btrfs/send.c 				btrfs_root_dead(sctx->parent_root)) {
sctx             7239 fs/btrfs/send.c 			spin_unlock(&sctx->parent_root->root_item_lock);
sctx             7244 fs/btrfs/send.c 		if (sctx->parent_root->dedupe_in_progress) {
sctx             7245 fs/btrfs/send.c 			dedupe_in_progress_warn(sctx->parent_root);
sctx             7246 fs/btrfs/send.c 			spin_unlock(&sctx->parent_root->root_item_lock);
sctx             7251 fs/btrfs/send.c 		spin_unlock(&sctx->parent_root->root_item_lock);
sctx             7261 fs/btrfs/send.c 	sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
sctx             7264 fs/btrfs/send.c 	sort(sctx->clone_roots, sctx->clone_roots_cnt,
sctx             7265 fs/btrfs/send.c 			sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
sctx             7269 fs/btrfs/send.c 	ret = flush_delalloc_roots(sctx);
sctx             7273 fs/btrfs/send.c 	ret = ensure_commit_roots_uptodate(sctx);
sctx             7289 fs/btrfs/send.c 	ret = send_subvol(sctx);
sctx             7297 fs/btrfs/send.c 	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
sctx             7298 fs/btrfs/send.c 		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
sctx             7301 fs/btrfs/send.c 		ret = send_cmd(sctx);
sctx             7307 fs/btrfs/send.c 	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
sctx             7308 fs/btrfs/send.c 	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
sctx             7312 fs/btrfs/send.c 		n = rb_first(&sctx->pending_dir_moves);
sctx             7319 fs/btrfs/send.c 			free_pending_move(sctx, pm2);
sctx             7321 fs/btrfs/send.c 		free_pending_move(sctx, pm);
sctx             7324 fs/btrfs/send.c 	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
sctx             7325 fs/btrfs/send.c 	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
sctx             7329 fs/btrfs/send.c 		n = rb_first(&sctx->waiting_dir_moves);
sctx             7331 fs/btrfs/send.c 		rb_erase(&dm->node, &sctx->waiting_dir_moves);
sctx             7335 fs/btrfs/send.c 	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
sctx             7336 fs/btrfs/send.c 	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
sctx             7340 fs/btrfs/send.c 		n = rb_first(&sctx->orphan_dirs);
sctx             7342 fs/btrfs/send.c 		free_orphan_dir_info(sctx, odi);
sctx             7346 fs/btrfs/send.c 		for (i = 0; i < sctx->clone_roots_cnt; i++)
sctx             7348 fs/btrfs/send.c 					sctx->clone_roots[i].root);
sctx             7350 fs/btrfs/send.c 		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
sctx             7352 fs/btrfs/send.c 					sctx->clone_roots[i].root);
sctx             7356 fs/btrfs/send.c 	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
sctx             7357 fs/btrfs/send.c 		btrfs_root_dec_send_in_progress(sctx->parent_root);
sctx             7361 fs/btrfs/send.c 	if (sctx) {
sctx             7362 fs/btrfs/send.c 		if (sctx->send_filp)
sctx             7363 fs/btrfs/send.c 			fput(sctx->send_filp);
sctx             7365 fs/btrfs/send.c 		kvfree(sctx->clone_roots);
sctx             7366 fs/btrfs/send.c 		kvfree(sctx->send_buf);
sctx             7367 fs/btrfs/send.c 		kvfree(sctx->read_buf);
sctx             7369 fs/btrfs/send.c 		name_cache_free(sctx);
sctx             7371 fs/btrfs/send.c 		kfree(sctx);
sctx              126 include/crypto/sha.h static inline int sha256_init(struct sha256_state *sctx)
sctx              128 include/crypto/sha.h 	sctx->state[0] = SHA256_H0;
sctx              129 include/crypto/sha.h 	sctx->state[1] = SHA256_H1;
sctx              130 include/crypto/sha.h 	sctx->state[2] = SHA256_H2;
sctx              131 include/crypto/sha.h 	sctx->state[3] = SHA256_H3;
sctx              132 include/crypto/sha.h 	sctx->state[4] = SHA256_H4;
sctx              133 include/crypto/sha.h 	sctx->state[5] = SHA256_H5;
sctx              134 include/crypto/sha.h 	sctx->state[6] = SHA256_H6;
sctx              135 include/crypto/sha.h 	sctx->state[7] = SHA256_H7;
sctx              136 include/crypto/sha.h 	sctx->count = 0;
sctx              140 include/crypto/sha.h extern int sha256_update(struct sha256_state *sctx, const u8 *input,
sctx              142 include/crypto/sha.h extern int sha256_final(struct sha256_state *sctx, u8 *hash);
sctx              144 include/crypto/sha.h static inline int sha224_init(struct sha256_state *sctx)
sctx              146 include/crypto/sha.h 	sctx->state[0] = SHA224_H0;
sctx              147 include/crypto/sha.h 	sctx->state[1] = SHA224_H1;
sctx              148 include/crypto/sha.h 	sctx->state[2] = SHA224_H2;
sctx              149 include/crypto/sha.h 	sctx->state[3] = SHA224_H3;
sctx              150 include/crypto/sha.h 	sctx->state[4] = SHA224_H4;
sctx              151 include/crypto/sha.h 	sctx->state[5] = SHA224_H5;
sctx              152 include/crypto/sha.h 	sctx->state[6] = SHA224_H6;
sctx              153 include/crypto/sha.h 	sctx->state[7] = SHA224_H7;
sctx              154 include/crypto/sha.h 	sctx->count = 0;
sctx              158 include/crypto/sha.h extern int sha224_update(struct sha256_state *sctx, const u8 *input,
sctx              160 include/crypto/sha.h extern int sha224_final(struct sha256_state *sctx, u8 *hash);
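
sha256_init()/sha256_update()/sha256_final() above form the usual three-phase streaming-hash API: load the initial state words, absorb arbitrary-length input, then emit the digest and wipe the state. A shape-only userspace sketch of the same calling convention, deliberately using 64-bit FNV-1a instead of SHA-256 so it stays tiny and runnable; all names here are mine, not kernel API:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct fnv_state { uint64_t h; };

static void fnv_init(struct fnv_state *sctx)
{
	sctx->h = 0xcbf29ce484222325ULL;	/* FNV-1a 64-bit offset basis */
}

static void fnv_update(struct fnv_state *sctx, const uint8_t *data,
		       size_t len)
{
	while (len--) {
		sctx->h ^= *data++;
		sctx->h *= 0x100000001b3ULL;	/* FNV-1a 64-bit prime */
	}
}

static uint64_t fnv_final(struct fnv_state *sctx)
{
	uint64_t out = sctx->h;

	memset(sctx, 0, sizeof(*sctx));	/* wipe state, like the kernel helpers */
	return out;
}

int main(void)
{
	struct fnv_state s;

	fnv_init(&s);
	fnv_update(&s, (const uint8_t *)"abc", 3);
	printf("%016llx\n", (unsigned long long)fnv_final(&s));
	return 0;
}
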
sctx               22 include/crypto/sha1_base.h 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               24 include/crypto/sha1_base.h 	sctx->state[0] = SHA1_H0;
sctx               25 include/crypto/sha1_base.h 	sctx->state[1] = SHA1_H1;
sctx               26 include/crypto/sha1_base.h 	sctx->state[2] = SHA1_H2;
sctx               27 include/crypto/sha1_base.h 	sctx->state[3] = SHA1_H3;
sctx               28 include/crypto/sha1_base.h 	sctx->state[4] = SHA1_H4;
sctx               29 include/crypto/sha1_base.h 	sctx->count = 0;
sctx               39 include/crypto/sha1_base.h 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               40 include/crypto/sha1_base.h 	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
sctx               42 include/crypto/sha1_base.h 	sctx->count += len;
sctx               50 include/crypto/sha1_base.h 			memcpy(sctx->buffer + partial, data, p);
sctx               54 include/crypto/sha1_base.h 			block_fn(sctx, sctx->buffer, 1);
sctx               61 include/crypto/sha1_base.h 			block_fn(sctx, data, blocks);
sctx               67 include/crypto/sha1_base.h 		memcpy(sctx->buffer + partial, data, len);
sctx               76 include/crypto/sha1_base.h 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx               77 include/crypto/sha1_base.h 	__be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
sctx               78 include/crypto/sha1_base.h 	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
sctx               80 include/crypto/sha1_base.h 	sctx->buffer[partial++] = 0x80;
sctx               82 include/crypto/sha1_base.h 		memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
sctx               85 include/crypto/sha1_base.h 		block_fn(sctx, sctx->buffer, 1);
sctx               88 include/crypto/sha1_base.h 	memset(sctx->buffer + partial, 0x0, bit_offset - partial);
sctx               89 include/crypto/sha1_base.h 	*bits = cpu_to_be64(sctx->count << 3);
sctx               90 include/crypto/sha1_base.h 	block_fn(sctx, sctx->buffer, 1);
sctx               97 include/crypto/sha1_base.h 	struct sha1_state *sctx = shash_desc_ctx(desc);
sctx              102 include/crypto/sha1_base.h 		put_unaligned_be32(sctx->state[i], digest++);
sctx              104 include/crypto/sha1_base.h 	*sctx = (struct sha1_state){};
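
sha1_base_do_update() above (and its sha256/sha512/sm3 twins below) all buffer the same way: top up a stashed partial block and compress it, compress any remaining full blocks straight out of the caller's buffer, then stash the tail. A standalone sketch of exactly that control flow, with a stand-in block_fn that only counts blocks instead of compressing:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define BLOCK_SIZE 64

struct state {
	uint64_t count;			/* total bytes fed in so far */
	uint8_t buffer[BLOCK_SIZE];	/* stashed partial block */
	unsigned blocks_seen;		/* what the dummy block_fn records */
};

static void block_fn(struct state *sctx, const uint8_t *src, int blocks)
{
	(void)src;
	sctx->blocks_seen += blocks;	/* a real hash would compress here */
}

static void do_update(struct state *sctx, const uint8_t *data, unsigned len)
{
	unsigned partial = sctx->count % BLOCK_SIZE;

	sctx->count += len;
	if (partial + len >= BLOCK_SIZE) {
		int blocks;

		if (partial) {		/* top up and flush the stashed tail */
			unsigned p = BLOCK_SIZE - partial;

			memcpy(sctx->buffer + partial, data, p);
			block_fn(sctx, sctx->buffer, 1);
			data += p;
			len -= p;
			partial = 0;
		}
		blocks = len / BLOCK_SIZE;
		len %= BLOCK_SIZE;
		if (blocks) {		/* bulk: hash straight from the input */
			block_fn(sctx, data, blocks);
			data += blocks * BLOCK_SIZE;
		}
	}
	if (len)			/* stash the remainder for next time */
		memcpy(sctx->buffer + partial, data, len);
}

int main(void)
{
	struct state s = {0};
	uint8_t msg[200] = {0};

	do_update(&s, msg, 70);		/* 1 full block + 6 byte tail */
	do_update(&s, msg, 130);	/* 6 + 130 = 136 -> 2 more blocks */
	printf("blocks=%u tail=%llu\n", s.blocks_seen,
	       (unsigned long long)(s.count % BLOCK_SIZE));
	return 0;
}
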
sctx               23 include/crypto/sha256_base.h 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               25 include/crypto/sha256_base.h 	return sha224_init(sctx);
sctx               30 include/crypto/sha256_base.h 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               32 include/crypto/sha256_base.h 	return sha256_init(sctx);
sctx               40 include/crypto/sha256_base.h 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               41 include/crypto/sha256_base.h 	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
sctx               43 include/crypto/sha256_base.h 	sctx->count += len;
sctx               51 include/crypto/sha256_base.h 			memcpy(sctx->buf + partial, data, p);
sctx               55 include/crypto/sha256_base.h 			block_fn(sctx, sctx->buf, 1);
sctx               62 include/crypto/sha256_base.h 			block_fn(sctx, data, blocks);
sctx               68 include/crypto/sha256_base.h 		memcpy(sctx->buf + partial, data, len);
sctx               77 include/crypto/sha256_base.h 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx               78 include/crypto/sha256_base.h 	__be64 *bits = (__be64 *)(sctx->buf + bit_offset);
sctx               79 include/crypto/sha256_base.h 	unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
sctx               81 include/crypto/sha256_base.h 	sctx->buf[partial++] = 0x80;
sctx               83 include/crypto/sha256_base.h 		memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
sctx               86 include/crypto/sha256_base.h 		block_fn(sctx, sctx->buf, 1);
sctx               89 include/crypto/sha256_base.h 	memset(sctx->buf + partial, 0x0, bit_offset - partial);
sctx               90 include/crypto/sha256_base.h 	*bits = cpu_to_be64(sctx->count << 3);
sctx               91 include/crypto/sha256_base.h 	block_fn(sctx, sctx->buf, 1);
sctx               99 include/crypto/sha256_base.h 	struct sha256_state *sctx = shash_desc_ctx(desc);
sctx              104 include/crypto/sha256_base.h 		put_unaligned_be32(sctx->state[i], digest++);
sctx              106 include/crypto/sha256_base.h 	*sctx = (struct sha256_state){};
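
sha256_base_do_finalize() above appends the mandatory 0x80 terminator, zero-fills, and writes the big-endian bit count into the last 8 bytes of a block, spilling into an extra all-padding block when fewer than 9 bytes remain. Equivalently, the padded length is the smallest multiple of 64 that holds len + 1 + 8; a quick check of a few boundary cases:

#include <stdio.h>

static unsigned long padded_len(unsigned long len)
{
	/* room for message + 0x80 byte + 8 length bytes, rounded to 64 */
	return (len + 1 + 8 + 63) & ~63UL;
}

int main(void)
{
	unsigned long cases[] = { 0, 55, 56, 64, 119 };

	for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("msg %3lu -> padded %3lu\n", cases[i],
		       padded_len(cases[i]));
	return 0;	/* 55 fits in one block; 56 already needs two */
}
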
sctx               23 include/crypto/sha512_base.h 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx               25 include/crypto/sha512_base.h 	sctx->state[0] = SHA384_H0;
sctx               26 include/crypto/sha512_base.h 	sctx->state[1] = SHA384_H1;
sctx               27 include/crypto/sha512_base.h 	sctx->state[2] = SHA384_H2;
sctx               28 include/crypto/sha512_base.h 	sctx->state[3] = SHA384_H3;
sctx               29 include/crypto/sha512_base.h 	sctx->state[4] = SHA384_H4;
sctx               30 include/crypto/sha512_base.h 	sctx->state[5] = SHA384_H5;
sctx               31 include/crypto/sha512_base.h 	sctx->state[6] = SHA384_H6;
sctx               32 include/crypto/sha512_base.h 	sctx->state[7] = SHA384_H7;
sctx               33 include/crypto/sha512_base.h 	sctx->count[0] = sctx->count[1] = 0;
sctx               40 include/crypto/sha512_base.h 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx               42 include/crypto/sha512_base.h 	sctx->state[0] = SHA512_H0;
sctx               43 include/crypto/sha512_base.h 	sctx->state[1] = SHA512_H1;
sctx               44 include/crypto/sha512_base.h 	sctx->state[2] = SHA512_H2;
sctx               45 include/crypto/sha512_base.h 	sctx->state[3] = SHA512_H3;
sctx               46 include/crypto/sha512_base.h 	sctx->state[4] = SHA512_H4;
sctx               47 include/crypto/sha512_base.h 	sctx->state[5] = SHA512_H5;
sctx               48 include/crypto/sha512_base.h 	sctx->state[6] = SHA512_H6;
sctx               49 include/crypto/sha512_base.h 	sctx->state[7] = SHA512_H7;
sctx               50 include/crypto/sha512_base.h 	sctx->count[0] = sctx->count[1] = 0;
sctx               60 include/crypto/sha512_base.h 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx               61 include/crypto/sha512_base.h 	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
sctx               63 include/crypto/sha512_base.h 	sctx->count[0] += len;
sctx               64 include/crypto/sha512_base.h 	if (sctx->count[0] < len)
sctx               65 include/crypto/sha512_base.h 		sctx->count[1]++;
sctx               73 include/crypto/sha512_base.h 			memcpy(sctx->buf + partial, data, p);
sctx               77 include/crypto/sha512_base.h 			block_fn(sctx, sctx->buf, 1);
sctx               84 include/crypto/sha512_base.h 			block_fn(sctx, data, blocks);
sctx               90 include/crypto/sha512_base.h 		memcpy(sctx->buf + partial, data, len);
sctx               99 include/crypto/sha512_base.h 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx              100 include/crypto/sha512_base.h 	__be64 *bits = (__be64 *)(sctx->buf + bit_offset);
sctx              101 include/crypto/sha512_base.h 	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
sctx              103 include/crypto/sha512_base.h 	sctx->buf[partial++] = 0x80;
sctx              105 include/crypto/sha512_base.h 		memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
sctx              108 include/crypto/sha512_base.h 		block_fn(sctx, sctx->buf, 1);
sctx              111 include/crypto/sha512_base.h 	memset(sctx->buf + partial, 0x0, bit_offset - partial);
sctx              112 include/crypto/sha512_base.h 	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
sctx              113 include/crypto/sha512_base.h 	bits[1] = cpu_to_be64(sctx->count[0] << 3);
sctx              114 include/crypto/sha512_base.h 	block_fn(sctx, sctx->buf, 1);
sctx              122 include/crypto/sha512_base.h 	struct sha512_state *sctx = shash_desc_ctx(desc);
sctx              127 include/crypto/sha512_base.h 		put_unaligned_be64(sctx->state[i], digest++);
sctx              129 include/crypto/sha512_base.h 	*sctx = (struct sha512_state){};
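
SHA-512 tracks its message length as a 128-bit byte count in the pair count[1]:count[0] (note the carry handling in sha512_base_do_update above), and do_finalize converts bytes to bits by shifting that pair left by 3 across the word boundary, exactly as in the bits[0]/bits[1] lines. A small demonstration of the double-word shift:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* (2^61 + 1) bytes: large enough that the byte->bit shift
	 * overflows a single 64-bit word */
	uint64_t count[2] = { 0x2000000000000001ULL, 0 };
	uint64_t hi_bits = count[1] << 3 | count[0] >> 61;
	uint64_t lo_bits = count[0] << 3;

	/* prints 2^64 + 8 bits: 00000000000000010000000000000008 */
	printf("bits = %016llx%016llx\n",
	       (unsigned long long)hi_bits, (unsigned long long)lo_bits);
	return 0;
}
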
sctx               22 include/crypto/sm3_base.h 	struct sm3_state *sctx = shash_desc_ctx(desc);
sctx               24 include/crypto/sm3_base.h 	sctx->state[0] = SM3_IVA;
sctx               25 include/crypto/sm3_base.h 	sctx->state[1] = SM3_IVB;
sctx               26 include/crypto/sm3_base.h 	sctx->state[2] = SM3_IVC;
sctx               27 include/crypto/sm3_base.h 	sctx->state[3] = SM3_IVD;
sctx               28 include/crypto/sm3_base.h 	sctx->state[4] = SM3_IVE;
sctx               29 include/crypto/sm3_base.h 	sctx->state[5] = SM3_IVF;
sctx               30 include/crypto/sm3_base.h 	sctx->state[6] = SM3_IVG;
sctx               31 include/crypto/sm3_base.h 	sctx->state[7] = SM3_IVH;
sctx               32 include/crypto/sm3_base.h 	sctx->count = 0;
sctx               42 include/crypto/sm3_base.h 	struct sm3_state *sctx = shash_desc_ctx(desc);
sctx               43 include/crypto/sm3_base.h 	unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
sctx               45 include/crypto/sm3_base.h 	sctx->count += len;
sctx               53 include/crypto/sm3_base.h 			memcpy(sctx->buffer + partial, data, p);
sctx               57 include/crypto/sm3_base.h 			block_fn(sctx, sctx->buffer, 1);
sctx               64 include/crypto/sm3_base.h 			block_fn(sctx, data, blocks);
sctx               70 include/crypto/sm3_base.h 		memcpy(sctx->buffer + partial, data, len);
sctx               79 include/crypto/sm3_base.h 	struct sm3_state *sctx = shash_desc_ctx(desc);
sctx               80 include/crypto/sm3_base.h 	__be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
sctx               81 include/crypto/sm3_base.h 	unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
sctx               83 include/crypto/sm3_base.h 	sctx->buffer[partial++] = 0x80;
sctx               85 include/crypto/sm3_base.h 		memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial);
sctx               88 include/crypto/sm3_base.h 		block_fn(sctx, sctx->buffer, 1);
sctx               91 include/crypto/sm3_base.h 	memset(sctx->buffer + partial, 0x0, bit_offset - partial);
sctx               92 include/crypto/sm3_base.h 	*bits = cpu_to_be64(sctx->count << 3);
sctx               93 include/crypto/sm3_base.h 	block_fn(sctx, sctx->buffer, 1);
sctx              100 include/crypto/sm3_base.h 	struct sm3_state *sctx = shash_desc_ctx(desc);
sctx              105 include/crypto/sm3_base.h 		put_unaligned_be32(sctx->state[i], digest++);
sctx              107 include/crypto/sm3_base.h 	*sctx = (struct sm3_state){};
sctx              209 lib/crypto/sha256.c int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
sctx              214 lib/crypto/sha256.c 	partial = sctx->count & 0x3f;
sctx              215 lib/crypto/sha256.c 	sctx->count += len;
sctx              222 lib/crypto/sha256.c 			memcpy(sctx->buf + partial, data, done + 64);
sctx              223 lib/crypto/sha256.c 			src = sctx->buf;
sctx              227 lib/crypto/sha256.c 			sha256_transform(sctx->state, src);
sctx              234 lib/crypto/sha256.c 	memcpy(sctx->buf + partial, src, len - done);
sctx              240 lib/crypto/sha256.c int sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
sctx              242 lib/crypto/sha256.c 	return sha256_update(sctx, data, len);
sctx              246 lib/crypto/sha256.c static int __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
sctx              255 lib/crypto/sha256.c 	bits = cpu_to_be64(sctx->count << 3);
sctx              258 lib/crypto/sha256.c 	index = sctx->count & 0x3f;
sctx              260 lib/crypto/sha256.c 	sha256_update(sctx, padding, pad_len);
sctx              263 lib/crypto/sha256.c 	sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
sctx              267 lib/crypto/sha256.c 		put_unaligned_be32(sctx->state[i], &dst[i]);
sctx              270 lib/crypto/sha256.c 	memset(sctx, 0, sizeof(*sctx));
sctx              275 lib/crypto/sha256.c int sha256_final(struct sha256_state *sctx, u8 *out)
sctx              277 lib/crypto/sha256.c 	return __sha256_final(sctx, out, 8);
sctx              281 lib/crypto/sha256.c int sha224_final(struct sha256_state *sctx, u8 *out)
sctx              283 lib/crypto/sha256.c 	return __sha256_final(sctx, out, 7);
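
Unlike the *_base helpers, __sha256_final() above pads by feeding a static padding buffer (0x80 followed by zeros) back through sha256_update() for pad_len bytes, then the 8 big-endian length bytes. I believe the pad_len formula in this file is the classic two-armed one, which guarantees index + pad_len + 8 always lands on a 64-byte boundary; an exhaustive check of all 64 residues:

#include <stdio.h>

int main(void)
{
	for (unsigned index = 0; index < 64; index++) {
		/* assumed from lib/crypto/sha256.c: pad into this block
		 * if the length field still fits, else into the next */
		unsigned pad_len = (index < 56) ? (56 - index)
						: ((64 + 56) - index);

		if ((index + pad_len + 8) % 64)
			printf("index %u breaks alignment!\n", index);
	}
	printf("all 64 residues pad to a block boundary\n");
	return 0;
}
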