root/crypto/xts.c


DEFINITIONS

This source file includes the following definitions:
  1. setkey
  2. xor_tweak
  3. xor_tweak_pre
  4. xor_tweak_post
  5. cts_done
  6. cts_final
  7. encrypt_done
  8. decrypt_done
  9. init_crypt
  10. encrypt
  11. decrypt
  12. init_tfm
  13. exit_tfm
  14. free_inst
  15. create
  16. crypto_module_init
  17. crypto_module_exit

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /* XTS: as defined in IEEE1619/D16
   3  *      http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
   4  *
   5  * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
   6  *
   7  * Based on ecb.c
   8  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
   9  */
  10 #include <crypto/internal/skcipher.h>
  11 #include <crypto/scatterwalk.h>
  12 #include <linux/err.h>
  13 #include <linux/init.h>
  14 #include <linux/kernel.h>
  15 #include <linux/module.h>
  16 #include <linux/scatterlist.h>
  17 #include <linux/slab.h>
  18 
  19 #include <crypto/xts.h>
  20 #include <crypto/b128ops.h>
  21 #include <crypto/gf128mul.h>
  22 
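      /*
       * Per-transform context: 'child' is the underlying "ecb(<cipher>)"
       * skcipher that processes the data blocks (keyed with Key1), and
       * 'tweak' is a single-block cipher (keyed with Key2) used only to
       * encrypt the IV into the initial tweak value.
       */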
  23 struct priv {
  24         struct crypto_skcipher *child;
  25         struct crypto_cipher *tweak;
  26 };
  27 
  28 struct xts_instance_ctx {
  29         struct crypto_skcipher_spawn spawn;
  30         char name[CRYPTO_MAX_ALG_NAME];
  31 };
  32 
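      /*
       * Per-request context: 't' holds the current tweak value, 'subreq' is
       * the request forwarded to the ecb() child, and 'tail'/'sg' let the
       * ciphertext-stealing path address the last full block of the
       * destination buffer.
       */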
  33 struct rctx {
  34         le128 t;
  35         struct scatterlist *tail;
  36         struct scatterlist sg[2];
  37         struct skcipher_request subreq;
  38 };
  39 
  40 static int setkey(struct crypto_skcipher *parent, const u8 *key,
  41                   unsigned int keylen)
  42 {
  43         struct priv *ctx = crypto_skcipher_ctx(parent);
  44         struct crypto_skcipher *child;
  45         struct crypto_cipher *tweak;
  46         int err;
  47 
  48         err = xts_verify_key(parent, key, keylen);
  49         if (err)
  50                 return err;
  51 
  52         keylen /= 2;
  53 
  54         /* we need two cipher instances: one to compute the initial 'tweak'
  55          * by encrypting the IV (usually the 'plain' iv) and the other
  56          * one to encrypt and decrypt the data */
  57 
  58         /* tweak cipher, uses Key2 i.e. the second half of *key */
  59         tweak = ctx->tweak;
  60         crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
  61         crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
  62                                        CRYPTO_TFM_REQ_MASK);
  63         err = crypto_cipher_setkey(tweak, key + keylen, keylen);
  64         crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
  65                                           CRYPTO_TFM_RES_MASK);
  66         if (err)
  67                 return err;
  68 
  69         /* data cipher, uses Key1 i.e. the first half of *key */
  70         child = ctx->child;
  71         crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
  72         crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
  73                                          CRYPTO_TFM_REQ_MASK);
  74         err = crypto_skcipher_setkey(child, key, keylen);
  75         crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
  76                                           CRYPTO_TFM_RES_MASK);
  77 
  78         return err;
  79 }
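      /*
       * Key layout: the supplied key is the concatenation Key1 || Key2.  For
       * example, a 64-byte key for xts(aes) yields two 32-byte AES-256 keys:
       * bytes 0..31 (Key1) key the ecb(aes) child used for the data, and
       * bytes 32..63 (Key2) key the bare cipher that generates the tweak.
       * xts_verify_key() rejects odd-length keys and, in FIPS mode, keys
       * whose two halves are identical.
       */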
  80 
  81 /*
  82  * We compute the tweak masks twice (both before and after the ECB encryption or
  83  * decryption) to avoid having to allocate a temporary buffer and/or make
  84  * multiple calls to the 'ecb(..)' instance, which usually would be slower than
  85  * just doing the gf128mul_x_ble() calls again.
  86  */
  87 static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
  88 {
  89         struct rctx *rctx = skcipher_request_ctx(req);
  90         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
  91         const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
  92         const int bs = XTS_BLOCK_SIZE;
  93         struct skcipher_walk w;
  94         le128 t = rctx->t;
  95         int err;
  96 
  97         if (second_pass) {
  98                 req = &rctx->subreq;
  99                 /* set to our TFM to enforce correct alignment: */
 100                 skcipher_request_set_tfm(req, tfm);
 101         }
 102         err = skcipher_walk_virt(&w, req, false);
 103 
 104         while (w.nbytes) {
 105                 unsigned int avail = w.nbytes;
 106                 le128 *wsrc;
 107                 le128 *wdst;
 108 
 109                 wsrc = w.src.virt.addr;
 110                 wdst = w.dst.virt.addr;
 111 
 112                 do {
 113                         if (unlikely(cts) &&
 114                             w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
 115                                 if (!enc) {
 116                                         if (second_pass)
 117                                                 rctx->t = t;
 118                                         gf128mul_x_ble(&t, &t);
 119                                 }
 120                                 le128_xor(wdst, &t, wsrc);
 121                                 if (enc && second_pass)
 122                                         gf128mul_x_ble(&rctx->t, &t);
 123                                 skcipher_walk_done(&w, avail - bs);
 124                                 return 0;
 125                         }
 126 
 127                         le128_xor(wdst++, &t, wsrc++);
 128                         gf128mul_x_ble(&t, &t);
 129                 } while ((avail -= bs) >= bs);
 130 
 131                 err = skcipher_walk_done(&w, avail);
 132         }
 133 
 134         return err;
 135 }
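      /*
       * Tweak schedule: T_0 = E_Key2(IV) and T_{j+1} = T_j * x in GF(2^128)
       * (gf128mul_x_ble).  Each block is processed as
       * C_j = E_Key1(P_j XOR T_j) XOR T_j, which is why xor_tweak() runs once
       * before and once after the ecb() pass over the data.  The special case
       * inside the loop fires on the last full block when a partial block
       * follows; on the second pass it saves the tweak that cts_final() will
       * need in rctx->t.
       */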
 136 
 137 static int xor_tweak_pre(struct skcipher_request *req, bool enc)
 138 {
 139         return xor_tweak(req, false, enc);
 140 }
 141 
 142 static int xor_tweak_post(struct skcipher_request *req, bool enc)
 143 {
 144         return xor_tweak(req, true, enc);
 145 }
 146 
 147 static void cts_done(struct crypto_async_request *areq, int err)
 148 {
 149         struct skcipher_request *req = areq->data;
 150         le128 b;
 151 
 152         if (!err) {
 153                 struct rctx *rctx = skcipher_request_ctx(req);
 154 
 155                 scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
 156                 le128_xor(&b, &rctx->t, &b);
 157                 scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
 158         }
 159 
 160         skcipher_request_complete(req, err);
 161 }
 162 
 163 static int cts_final(struct skcipher_request *req,
 164                      int (*crypt)(struct skcipher_request *req))
 165 {
 166         struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 167         int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
 168         struct rctx *rctx = skcipher_request_ctx(req);
 169         struct skcipher_request *subreq = &rctx->subreq;
 170         int tail = req->cryptlen % XTS_BLOCK_SIZE;
 171         le128 b[2];
 172         int err;
 173 
 174         rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
 175                                       offset - XTS_BLOCK_SIZE);
 176 
 177         scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
 178         memcpy(b + 1, b, tail);
 179         scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
 180 
 181         le128_xor(b, &rctx->t, b);
 182 
 183         scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
 184 
 185         skcipher_request_set_tfm(subreq, ctx->child);
 186         skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
 187         skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
 188                                    XTS_BLOCK_SIZE, NULL);
 189 
 190         err = crypt(subreq);
 191         if (err)
 192                 return err;
 193 
 194         scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
 195         le128_xor(b, &rctx->t, b);
 196         scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
 197 
 198         return 0;
 199 }
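      /*
       * Ciphertext stealing for a trailing partial block: the output of the
       * last full block is read back from dst; its leading 'tail' bytes
       * become the final partial output, and its remaining bytes are stolen
       * to pad the partial source block up to a full block.  That padded
       * block is XORed with the saved tweak, run through the child cipher
       * once more, and XORed with the tweak again (in cts_done() when the
       * child completes asynchronously).
       */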
 200 
 201 static void encrypt_done(struct crypto_async_request *areq, int err)
 202 {
 203         struct skcipher_request *req = areq->data;
 204 
 205         if (!err) {
 206                 struct rctx *rctx = skcipher_request_ctx(req);
 207 
 208                 rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 209                 err = xor_tweak_post(req, true);
 210 
 211                 if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
 212                         err = cts_final(req, crypto_skcipher_encrypt);
 213                         if (err == -EINPROGRESS)
 214                                 return;
 215                 }
 216         }
 217 
 218         skcipher_request_complete(req, err);
 219 }
 220 
 221 static void decrypt_done(struct crypto_async_request *areq, int err)
 222 {
 223         struct skcipher_request *req = areq->data;
 224 
 225         if (!err) {
 226                 struct rctx *rctx = skcipher_request_ctx(req);
 227 
 228                 rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 229                 err = xor_tweak_post(req, false);
 230 
 231                 if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
 232                         err = cts_final(req, crypto_skcipher_decrypt);
 233                         if (err == -EINPROGRESS)
 234                                 return;
 235                 }
 236         }
 237 
 238         skcipher_request_complete(req, err);
 239 }
 240 
 241 static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
 242 {
 243         struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 244         struct rctx *rctx = skcipher_request_ctx(req);
 245         struct skcipher_request *subreq = &rctx->subreq;
 246 
 247         if (req->cryptlen < XTS_BLOCK_SIZE)
 248                 return -EINVAL;
 249 
 250         skcipher_request_set_tfm(subreq, ctx->child);
 251         skcipher_request_set_callback(subreq, req->base.flags, compl, req);
 252         skcipher_request_set_crypt(subreq, req->dst, req->dst,
 253                                    req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);
 254 
 255         /* calculate first value of T */
 256         crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
 257 
 258         return 0;
 259 }
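      /*
       * init_crypt() derives the initial tweak T_0 by encrypting the IV with
       * the Key2 cipher and prepares the ecb() subrequest over the full
       * blocks of dst; the tweak pre-pass already XORs src into dst, so the
       * child operates on dst in place.  Requests shorter than one block are
       * rejected.
       */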
 260 
 261 static int encrypt(struct skcipher_request *req)
 262 {
 263         struct rctx *rctx = skcipher_request_ctx(req);
 264         struct skcipher_request *subreq = &rctx->subreq;
 265         int err;
 266 
 267         err = init_crypt(req, encrypt_done) ?:
 268               xor_tweak_pre(req, true) ?:
 269               crypto_skcipher_encrypt(subreq) ?:
 270               xor_tweak_post(req, true);
 271 
 272         if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
 273                 return err;
 274 
 275         return cts_final(req, crypto_skcipher_encrypt);
 276 }
 277 
 278 static int decrypt(struct skcipher_request *req)
 279 {
 280         struct rctx *rctx = skcipher_request_ctx(req);
 281         struct skcipher_request *subreq = &rctx->subreq;
 282         int err;
 283 
 284         err = init_crypt(req, decrypt_done) ?:
 285               xor_tweak_pre(req, false) ?:
 286               crypto_skcipher_decrypt(subreq) ?:
 287               xor_tweak_post(req, false);
 288 
 289         if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
 290                 return err;
 291 
 292         return cts_final(req, crypto_skcipher_decrypt);
 293 }
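      /*
       * encrypt() and decrypt() share the same xor-encrypt-xor pipeline:
       * derive T_0, XOR the tweaks into dst, run the ecb() child over the
       * full blocks in place, XOR the tweaks in again, then call cts_final()
       * if the length is not block-aligned.  Asynchronous completion goes
       * through encrypt_done()/decrypt_done(), which repeat the second tweak
       * pass once the child request finishes.
       */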
 294 
 295 static int init_tfm(struct crypto_skcipher *tfm)
 296 {
 297         struct skcipher_instance *inst = skcipher_alg_instance(tfm);
 298         struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
 299         struct priv *ctx = crypto_skcipher_ctx(tfm);
 300         struct crypto_skcipher *child;
 301         struct crypto_cipher *tweak;
 302 
 303         child = crypto_spawn_skcipher(&ictx->spawn);
 304         if (IS_ERR(child))
 305                 return PTR_ERR(child);
 306 
 307         ctx->child = child;
 308 
 309         tweak = crypto_alloc_cipher(ictx->name, 0, 0);
 310         if (IS_ERR(tweak)) {
 311                 crypto_free_skcipher(ctx->child);
 312                 return PTR_ERR(tweak);
 313         }
 314 
 315         ctx->tweak = tweak;
 316 
 317         crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
 318                                          sizeof(struct rctx));
 319 
 320         return 0;
 321 }
 322 
 323 static void exit_tfm(struct crypto_skcipher *tfm)
 324 {
 325         struct priv *ctx = crypto_skcipher_ctx(tfm);
 326 
 327         crypto_free_skcipher(ctx->child);
 328         crypto_free_cipher(ctx->tweak);
 329 }
 330 
 331 static void free_inst(struct skcipher_instance *inst)
 332 {
 333         crypto_drop_skcipher(skcipher_instance_ctx(inst));
 334         kfree(inst);
 335 }
 336 
 337 static int create(struct crypto_template *tmpl, struct rtattr **tb)
 338 {
 339         struct skcipher_instance *inst;
 340         struct crypto_attr_type *algt;
 341         struct xts_instance_ctx *ctx;
 342         struct skcipher_alg *alg;
 343         const char *cipher_name;
 344         u32 mask;
 345         int err;
 346 
 347         algt = crypto_get_attr_type(tb);
 348         if (IS_ERR(algt))
 349                 return PTR_ERR(algt);
 350 
 351         if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
 352                 return -EINVAL;
 353 
 354         cipher_name = crypto_attr_alg_name(tb[1]);
 355         if (IS_ERR(cipher_name))
 356                 return PTR_ERR(cipher_name);
 357 
 358         inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 359         if (!inst)
 360                 return -ENOMEM;
 361 
 362         ctx = skcipher_instance_ctx(inst);
 363 
 364         crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
 365 
 366         mask = crypto_requires_off(algt->type, algt->mask,
 367                                    CRYPTO_ALG_NEED_FALLBACK |
 368                                    CRYPTO_ALG_ASYNC);
 369 
 370         err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
 371         if (err == -ENOENT) {
 372                 err = -ENAMETOOLONG;
 373                 if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
 374                              cipher_name) >= CRYPTO_MAX_ALG_NAME)
 375                         goto err_free_inst;
 376 
 377                 err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
 378         }
 379 
 380         if (err)
 381                 goto err_free_inst;
 382 
 383         alg = crypto_skcipher_spawn_alg(&ctx->spawn);
 384 
 385         err = -EINVAL;
 386         if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
 387                 goto err_drop_spawn;
 388 
 389         if (crypto_skcipher_alg_ivsize(alg))
 390                 goto err_drop_spawn;
 391 
 392         err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
 393                                   &alg->base);
 394         if (err)
 395                 goto err_drop_spawn;
 396 
 397         err = -EINVAL;
 398         cipher_name = alg->base.cra_name;
 399 
 400         /* Alas we screwed up the naming so we have to mangle the
 401          * cipher name.
 402          */
 403         if (!strncmp(cipher_name, "ecb(", 4)) {
 404                 unsigned len;
 405 
 406                 len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
 407                 if (len < 2 || len >= sizeof(ctx->name))
 408                         goto err_drop_spawn;
 409 
 410                 if (ctx->name[len - 1] != ')')
 411                         goto err_drop_spawn;
 412 
 413                 ctx->name[len - 1] = 0;
 414 
 415                 if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
 416                              "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
 417                         err = -ENAMETOOLONG;
 418                         goto err_drop_spawn;
 419                 }
 420         } else
 421                 goto err_drop_spawn;
 422 
 423         inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
 424         inst->alg.base.cra_priority = alg->base.cra_priority;
 425         inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
 426         inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
 427                                        (__alignof__(u64) - 1);
 428 
 429         inst->alg.ivsize = XTS_BLOCK_SIZE;
 430         inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
 431         inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
 432 
 433         inst->alg.base.cra_ctxsize = sizeof(struct priv);
 434 
 435         inst->alg.init = init_tfm;
 436         inst->alg.exit = exit_tfm;
 437 
 438         inst->alg.setkey = setkey;
 439         inst->alg.encrypt = encrypt;
 440         inst->alg.decrypt = decrypt;
 441 
 442         inst->free = free_inst;
 443 
 444         err = skcipher_register_instance(tmpl, inst);
 445         if (err)
 446                 goto err_drop_spawn;
 447 
 448 out:
 449         return err;
 450 
 451 err_drop_spawn:
 452         crypto_drop_skcipher(&ctx->spawn);
 453 err_free_inst:
 454         kfree(inst);
 455         goto out;
 456 }
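      /*
       * Template instantiation: "xts(aes)", for example, resolves the data
       * path to an "ecb(aes)" skcipher (retrying with the "ecb(...)" wrapper
       * if only the bare cipher name was given) and records the inner cipher
       * name so init_tfm() can allocate a bare "aes" cipher for the tweak.
       * The underlying algorithm must have a 16-byte block size and no IV of
       * its own, and the advertised key sizes are doubled because the XTS
       * key is Key1 || Key2.
       */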
 457 
 458 static struct crypto_template crypto_tmpl = {
 459         .name = "xts",
 460         .create = create,
 461         .module = THIS_MODULE,
 462 };
 463 
 464 static int __init crypto_module_init(void)
 465 {
 466         return crypto_register_template(&crypto_tmpl);
 467 }
 468 
 469 static void __exit crypto_module_exit(void)
 470 {
 471         crypto_unregister_template(&crypto_tmpl);
 472 }
 473 
 474 subsys_initcall(crypto_module_init);
 475 module_exit(crypto_module_exit);
 476 
 477 MODULE_LICENSE("GPL");
 478 MODULE_DESCRIPTION("XTS block cipher mode");
 479 MODULE_ALIAS_CRYPTO("xts");
