root/drivers/crypto/cavium/cpt/cptvf_algs.c


DEFINITIONS

This source file includes the following definitions.
  1. cvm_callback
  2. update_input_iv
  3. update_output_iv
  4. update_input_data
  5. update_output_data
  6. create_ctx_hdr
  7. create_input_list
  8. store_cb_info
  9. create_output_list
  10. cvm_enc_dec
  11. cvm_encrypt
  12. cvm_decrypt
  13. cvm_xts_setkey
  14. cvm_validate_keylen
  15. cvm_setkey
  16. cvm_cbc_aes_setkey
  17. cvm_ecb_aes_setkey
  18. cvm_cfb_aes_setkey
  19. cvm_cbc_des3_setkey
  20. cvm_ecb_des3_setkey
  21. cvm_enc_dec_init
  22. cav_register_algs
  23. cav_unregister_algs
  24. cvm_crypto_init
  25. cvm_crypto_exit

// SPDX-License-Identifier: GPL-2.0-only

/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/internal/des.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include "cptvf.h"
#include "cptvf_algs.h"

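/*
 * Table of CPT VF devices known to this module; cvm_enc_dec() picks
 * an entry indexed by the CPU submitting the request.
 */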
struct cpt_device_handle {
        void *cdev[MAX_DEVICES];
        u32 dev_count;
};

static struct cpt_device_handle dev_handle;

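/*
 * Completion callback registered with each CPT request; completes the
 * crypto API request once the hardware has processed it.
 */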
static void cvm_callback(u32 status, void *arg)
{
        struct crypto_async_request *req = (struct crypto_async_request *)arg;

        req->complete(req, !status);
}

static inline void update_input_iv(struct cpt_request_info *req_info,
                                   u8 *iv, u32 enc_iv_len,
                                   u32 *argcnt)
{
        /* Setting the iv information */
        req_info->in[*argcnt].vptr = (void *)iv;
        req_info->in[*argcnt].size = enc_iv_len;
        req_info->req.dlen += enc_iv_len;

        ++(*argcnt);
}

static inline void update_output_iv(struct cpt_request_info *req_info,
                                    u8 *iv, u32 enc_iv_len,
                                    u32 *argcnt)
{
        /* Setting the iv information */
        req_info->out[*argcnt].vptr = (void *)iv;
        req_info->out[*argcnt].size = enc_iv_len;
        req_info->rlen += enc_iv_len;

        ++(*argcnt);
}

static inline void update_input_data(struct cpt_request_info *req_info,
                                     struct scatterlist *inp_sg,
                                     u32 nbytes, u32 *argcnt)
{
        req_info->req.dlen += nbytes;

        while (nbytes) {
                u32 len = min(nbytes, inp_sg->length);
                u8 *ptr = sg_virt(inp_sg);

                req_info->in[*argcnt].vptr = (void *)ptr;
                req_info->in[*argcnt].size = len;
                nbytes -= len;

                ++(*argcnt);
                ++inp_sg;
        }
}

static inline void update_output_data(struct cpt_request_info *req_info,
                                      struct scatterlist *outp_sg,
                                      u32 nbytes, u32 *argcnt)
{
        req_info->rlen += nbytes;

        while (nbytes) {
                u32 len = min(nbytes, outp_sg->length);
                u8 *ptr = sg_virt(outp_sg);

                req_info->out[*argcnt].vptr = (void *)ptr;
                req_info->out[*argcnt].size = len;
                nbytes -= len;
                ++(*argcnt);
                ++outp_sg;
        }
}

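/*
 * Fill in the request control fields and the fc_context (cipher type,
 * key and IV source), then add the offset control word and the context
 * itself as the first two gather entries of the input list.
 */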
static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
                                 u32 *argcnt)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct cvm_enc_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
        struct fc_context *fctx = &rctx->fctx;
        u64 *offset_control = &rctx->control_word;
        u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
        struct cpt_request_info *req_info = &rctx->cpt_req;
        u64 *ctrl_flags = NULL;

        req_info->ctrl.s.grp = 0;
        req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER;
        req_info->ctrl.s.se_req = SE_CORE_REQ;

        req_info->req.opcode.s.major = MAJOR_OP_FC |
                                        DMA_MODE_FLAG(DMA_GATHER_SCATTER);
        if (enc)
                req_info->req.opcode.s.minor = 2;
        else
                req_info->req.opcode.s.minor = 3;

        req_info->req.param1 = req->nbytes; /* Encryption data length */
        req_info->req.param2 = 0; /* Auth data length */

        fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
        fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
        fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR;

        if (ctx->cipher_type == AES_XTS)
                memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
        else
                memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
        ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
        *ctrl_flags = cpu_to_be64(*ctrl_flags);

        *offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16));
        /* Pass the packet data information in the offset control word,
         * the first 8 bytes of the input
         */
        req_info->in[*argcnt].vptr = (u8 *)offset_control;
        req_info->in[*argcnt].size = CONTROL_WORD_LEN;
        req_info->req.dlen += CONTROL_WORD_LEN;
        ++(*argcnt);

        req_info->in[*argcnt].vptr = (u8 *)fctx;
        req_info->in[*argcnt].size = sizeof(struct fc_context);
        req_info->req.dlen += sizeof(struct fc_context);

        ++(*argcnt);

        return 0;
}

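/*
 * Build the gather (input) list: context header, IV and then the
 * source scatterlist data.
 */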
static inline u32 create_input_list(struct ablkcipher_request *req, u32 enc,
                                    u32 enc_iv_len)
{
        struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
        struct cpt_request_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0;

        create_ctx_hdr(req, enc, &argcnt);
        update_input_iv(req_info, req->info, enc_iv_len, &argcnt);
        update_input_data(req_info, req->src, req->nbytes, &argcnt);
        req_info->incnt = argcnt;

        return 0;
}

static inline void store_cb_info(struct ablkcipher_request *req,
                                 struct cpt_request_info *req_info)
{
        req_info->callback = (void *)cvm_callback;
        req_info->callback_arg = (void *)&req->base;
}

static inline void create_output_list(struct ablkcipher_request *req,
                                      u32 enc_iv_len)
{
        struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
        struct cpt_request_info *req_info = &rctx->cpt_req;
        u32 argcnt = 0;

        /* Output buffer processing:
         * the encryption/decryption output is received in the
         * following layout
         *
         * |---------IV---------|---ENCRYPTED/DECRYPTED DATA---|
         * | enc_iv_len bytes   |       req->nbytes bytes      |
         */
        /* Read back the IV */
        update_output_iv(req_info, req->info, enc_iv_len, &argcnt);
        update_output_data(req_info, req->dst, req->nbytes, &argcnt);
        req_info->outcnt = argcnt;
}

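/*
 * Common encrypt/decrypt path: build the input and output lists,
 * attach the completion callback and submit the request to the CPT VF
 * selected by the current CPU.
 */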
static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
        u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
        struct fc_context *fctx = &rctx->fctx;
        struct cpt_request_info *req_info = &rctx->cpt_req;
        void *cdev = NULL;
        int status;

        memset(req_info, 0, sizeof(struct cpt_request_info));
        memset(fctx, 0, sizeof(struct fc_context));
        create_input_list(req, enc, enc_iv_len);
        create_output_list(req, enc_iv_len);
        store_cb_info(req, req_info);
        cdev = dev_handle.cdev[smp_processor_id()];
        status = cptvf_do_request(cdev, req_info);
        /* The send is asynchronous: once the request completes, the
         * driver notifies us through the registered callback.
         */

        if (status)
                return status;
        else
                return -EINPROGRESS;
}

static int cvm_encrypt(struct ablkcipher_request *req)
{
        return cvm_enc_dec(req, true);
}

static int cvm_decrypt(struct ablkcipher_request *req)
{
        return cvm_enc_dec(req, false);
}

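/*
 * XTS keys arrive as two half-keys concatenated; store them at the
 * start of enc_key and at KEY2_OFFSET respectively.
 */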
static int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                          u32 keylen)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
        int err;
        const u8 *key1 = key;
        const u8 *key2 = key + (keylen / 2);

        err = xts_check_key(tfm, key, keylen);
        if (err)
                return err;
        ctx->key_len = keylen;
        memcpy(ctx->enc_key, key1, keylen / 2);
        memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
        ctx->cipher_type = AES_XTS;
        switch (ctx->key_len) {
        case 32:
                ctx->key_type = AES_128_BIT;
                break;
        case 64:
                ctx->key_type = AES_256_BIT;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

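/*
 * Map a valid AES key length to the hardware key-type encoding; for
 * 3DES-CBC the key-type field is cleared.
 */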
static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
{
        if ((keylen == 16) || (keylen == 24) || (keylen == 32)) {
                ctx->key_len = keylen;
                switch (ctx->key_len) {
                case 16:
                        ctx->key_type = AES_128_BIT;
                        break;
                case 24:
                        ctx->key_type = AES_192_BIT;
                        break;
                case 32:
                        ctx->key_type = AES_256_BIT;
                        break;
                default:
                        return -EINVAL;
                }

                if (ctx->cipher_type == DES3_CBC)
                        ctx->key_type = 0;

                return 0;
        }

        return -EINVAL;
}

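/*
 * Common setkey helper: record the cipher type, validate the key
 * length and copy the key into the context.
 */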
static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                      u32 keylen, u8 cipher_type)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->cipher_type = cipher_type;
        if (!cvm_validate_keylen(ctx, keylen)) {
                memcpy(ctx->enc_key, key, keylen);
                return 0;
        } else {
                crypto_ablkcipher_set_flags(cipher,
                                            CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }
}

static int cvm_cbc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                              u32 keylen)
{
        return cvm_setkey(cipher, key, keylen, AES_CBC);
}

static int cvm_ecb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                              u32 keylen)
{
        return cvm_setkey(cipher, key, keylen, AES_ECB);
}

static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                              u32 keylen)
{
        return cvm_setkey(cipher, key, keylen, AES_CFB);
}

static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                               u32 keylen)
{
        return verify_ablkcipher_des3_key(cipher, key) ?:
               cvm_setkey(cipher, key, keylen, DES3_CBC);
}

static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                               u32 keylen)
{
        return verify_ablkcipher_des3_key(cipher, key) ?:
               cvm_setkey(cipher, key, keylen, DES3_ECB);
}

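/* Reserve per-request context space for our request state. */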
static int cvm_enc_dec_init(struct crypto_tfm *tfm)
{
        tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx);
        return 0;
}

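/* Algorithms exposed to the crypto API, all backed by the CPT hardware. */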
static struct crypto_alg algs[] = { {
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct cvm_enc_ctx),
        .cra_alignmask = 7,
        .cra_priority = 4001,
        .cra_name = "xts(aes)",
        .cra_driver_name = "cavium-xts-aes",
        .cra_type = &crypto_ablkcipher_type,
        .cra_u = {
                .ablkcipher = {
                        .ivsize = AES_BLOCK_SIZE,
                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .setkey = cvm_xts_setkey,
                        .encrypt = cvm_encrypt,
                        .decrypt = cvm_decrypt,
                },
        },
        .cra_init = cvm_enc_dec_init,
        .cra_module = THIS_MODULE,
}, {
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct cvm_enc_ctx),
        .cra_alignmask = 7,
        .cra_priority = 4001,
        .cra_name = "cbc(aes)",
        .cra_driver_name = "cavium-cbc-aes",
        .cra_type = &crypto_ablkcipher_type,
        .cra_u = {
                .ablkcipher = {
                        .ivsize = AES_BLOCK_SIZE,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = cvm_cbc_aes_setkey,
                        .encrypt = cvm_encrypt,
                        .decrypt = cvm_decrypt,
                },
        },
        .cra_init = cvm_enc_dec_init,
        .cra_module = THIS_MODULE,
}, {
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct cvm_enc_ctx),
        .cra_alignmask = 7,
        .cra_priority = 4001,
        .cra_name = "ecb(aes)",
        .cra_driver_name = "cavium-ecb-aes",
        .cra_type = &crypto_ablkcipher_type,
        .cra_u = {
                .ablkcipher = {
                        .ivsize = AES_BLOCK_SIZE,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = cvm_ecb_aes_setkey,
                        .encrypt = cvm_encrypt,
                        .decrypt = cvm_decrypt,
                },
        },
        .cra_init = cvm_enc_dec_init,
        .cra_module = THIS_MODULE,
}, {
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct cvm_enc_ctx),
        .cra_alignmask = 7,
        .cra_priority = 4001,
        .cra_name = "cfb(aes)",
        .cra_driver_name = "cavium-cfb-aes",
        .cra_type = &crypto_ablkcipher_type,
        .cra_u = {
                .ablkcipher = {
                        .ivsize = AES_BLOCK_SIZE,
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = cvm_cfb_aes_setkey,
                        .encrypt = cvm_encrypt,
                        .decrypt = cvm_decrypt,
                },
        },
        .cra_init = cvm_enc_dec_init,
        .cra_module = THIS_MODULE,
}, {
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct cvm_des3_ctx),
        .cra_alignmask = 7,
        .cra_priority = 4001,
        .cra_name = "cbc(des3_ede)",
        .cra_driver_name = "cavium-cbc-des3_ede",
        .cra_type = &crypto_ablkcipher_type,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = DES3_EDE_KEY_SIZE,
                        .max_keysize = DES3_EDE_KEY_SIZE,
                        .ivsize = DES_BLOCK_SIZE,
                        .setkey = cvm_cbc_des3_setkey,
                        .encrypt = cvm_encrypt,
                        .decrypt = cvm_decrypt,
                },
        },
        .cra_init = cvm_enc_dec_init,
        .cra_module = THIS_MODULE,
}, {
        .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize = DES3_EDE_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct cvm_des3_ctx),
        .cra_alignmask = 7,
        .cra_priority = 4001,
        .cra_name = "ecb(des3_ede)",
        .cra_driver_name = "cavium-ecb-des3_ede",
        .cra_type = &crypto_ablkcipher_type,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize = DES3_EDE_KEY_SIZE,
                        .max_keysize = DES3_EDE_KEY_SIZE,
                        .ivsize = DES_BLOCK_SIZE,
                        .setkey = cvm_ecb_des3_setkey,
                        .encrypt = cvm_encrypt,
                        .decrypt = cvm_decrypt,
                },
        },
        .cra_init = cvm_enc_dec_init,
        .cra_module = THIS_MODULE,
} };

static inline int cav_register_algs(void)
{
        int err = 0;

        err = crypto_register_algs(algs, ARRAY_SIZE(algs));
        if (err)
                return err;

        return 0;
}

static inline void cav_unregister_algs(void)
{
        crypto_unregister_algs(algs, ARRAY_SIZE(algs));
}

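/*
 * Called once per CPT VF device; the algorithms are registered only
 * when the fourth device (dev_count == 3) is added.
 */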
int cvm_crypto_init(struct cpt_vf *cptvf)
{
        struct pci_dev *pdev = cptvf->pdev;
        u32 dev_count;

        dev_count = dev_handle.dev_count;
        dev_handle.cdev[dev_count] = cptvf;
        dev_handle.dev_count++;

        if (dev_count == 3) {
                if (cav_register_algs()) {
                        dev_err(&pdev->dev, "Error in registering crypto algorithms\n");
                        return -EINVAL;
                }
        }

        return 0;
}

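/*
 * Called on CPT VF removal; the algorithms are unregistered when the
 * last device goes away.
 */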
void cvm_crypto_exit(void)
{
        u32 dev_count;

        dev_count = --dev_handle.dev_count;
        if (!dev_count)
                cav_unregister_algs();
}
