root/drivers/crypto/marvell/cesa.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. mv_cesa_dequeue_req_locked
  2. mv_cesa_rearm_engine
  3. mv_cesa_std_process
  4. mv_cesa_int_process
  5. mv_cesa_complete_req
  6. mv_cesa_int
  7. mv_cesa_queue_req
  8. mv_cesa_add_algs
  9. mv_cesa_remove_algs
  10. mv_cesa_conf_mbus_windows
  11. mv_cesa_dev_dma_init
  12. mv_cesa_get_sram
  13. mv_cesa_put_sram
  14. mv_cesa_probe
  15. mv_cesa_remove

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
   4  * that can be found on the following platform: Orion, Kirkwood, Armada. This
   5  * driver supports the TDMA engine on platforms on which it is available.
   6  *
   7  * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
   8  * Author: Arnaud Ebalard <arno@natisbad.org>
   9  *
  10  * This work is based on an initial version written by
  11  * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
  12  */
  13 
  14 #include <linux/delay.h>
  15 #include <linux/dma-mapping.h>
  16 #include <linux/genalloc.h>
  17 #include <linux/interrupt.h>
  18 #include <linux/io.h>
  19 #include <linux/kthread.h>
  20 #include <linux/mbus.h>
  21 #include <linux/platform_device.h>
  22 #include <linux/scatterlist.h>
  23 #include <linux/slab.h>
  24 #include <linux/module.h>
  25 #include <linux/clk.h>
  26 #include <linux/of.h>
  27 #include <linux/of_platform.h>
  28 #include <linux/of_irq.h>
  29 
  30 #include "cesa.h"
  31 
/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128

/* Single global device instance; mv_cesa_probe() refuses a second device. */
struct mv_cesa_dev *cesa_dev;
  36 
  37 struct crypto_async_request *
  38 mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
  39                            struct crypto_async_request **backlog)
  40 {
  41         struct crypto_async_request *req;
  42 
  43         *backlog = crypto_get_backlog(&engine->queue);
  44         req = crypto_dequeue_request(&engine->queue);
  45 
  46         if (!req)
  47                 return NULL;
  48 
  49         return req;
  50 }
  51 
  52 static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
  53 {
  54         struct crypto_async_request *req = NULL, *backlog = NULL;
  55         struct mv_cesa_ctx *ctx;
  56 
  57 
  58         spin_lock_bh(&engine->lock);
  59         if (!engine->req) {
  60                 req = mv_cesa_dequeue_req_locked(engine, &backlog);
  61                 engine->req = req;
  62         }
  63         spin_unlock_bh(&engine->lock);
  64 
  65         if (!req)
  66                 return;
  67 
  68         if (backlog)
  69                 backlog->complete(backlog, -EINPROGRESS);
  70 
  71         ctx = crypto_tfm_ctx(req->tfm);
  72         ctx->ops->step(req);
  73 }
  74 
  75 static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
  76 {
  77         struct crypto_async_request *req;
  78         struct mv_cesa_ctx *ctx;
  79         int res;
  80 
  81         req = engine->req;
  82         ctx = crypto_tfm_ctx(req->tfm);
  83         res = ctx->ops->process(req, status);
  84 
  85         if (res == 0) {
  86                 ctx->ops->complete(req);
  87                 mv_cesa_engine_enqueue_complete_request(engine, req);
  88         } else if (res == -EINPROGRESS) {
  89                 ctx->ops->step(req);
  90         }
  91 
  92         return res;
  93 }
  94 
  95 static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
  96 {
  97         if (engine->chain.first && engine->chain.last)
  98                 return mv_cesa_tdma_process(engine, status);
  99 
 100         return mv_cesa_std_process(engine, status);
 101 }
 102 
/*
 * Finish @req: release the per-request resources via the ctx cleanup hook,
 * then invoke the owner's completion callback with result @res.
 *
 * NOTE(review): BHs are disabled around the callback — presumably so the
 * completion runs in the same (softirq-like) context whether it is invoked
 * from here or from the interrupt path; confirm against crypto API
 * completion-context expectations.
 */
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	req->complete(req, res);
	local_bh_enable();
}
 112 
/*
 * Threaded interrupt handler for one CESA engine.  Loops while unmasked
 * status bits are pending: acknowledges them, advances or completes the
 * in-flight request, rearms the engine with the next queued request, and
 * then runs the completion callbacks for every finished request.
 */
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);

		/* Nothing (relevant) pending: we are done. */
		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		/*
		 * Detach the request from the engine unless it still needs
		 * processing (-EINPROGRESS keeps it in flight).  Done under
		 * the lock so mv_cesa_rearm_engine() sees a consistent state.
		 */
		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		/* Real failure: complete the request with the error code. */
		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}
 168 
/*
 * Enqueue @req on its target engine's crypto queue and, if the engine is
 * idle, start processing it.
 *
 * Return: -EINPROGRESS when the request was accepted, or the error code
 * from crypto_enqueue_request() (e.g. -EBUSY when backlogged) otherwise.
 */
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	/*
	 * DMA requests are chained while still holding the lock so the
	 * TDMA descriptor chain stays consistent with the queue order.
	 */
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS || ret == -EBUSY))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}
 189 
 190 static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
 191 {
 192         int ret;
 193         int i, j;
 194 
 195         for (i = 0; i < cesa->caps->ncipher_algs; i++) {
 196                 ret = crypto_register_skcipher(cesa->caps->cipher_algs[i]);
 197                 if (ret)
 198                         goto err_unregister_crypto;
 199         }
 200 
 201         for (i = 0; i < cesa->caps->nahash_algs; i++) {
 202                 ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
 203                 if (ret)
 204                         goto err_unregister_ahash;
 205         }
 206 
 207         return 0;
 208 
 209 err_unregister_ahash:
 210         for (j = 0; j < i; j++)
 211                 crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
 212         i = cesa->caps->ncipher_algs;
 213 
 214 err_unregister_crypto:
 215         for (j = 0; j < i; j++)
 216                 crypto_unregister_skcipher(cesa->caps->cipher_algs[j]);
 217 
 218         return ret;
 219 }
 220 
 221 static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
 222 {
 223         int i;
 224 
 225         for (i = 0; i < cesa->caps->nahash_algs; i++)
 226                 crypto_unregister_ahash(cesa->caps->ahash_algs[i]);
 227 
 228         for (i = 0; i < cesa->caps->ncipher_algs; i++)
 229                 crypto_unregister_skcipher(cesa->caps->cipher_algs[i]);
 230 }
 231 
/* Ciphers supported on Orion-class engines. */
static struct skcipher_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

/* Hashes supported on Orion-class engines (no SHA-256 entries). */
static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

/* Ciphers supported on Armada 370 and later. */
static struct skcipher_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

/* Hashes supported on Armada 370 and later (adds SHA-256 variants). */
static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

/* Orion: single engine, no TDMA. */
static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

/* Kirkwood/Dove: Orion algorithm set, but with a TDMA engine. */
static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

/* Armada 370: single engine with TDMA and the extended algorithm set. */
static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

/* Armada XP/375/38x: like Armada 370 but with two engines. */
static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

/* DT match table; .data points at the per-SoC capability struct. */
static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
 313 
 314 static void
 315 mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
 316                           const struct mbus_dram_target_info *dram)
 317 {
 318         void __iomem *iobase = engine->regs;
 319         int i;
 320 
 321         for (i = 0; i < 4; i++) {
 322                 writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
 323                 writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
 324         }
 325 
 326         for (i = 0; i < dram->num_cs; i++) {
 327                 const struct mbus_dram_window *cs = dram->cs + i;
 328 
 329                 writel(((cs->size - 1) & 0xffff0000) |
 330                        (cs->mbus_attr << 8) |
 331                        (dram->mbus_dram_target_id << 4) | 1,
 332                        iobase + CESA_TDMA_WINDOW_CTRL(i));
 333                 writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
 334         }
 335 }
 336 
 337 static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
 338 {
 339         struct device *dev = cesa->dev;
 340         struct mv_cesa_dev_dma *dma;
 341 
 342         if (!cesa->caps->has_tdma)
 343                 return 0;
 344 
 345         dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
 346         if (!dma)
 347                 return -ENOMEM;
 348 
 349         dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
 350                                         sizeof(struct mv_cesa_tdma_desc),
 351                                         16, 0);
 352         if (!dma->tdma_desc_pool)
 353                 return -ENOMEM;
 354 
 355         dma->op_pool = dmam_pool_create("cesa_op", dev,
 356                                         sizeof(struct mv_cesa_op_ctx), 16, 0);
 357         if (!dma->op_pool)
 358                 return -ENOMEM;
 359 
 360         dma->cache_pool = dmam_pool_create("cesa_cache", dev,
 361                                            CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
 362         if (!dma->cache_pool)
 363                 return -ENOMEM;
 364 
 365         dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
 366         if (!dma->padding_pool)
 367                 return -ENOMEM;
 368 
 369         cesa->dma = dma;
 370 
 371         return 0;
 372 }
 373 
 374 static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
 375 {
 376         struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
 377         struct mv_cesa_engine *engine = &cesa->engines[idx];
 378         const char *res_name = "sram";
 379         struct resource *res;
 380 
 381         engine->pool = of_gen_pool_get(cesa->dev->of_node,
 382                                        "marvell,crypto-srams", idx);
 383         if (engine->pool) {
 384                 engine->sram = gen_pool_dma_alloc(engine->pool,
 385                                                   cesa->sram_size,
 386                                                   &engine->sram_dma);
 387                 if (engine->sram)
 388                         return 0;
 389 
 390                 engine->pool = NULL;
 391                 return -ENOMEM;
 392         }
 393 
 394         if (cesa->caps->nengines > 1) {
 395                 if (!idx)
 396                         res_name = "sram0";
 397                 else
 398                         res_name = "sram1";
 399         }
 400 
 401         res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 402                                            res_name);
 403         if (!res || resource_size(res) < cesa->sram_size)
 404                 return -EINVAL;
 405 
 406         engine->sram = devm_ioremap_resource(cesa->dev, res);
 407         if (IS_ERR(engine->sram))
 408                 return PTR_ERR(engine->sram);
 409 
 410         engine->sram_dma = dma_map_resource(cesa->dev, res->start,
 411                                             cesa->sram_size,
 412                                             DMA_BIDIRECTIONAL, 0);
 413         if (dma_mapping_error(cesa->dev, engine->sram_dma))
 414                 return -ENOMEM;
 415 
 416         return 0;
 417 }
 418 
 419 static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
 420 {
 421         struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
 422         struct mv_cesa_engine *engine = &cesa->engines[idx];
 423 
 424         if (engine->pool)
 425                 gen_pool_free(engine->pool, (unsigned long)engine->sram,
 426                               cesa->sram_size);
 427         else
 428                 dma_unmap_resource(cesa->dev, engine->sram_dma,
 429                                    cesa->sram_size, DMA_BIDIRECTIONAL, 0);
 430 }
 431 
/*
 * Probe one CESA device: resolve the platform capabilities (DT match, or
 * the Orion defaults for legacy platform-bus binding), map registers and
 * per-engine SRAM, create DMA pools, enable clocks, install per-engine
 * interrupt handlers, and finally register the crypto algorithms.
 *
 * NOTE(review): the err_cleanup loop iterates over ALL engines, including
 * ones whose setup never started.  clk_disable_unprepare() tolerates the
 * NULL clocks of untouched engines, but mv_cesa_put_sram() is also called
 * for engines whose SRAM was never obtained — confirm that path is safe.
 */
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	struct resource *res;
	int irq, ret, i;
	u32 sram_size;

	/* Only one CESA instance is supported (global cesa_dev). */
	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	/* DT binding: pick capabilities matching the compatible string. */
	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	/* SRAM size: DT-overridable, clamped to the supported minimum. */
	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kcalloc(dev, caps->nengines, sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	cesa->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	/* Needed before the loop: mv_cesa_get_sram() reads the drvdata. */
	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		/* Optional per-engine "z" clock; absent on many SoCs. */
		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		/* Reset interrupt state and point the engine at its SRAM. */
		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	/* Publish before registering algs: their hooks use cesa_dev. */
	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return ret;
}
 577 
 578 static int mv_cesa_remove(struct platform_device *pdev)
 579 {
 580         struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
 581         int i;
 582 
 583         mv_cesa_remove_algs(cesa);
 584 
 585         for (i = 0; i < cesa->caps->nengines; i++) {
 586                 clk_disable_unprepare(cesa->engines[i].zclk);
 587                 clk_disable_unprepare(cesa->engines[i].clk);
 588                 mv_cesa_put_sram(pdev, i);
 589         }
 590 
 591         return 0;
 592 }
 593 
/* Legacy (non-DT) platform-bus binding name. */
static const struct platform_device_id mv_cesa_plat_id_table[] = {
	{ .name = "mv_crypto" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);

/* Platform driver glue: binds via DT match table or the legacy id table. */
static struct platform_driver marvell_cesa = {
	.probe		= mv_cesa_probe,
	.remove		= mv_cesa_remove,
	.id_table	= mv_cesa_plat_id_table,
	.driver		= {
		.name	= "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");

/* [<][>][^][v][top][bottom][index][help] */