root/drivers/crypto/ccp/ccp-dev-v5.c

DEFINITIONS

This source file includes the following definitions:
  1. ccp_lsb_alloc
  2. ccp_lsb_free
  3. low_address
  4. high_address
  5. ccp5_get_free_slots
  6. ccp5_do_cmd
  7. ccp5_perform_aes
  8. ccp5_perform_xts_aes
  9. ccp5_perform_sha
  10. ccp5_perform_des3
  11. ccp5_perform_rsa
  12. ccp5_perform_passthru
  13. ccp5_perform_ecc
  14. ccp_find_lsb_regions
  15. ccp_find_and_assign_lsb_to_q
  16. ccp_assign_lsbs
  17. ccp5_disable_queue_interrupts
  18. ccp5_enable_queue_interrupts
  19. ccp5_irq_bh
  20. ccp5_irq_handler
  21. ccp5_init
  22. ccp5_destroy
  23. ccp5_config
  24. ccp5other_config

// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* Allocate the requested number of contiguous LSB slots
 * from the LSB bitmap. Look in the private range for this
 * queue first; failing that, check the public area.
 * If no space is available, wait until some becomes free.
 * Return: the first slot number, or 0 if the wait was interrupted
 */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
        struct ccp_device *ccp;
        int start;

        /* First look at the map for the queue */
        if (cmd_q->lsb >= 0) {
                start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,
                                                        LSB_SIZE,
                                                        0, count, 0);
                if (start < LSB_SIZE) {
                        bitmap_set(cmd_q->lsbmap, start, count);
                        return start + cmd_q->lsb * LSB_SIZE;
                }
        }

        /* No joy; try to get an entry from the shared blocks */
        ccp = cmd_q->ccp;
        for (;;) {
                mutex_lock(&ccp->sb_mutex);

                start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
                                                        MAX_LSB_CNT * LSB_SIZE,
                                                        0,
                                                        count, 0);
                /* A result >= the bitmap size means no free area was found */
                if (start < MAX_LSB_CNT * LSB_SIZE) {
                        bitmap_set(ccp->lsbmap, start, count);

                        mutex_unlock(&ccp->sb_mutex);
                        return start;
                }

                ccp->sb_avail = 0;

                mutex_unlock(&ccp->sb_mutex);

                /* Wait for KSB entries to become available */
                if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
                        return 0;
        }
}

/* Free a number of LSB slots from the bitmap, starting at
 * the indicated starting slot number.
 */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
                         unsigned int count)
{
        int lsbno = start / LSB_SIZE;

        if (!start)
                return;

        if (cmd_q->lsb == lsbno) {
                /* An entry from this queue's private LSB; undo the
                 * region offset that ccp_lsb_alloc() applied
                 */
                bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
        } else {
                /* From the shared LSBs */
                struct ccp_device *ccp = cmd_q->ccp;

                mutex_lock(&ccp->sb_mutex);
                bitmap_clear(ccp->lsbmap, start, count);
                ccp->sb_avail = 1;
                mutex_unlock(&ccp->sb_mutex);
                wake_up_interruptible_all(&ccp->sb_queue);
        }
}

/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
union ccp_function {
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 mode:5;
                u16 type:2;
        } aes;
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 rsvd:5;
                u16 type:2;
        } aes_xts;
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 mode:5;
                u16 type:2;
        } des3;
        struct {
                u16 rsvd1:10;
                u16 type:4;
                u16 rsvd2:1;
        } sha;
        struct {
                u16 mode:3;
                u16 size:12;
        } rsa;
        struct {
                u16 byteswap:2;
                u16 bitwise:3;
                u16 reflect:2;
                u16 rsvd:8;
        } pt;
        struct {
                u16 rsvd:13;
        } zlib;
        struct {
                u16 size:10;
                u16 type:2;
                u16 mode:3;
        } ecc;
        u16 raw;
};

#define CCP_AES_SIZE(p)         ((p)->aes.size)
#define CCP_AES_ENCRYPT(p)      ((p)->aes.encrypt)
#define CCP_AES_MODE(p)         ((p)->aes.mode)
#define CCP_AES_TYPE(p)         ((p)->aes.type)
#define CCP_XTS_SIZE(p)         ((p)->aes_xts.size)
#define CCP_XTS_TYPE(p)         ((p)->aes_xts.type)
#define CCP_XTS_ENCRYPT(p)      ((p)->aes_xts.encrypt)
#define CCP_DES3_SIZE(p)        ((p)->des3.size)
#define CCP_DES3_ENCRYPT(p)     ((p)->des3.encrypt)
#define CCP_DES3_MODE(p)        ((p)->des3.mode)
#define CCP_DES3_TYPE(p)        ((p)->des3.type)
#define CCP_SHA_TYPE(p)         ((p)->sha.type)
#define CCP_RSA_SIZE(p)         ((p)->rsa.size)
#define CCP_PT_BYTESWAP(p)      ((p)->pt.byteswap)
#define CCP_PT_BITWISE(p)       ((p)->pt.bitwise)
#define CCP_ECC_MODE(p)         ((p)->ecc.mode)
#define CCP_ECC_AFFINE(p)       ((p)->ecc.one) /* unused: 'ecc' has no 'one' bitfield */

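/* Illustrative sketch (not part of the driver): an engine's function
 * field is built by zeroing 'raw' and filling the per-engine bitfield
 * through the accessor macros, e.g. for AES:
 *
 *      union ccp_function function;
 *
 *      function.raw = 0;
 *      CCP_AES_ENCRYPT(&function) = op->u.aes.action;
 *      CCP_AES_MODE(&function) = op->u.aes.mode;
 *      CCP_AES_TYPE(&function) = op->u.aes.type;
 *
 * and the packed value is then stored into the descriptor via the
 * CCP5_CMD_FUNCTION() macro defined below.
 */
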
/* Word 0 */
#define CCP5_CMD_DW0(p)         ((p)->dw0)
#define CCP5_CMD_SOC(p)         (CCP5_CMD_DW0(p).soc)
#define CCP5_CMD_IOC(p)         (CCP5_CMD_DW0(p).ioc)
#define CCP5_CMD_INIT(p)        (CCP5_CMD_DW0(p).init)
#define CCP5_CMD_EOM(p)         (CCP5_CMD_DW0(p).eom)
#define CCP5_CMD_FUNCTION(p)    (CCP5_CMD_DW0(p).function)
#define CCP5_CMD_ENGINE(p)      (CCP5_CMD_DW0(p).engine)
#define CCP5_CMD_PROT(p)        (CCP5_CMD_DW0(p).prot)

/* Word 1 */
#define CCP5_CMD_DW1(p)         ((p)->length)
#define CCP5_CMD_LEN(p)         (CCP5_CMD_DW1(p))

/* Word 2 */
#define CCP5_CMD_DW2(p)         ((p)->src_lo)
#define CCP5_CMD_SRC_LO(p)      (CCP5_CMD_DW2(p))

/* Word 3 */
#define CCP5_CMD_DW3(p)         ((p)->dw3)
#define CCP5_CMD_SRC_MEM(p)     ((p)->dw3.src_mem)
#define CCP5_CMD_SRC_HI(p)      ((p)->dw3.src_hi)
#define CCP5_CMD_LSB_ID(p)      ((p)->dw3.lsb_cxt_id)
#define CCP5_CMD_FIX_SRC(p)     ((p)->dw3.fixed)

/* Words 4/5 */
#define CCP5_CMD_DW4(p)         ((p)->dw4)
#define CCP5_CMD_DST_LO(p)      (CCP5_CMD_DW4(p).dst_lo)
#define CCP5_CMD_DW5(p)         ((p)->dw5.fields.dst_hi)
#define CCP5_CMD_DST_HI(p)      (CCP5_CMD_DW5(p))
#define CCP5_CMD_DST_MEM(p)     ((p)->dw5.fields.dst_mem)
#define CCP5_CMD_FIX_DST(p)     ((p)->dw5.fields.fixed)
#define CCP5_CMD_SHA_LO(p)      ((p)->dw4.sha_len_lo)
#define CCP5_CMD_SHA_HI(p)      ((p)->dw5.sha_len_hi)

/* Words 6/7 */
#define CCP5_CMD_DW6(p)         ((p)->key_lo)
#define CCP5_CMD_KEY_LO(p)      (CCP5_CMD_DW6(p))
#define CCP5_CMD_DW7(p)         ((p)->dw7)
#define CCP5_CMD_KEY_HI(p)      ((p)->dw7.key_hi)
#define CCP5_CMD_KEY_MEM(p)     ((p)->dw7.key_mem)

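/* Queue ring addresses are split across registers: the low 32 bits go
 * into a dedicated head/tail register, while the high bits are folded
 * into the queue control register. For example, a 40-bit DMA address
 * 0x12_3456_7000 splits into low_address() == 0x34567000 and
 * high_address() == 0x12.
 */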
static inline u32 low_address(unsigned long addr)
{
        return (u64)addr & 0x0ffffffff;
}

static inline u32 high_address(unsigned long addr)
{
        return ((u64)addr >> 32) & 0x00000ffff;
}

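/* Count the free descriptor slots in the circular command queue: the
 * distance from the software tail index (qidx) back around to the
 * hardware head pointer, minus the one slot that is always left empty
 * so a full ring can be distinguished from an empty one. E.g., with
 * COMMANDS_PER_QUEUE == 32, head_idx == 5 and qidx == 5 (empty ring),
 * n = 5 + 32 - 5 - 1 = 31 free slots.
 */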
static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
        unsigned int head_idx, n;
        u32 head_lo, queue_start;

        queue_start = low_address(cmd_q->qdma_tail);
        head_lo = ioread32(cmd_q->reg_head_lo);
        head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc);

        n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;

        return n % COMMANDS_PER_QUEUE; /* Always one unused spot */
}

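/* Copy a fully built descriptor into the next slot of the queue ring,
 * advance the software tail index, and restart the queue. If the
 * descriptor requests an interrupt on completion (IOC), sleep until
 * the interrupt bottom half signals completion; on error, log it and
 * flush the queue by advancing the head pointer past the bad entry.
 */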
static int ccp5_do_cmd(struct ccp5_desc *desc,
                       struct ccp_cmd_queue *cmd_q)
{
        __le32 *mP;
        u32 *dP;
        u32 tail;
        int i;
        int ret = 0;

        cmd_q->total_ops++;

        if (CCP5_CMD_SOC(desc)) {
                CCP5_CMD_IOC(desc) = 1;
                CCP5_CMD_SOC(desc) = 0;
        }
        mutex_lock(&cmd_q->q_mutex);

        mP = (__le32 *) &cmd_q->qbase[cmd_q->qidx];
        dP = (u32 *) desc;
        for (i = 0; i < 8; i++)
                mP[i] = cpu_to_le32(dP[i]); /* handle endianness */

        cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

        /* The data used by this command must be flushed to memory */
        wmb();

        /* Write the new tail address back to the queue register */
        tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
        iowrite32(tail, cmd_q->reg_tail_lo);

        /* Turn the queue back on using our cached control register */
        iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control);
        mutex_unlock(&cmd_q->q_mutex);

        if (CCP5_CMD_IOC(desc)) {
                /* Wait for the job to complete */
                ret = wait_event_interruptible(cmd_q->int_queue,
                                               cmd_q->int_rcvd);
                if (ret || cmd_q->cmd_error) {
                        /* Log the error and flush the queue by
                         * moving the head pointer
                         */
                        if (cmd_q->cmd_error)
                                ccp_log_error(cmd_q->ccp,
                                              cmd_q->cmd_error);
                        iowrite32(tail, cmd_q->reg_head_lo);
                        if (!ret)
                                ret = -EIO;
                }
                cmd_q->int_rcvd = 0;
        }

        return ret;
}

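/* Build and submit a descriptor for an AES operation. The key has
 * already been copied into the LSB slot given by sb_key, and the
 * context (IV) into sb_ctx; source and destination are DMA-mapped
 * system memory.
 */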
static int ccp5_perform_aes(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_aes_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_AES_ENCRYPT(&function) = op->u.aes.action;
        CCP_AES_MODE(&function) = op->u.aes.mode;
        CCP_AES_TYPE(&function) = op->u.aes.type;
        CCP_AES_SIZE(&function) = op->u.aes.size;

        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_xts_aes(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_xts_aes_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_XTS_TYPE(&function) = op->u.xts.type;
        CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
        CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

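/* SHA always reloads its context from the LSB slot (INIT is forced to
 * 1); the total message length in bits is supplied only on the final
 * block (EOM) so the engine can apply padding.
 */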
static int ccp5_perform_sha(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_sha_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 1;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_SHA_TYPE(&function) = op->u.sha.type;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        if (op->eom) {
                CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits);
                CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits);
        } else {
                CCP5_CMD_SHA_LO(&desc) = 0;
                CCP5_CMD_SHA_HI(&desc) = 0;
        }

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_des3(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_3des_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
        CCP_DES3_MODE(&function) = op->u.des3.mode;
        CCP_DES3_TYPE(&function) = op->u.des3.type;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

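/* Unlike the symmetric engines, RSA takes its key (the exponent)
 * straight from DMA-mapped system memory rather than from an LSB
 * slot, and the modulus size is converted from bits to bytes for the
 * function field.
 */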
static int ccp5_perform_rsa(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_rsa_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = 1;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;

        /* Source is from external memory */
        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        /* Destination is in external memory */
        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        /* Key (Exponent) is in external memory */
        CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
        CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

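/* Pass-through moves data with optional byte-swapping and bit masking.
 * Either end of the copy may be system memory or an LSB (SB) slot, so
 * the source/destination encoding depends on the memory type of each.
 */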
static int ccp5_perform_passthru(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        struct ccp_dma_info *saddr = &op->src.u.dma;
        struct ccp_dma_info *daddr = &op->dst.u.dma;

        op->cmd_q->total_pt_ops++;

        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;

        CCP5_CMD_SOC(&desc) = 0;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap;
        CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        /* Take the length from whichever side is in system memory */
        if (op->src.type == CCP_MEMTYPE_SYSTEM)
                CCP5_CMD_LEN(&desc) = saddr->length;
        else
                CCP5_CMD_LEN(&desc) = daddr->length;

        if (op->src.type == CCP_MEMTYPE_SYSTEM) {
                CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
                CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
                CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

                if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
                        CCP5_CMD_LSB_ID(&desc) = op->sb_key;
        } else {
                u32 key_addr = op->src.u.sb * CCP_SB_BYTES;

                CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr);
                CCP5_CMD_SRC_HI(&desc) = 0;
                CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB;
        }

        if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
                CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
                CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
                CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
        } else {
                u32 key_addr = op->dst.u.sb * CCP_SB_BYTES;

                CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr);
                CCP5_CMD_DST_HI(&desc) = 0;
                CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB;
        }

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_ecc(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_ecc_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC;

        CCP5_CMD_SOC(&desc) = 0;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = 1;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        function.ecc.mode = op->u.ecc.function;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

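/* The private-mask status word carries one LSB_REGION_WIDTH-bit field
 * per LSB region; a set bit for this queue's ID within a region's
 * field grants the queue access to that region.
 */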
static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
{
        int q_mask = 1 << cmd_q->id;
        int queues = 0;
        int j;

        /* Build a bit mask to know which LSBs this queue has access to.
         * Don't bother with segment 0 as it has special privileges.
         */
        for (j = 1; j < MAX_LSB_CNT; j++) {
                status >>= LSB_REGION_WIDTH;
                if (status & q_mask)
                        bitmap_set(cmd_q->lsbmask, j, 1);
        }

        queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
        dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
                cmd_q->id, queues);

        return queues ? 0 : -EINVAL;
}

static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
                                        int lsb_cnt, int n_lsbs,
                                        unsigned long *lsb_pub)
{
        DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
        int bitno;
        int qlsb_wgt;
        int i;

        /* For each queue:
         * If the count of potential LSBs available to a queue matches the
         * ordinal given to us in lsb_cnt:
         * Copy the mask of possible LSBs for this queue into "qlsb";
         * For each bit in qlsb, see if the corresponding bit in the
         * aggregation mask is set; if so, we have a match.
         *     If we have a match, clear the bit in the aggregation to
         *     mark it as no longer available.
         *     If there is no match, clear the bit in qlsb and keep looking.
         */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

                qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);

                if (qlsb_wgt == lsb_cnt) {
                        bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);

                        bitno = find_first_bit(qlsb, MAX_LSB_CNT);
                        while (bitno < MAX_LSB_CNT) {
                                if (test_bit(bitno, lsb_pub)) {
                                        /* We found an available LSB
                                         * that this queue can access
                                         */
                                        cmd_q->lsb = bitno;
                                        bitmap_clear(lsb_pub, bitno, 1);
                                        dev_dbg(ccp->dev,
                                                "Queue %d gets LSB %d\n",
                                                i, bitno);
                                        break;
                                }
                                bitmap_clear(qlsb, bitno, 1);
                                bitno = find_first_bit(qlsb, MAX_LSB_CNT);
                        }
                        if (bitno >= MAX_LSB_CNT)
                                return -EINVAL;
                        n_lsbs--;
                }
        }
        return n_lsbs;
}

/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared resources.
 */
static int ccp_assign_lsbs(struct ccp_device *ccp)
{
        DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
        DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
        int n_lsbs = 0;
        int bitno;
        int i, lsb_cnt;
        int rc = 0;

        bitmap_zero(lsb_pub, MAX_LSB_CNT);

        /* Create an aggregate bitmap to get a total count of available LSBs */
        for (i = 0; i < ccp->cmd_q_count; i++)
                bitmap_or(lsb_pub,
                          lsb_pub, ccp->cmd_q[i].lsbmask,
                          MAX_LSB_CNT);

        n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);

        if (n_lsbs >= ccp->cmd_q_count) {
                /* We have enough LSBs to give every queue a private LSB.
                 * Brute force search to start with the queues that are more
                 * constrained in LSB choice. When an LSB is privately
                 * assigned, it is removed from the public mask.
                 * This is an ugly N squared algorithm with some optimization.
                 */
                for (lsb_cnt = 1;
                     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
                     lsb_cnt++) {
                        rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
                                                          lsb_pub);
                        if (rc < 0)
                                return -EINVAL;
                        n_lsbs = rc;
                }
        }

        rc = 0;
        /* What's left of the LSBs, according to the public mask, now become
         * shared. Any zero bits in the lsb_pub mask represent an LSB region
         * that can't be used as a shared resource, so mark the LSB slots for
         * them as "in use".
         */
        bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);

        bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
        while (bitno < MAX_LSB_CNT) {
                bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
                bitmap_set(qlsb, bitno, 1);
                bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
        }

        return rc;
}

static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
{
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++)
                iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
}

static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
{
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++)
                iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
}

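/* Bottom half of interrupt handling: scan every queue for a pending
 * interrupt, capture its status registers, record only the first
 * error seen, wake the command thread sleeping in ccp5_do_cmd(), and
 * finally re-enable the queue interrupts that the hard IRQ handler
 * disabled.
 */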
static void ccp5_irq_bh(unsigned long data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;
        u32 status;
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

                status = ioread32(cmd_q->reg_interrupt_status);

                if (status) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((status & INT_ERROR) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(status, cmd_q->reg_interrupt_status);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }
        ccp5_enable_queue_interrupts(ccp);
}

static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;

        ccp5_disable_queue_interrupts(ccp);
        ccp->total_interrupts++;
        if (ccp->use_tasklet)
                tasklet_schedule(&ccp->irq_tasklet);
        else
                ccp5_irq_bh((unsigned long)ccp);
        return IRQ_HANDLED;
}

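/* Bring up a v5 CCP: discover the usable queues, give each one a DMA
 * pool and a coherent descriptor ring, map its control registers,
 * hook up the IRQ, parse and assign the LSB regions, and start a
 * kthread per queue before registering the RNG and DMA engine
 * services.
 */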
static int ccp5_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, i;
        u64 status;
        u32 status_lo, status_hi;
        int ret;

        /* Find available queues */
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
                         ccp->name, i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;
                mutex_init(&cmd_q->q_mutex);

                /* Page alignment satisfies our needs for N <= 128 */
                BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
                cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
                cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize,
                                                   &cmd_q->qbase_dma,
                                                   GFP_KERNEL);
                if (!cmd_q->qbase) {
                        dev_err(dev, "unable to allocate command queue\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q->qidx = 0;
                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_control = ccp->io_regs +
                                     CMD5_Q_STATUS_INCR * (i + 1);
                cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
                cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
                cmd_q->reg_int_enable = cmd_q->reg_control +
                                        CMD5_Q_INT_ENABLE_BASE;
                cmd_q->reg_interrupt_status = cmd_q->reg_control +
                                              CMD5_Q_INTERRUPT_STATUS_BASE;
                cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
                cmd_q->reg_int_status = cmd_q->reg_control +
                                        CMD5_Q_INT_STATUS_BASE;
                cmd_q->reg_dma_status = cmd_q->reg_control +
                                        CMD5_Q_DMA_STATUS_BASE;
                cmd_q->reg_dma_read_status = cmd_q->reg_control +
                                             CMD5_Q_DMA_READ_STATUS_BASE;
                cmd_q->reg_dma_write_status = cmd_q->reg_control +
                                              CMD5_Q_DMA_WRITE_STATUS_BASE;

                init_waitqueue_head(&cmd_q->int_queue);

                dev_dbg(dev, "queue #%u available\n", i);
        }

        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }

        /* Turn off the queues and disable interrupts until ready */
        ccp5_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                cmd_q->qcontrol = 0; /* Start with nothing */
                iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);

                /* Clear the interrupt status */
                iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
        }

        dev_dbg(dev, "Requesting an IRQ...\n");
        /* Request an irq */
        ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }
        /* Initialize the ISR tasklet */
        if (ccp->use_tasklet)
                tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
                             (unsigned long)ccp);

        dev_dbg(dev, "Loading LSB map...\n");
        /* Copy the private LSB mask to the public registers */
        status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
        status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
        iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
        iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
        /* The LSB access mask is 40 bits wide: 30 bits in the LO
         * register plus 10 bits in the HI register (5 bits per region)
         */
        status = ((u64)status_hi << 30) | (u64)status_lo;

        dev_dbg(dev, "Configuring virtual queues...\n");
        /* Configure size of each virtual queue accessible to host */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                u32 dma_addr_lo;
                u32 dma_addr_hi;

                cmd_q = &ccp->cmd_q[i];

                cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
                cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;

                cmd_q->qdma_tail = cmd_q->qbase_dma;
                dma_addr_lo = low_address(cmd_q->qdma_tail);
                iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
                iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);

                dma_addr_hi = high_address(cmd_q->qdma_tail);
                cmd_q->qcontrol |= (dma_addr_hi << 16);
                iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

                /* Find the LSB regions accessible to the queue */
                ccp_find_lsb_regions(cmd_q, status);
                cmd_q->lsb = -1; /* Unassigned value */
        }

        dev_dbg(dev, "Assigning LSBs...\n");
        ret = ccp_assign_lsbs(ccp);
        if (ret) {
                dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
                goto e_irq;
        }

        /* Optimization: pre-allocate LSB slots for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
                ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
        }

        dev_dbg(dev, "Starting threads...\n");
        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "%s-q%u", ccp->name, cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        dev_dbg(dev, "Enabling interrupts...\n");
        ccp5_enable_queue_interrupts(ccp);

        dev_dbg(dev, "Registering device...\n");
        /* Put this on the unit list to make it available */
        ccp_add_device(ccp);

        ret = ccp_register_rng(ccp);
        if (ret)
                goto e_kthread;

        /* Register the DMA engine support */
        ret = ccp_dmaengine_register(ccp);
        if (ret)
                goto e_hwrng;

#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
        /* Set up debugfs entries */
        ccp5_debugfs_setup(ccp);
#endif

        return 0;

e_hwrng:
        ccp_unregister_rng(ccp);

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

e_irq:
        sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}

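/* Tear down in roughly the reverse order of ccp5_init(): unregister
 * services, remove the device from the unit list, quiesce and silence
 * the queues, stop the kthreads, release the IRQ, and fail any
 * commands still queued or backlogged.
 */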
static void ccp5_destroy(struct ccp_device *ccp)
{
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int i;

        /* Unregister the DMA engine */
        ccp_dmaengine_unregister(ccp);

        /* Unregister the RNG */
        ccp_unregister_rng(ccp);

        /* Remove this device from the list of available units first */
        ccp_del_device(ccp);

#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
        /* We're in the process of tearing down the entire driver;
         * when all the devices are gone clean up debugfs
         */
        if (!ccp_present())
                ccp5_debugfs_destroy();
#endif

        /* Disable and clear interrupts */
        ccp5_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                /* Turn off the run bit */
                iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);

                /* Clear the interrupt status */
                iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        sp_free_ccp_irq(ccp->sp, ccp);

        /* Flush the cmd and backlog queue */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}

static void ccp5_config(struct ccp_device *ccp)
{
        /* Public side */
        iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
}

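/* The "other" (v5b/NTB) CCP owns every queue, so this setup also
 * seeds the AES mask from the TRNG, opens up all queues, sets their
 * priorities, and programs the private LSB masks (30 bits LO, 10 bits
 * HI) before applying the common public-side configuration.
 */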
static void ccp5other_config(struct ccp_device *ccp)
{
        int i;
        u32 rnd;

        /* We own all of the queues on the NTB CCP */

        iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
        iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
        for (i = 0; i < 12; i++) {
                rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
                iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
        }

        iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
        iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
        iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);

        iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
        iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);

        iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);

        ccp5_config(ccp);
}

/* Version 5 adds some functions, but is essentially the same as v3 */
static const struct ccp_actions ccp5_actions = {
        .aes = ccp5_perform_aes,
        .xts_aes = ccp5_perform_xts_aes,
        .sha = ccp5_perform_sha,
        .des3 = ccp5_perform_des3,
        .rsa = ccp5_perform_rsa,
        .passthru = ccp5_perform_passthru,
        .ecc = ccp5_perform_ecc,
        .sballoc = ccp_lsb_alloc,
        .sbfree = ccp_lsb_free,
        .init = ccp5_init,
        .destroy = ccp5_destroy,
        .get_free_slots = ccp5_get_free_slots,
};

const struct ccp_vdata ccpv5a = {
        .version = CCP_VERSION(5, 0),
        .setup = ccp5_config,
        .perform = &ccp5_actions,
        .offset = 0x0,
        .rsamax = CCP5_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv5b = {
        .version = CCP_VERSION(5, 0),
        .dma_chan_attr = DMA_PRIVATE,
        .setup = ccp5other_config,
        .perform = &ccp5_actions,
        .offset = 0x0,
        .rsamax = CCP5_RSA_MAX_WIDTH,
};
