root/drivers/crypto/qat/qat_common/adf_isr.c

DEFINITIONS

This source file includes the following definitions:
  1. adf_enable_msix
  2. adf_disable_msix
  3. adf_msix_isr_bundle
  4. adf_msix_isr_ae
  5. adf_request_irqs
  6. adf_free_irqs
  7. adf_isr_alloc_msix_entry_table
  8. adf_isr_free_msix_entry_table
  9. adf_setup_bh
  10. adf_cleanup_bh
  11. adf_isr_resource_free
  12. adf_isr_resource_alloc

/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 msix_num_entries = 1;

        /* If SR-IOV is disabled, add entries for each bank */
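        /*
         * Resulting MSI-X vector layout: entries 0..num_banks-1 are the
         * per-bundle response vectors and entry num_banks is the AE
         * cluster vector.  With SR-IOV enabled only the AE vector is
         * used, so the single entry below is given index num_banks.
         */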
        if (!accel_dev->pf.vf_info) {
                int i;

                msix_num_entries += hw_data->num_banks;
                for (i = 0; i < msix_num_entries; i++)
                        pci_dev_info->msix_entries.entries[i].entry = i;
        } else {
                pci_dev_info->msix_entries.entries[0].entry =
                        hw_data->num_banks;
        }

        if (pci_enable_msix_exact(pci_dev_info->pci_dev,
                                  pci_dev_info->msix_entries.entries,
                                  msix_num_entries)) {
                dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
                return -EFAULT;
        }
        return 0;
}

static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
        pci_disable_msix(pci_dev_info->pci_dev);
}

static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
{
        struct adf_etr_bank_data *bank = bank_ptr;

        WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
        tasklet_hi_schedule(&bank->resp_handler);
        return IRQ_HANDLED;
}

static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
        struct adf_accel_dev *accel_dev = dev_ptr;

#ifdef CONFIG_PCI_IOV
        /* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
        if (accel_dev->pf.vf_info) {
                struct adf_hw_device_data *hw_data = accel_dev->hw_device;
                struct adf_bar *pmisc =
                        &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
                void __iomem *pmisc_bar_addr = pmisc->virt_addr;
                u32 vf_mask;

                /* Get the interrupt sources triggered by VFs */
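                /*
                 * ERRSOU3 bits 9..24 carry the VF2PF sources for VFs 0-15
                 * and ERRSOU5 bits 0..15 carry VFs 16-31; after the shifts
                 * below, bit i of vf_mask corresponds to VF number i.
                 */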
                vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
                            0x0000FFFF) << 16) |
                          ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
                            0x01FFFE00) >> 9);

                if (vf_mask) {
                        struct adf_accel_vf_info *vf_info;
                        bool irq_handled = false;
                        int i;

                        /* Disable VF2PF interrupts for VFs with pending ints */
                        adf_disable_vf2pf_interrupts(accel_dev, vf_mask);

                        /*
                         * Schedule tasklets to handle VF2PF interrupt BHs
                         * unless the VF is malicious and is attempting to
                         * flood the host OS with VF2PF interrupts.
                         */
                        for_each_set_bit(i, (const unsigned long *)&vf_mask,
                                         (sizeof(vf_mask) * BITS_PER_BYTE)) {
                                vf_info = accel_dev->pf.vf_info + i;

                                if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
                                        dev_info(&GET_DEV(accel_dev),
                                                 "Too many ints from VF%d\n",
                                                 vf_info->vf_nr + 1);
                                        continue;
                                }

                                /* Tasklet will re-enable ints from this VF */
                                tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
                                irq_handled = true;
                        }

                        if (irq_handled)
                                return IRQ_HANDLED;
                }
        }
#endif /* CONFIG_PCI_IOV */

        dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
                accel_dev->accel_id);

        return IRQ_NONE;
}

static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
        struct adf_etr_data *etr_data = accel_dev->transport;
        int ret, i = 0;
        char *name;

        /* Request msix irq for all banks unless SR-IOV enabled */
        if (!accel_dev->pf.vf_info) {
                for (i = 0; i < hw_data->num_banks; i++) {
                        struct adf_etr_bank_data *bank = &etr_data->banks[i];
                        unsigned int cpu, cpus = num_online_cpus();

                        name = *(pci_dev_info->msix_entries.names + i);
                        snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
                                 "qat%d-bundle%d", accel_dev->accel_id, i);
                        ret = request_irq(msixe[i].vector,
                                          adf_msix_isr_bundle, 0, name, bank);
                        if (ret) {
                                dev_err(&GET_DEV(accel_dev),
                                        "failed to enable irq %d for %s\n",
                                        msixe[i].vector, name);
                                return ret;
                        }

                        cpu = ((accel_dev->accel_id * hw_data->num_banks) +
                               i) % cpus;
                        irq_set_affinity_hint(msixe[i].vector,
                                              get_cpu_mask(cpu));
                }
        }

        /* Request msix irq for AE */
        name = *(pci_dev_info->msix_entries.names + i);
        snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
                 "qat%d-ae-cluster", accel_dev->accel_id);
        ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
        if (ret) {
                dev_err(&GET_DEV(accel_dev),
                        "failed to enable irq %d for %s\n",
                        msixe[i].vector, name);
                return ret;
        }
        return ret;
}

static void adf_free_irqs(struct adf_accel_dev *accel_dev)
{
        struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
        struct adf_etr_data *etr_data = accel_dev->transport;
        int i = 0;

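        /*
         * num_entries > 1 means SR-IOV is disabled, so the per-bundle
         * IRQs were requested and must be released before the AE IRQ.
         */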
        if (pci_dev_info->msix_entries.num_entries > 1) {
                for (i = 0; i < hw_data->num_banks; i++) {
                        irq_set_affinity_hint(msixe[i].vector, NULL);
                        free_irq(msixe[i].vector, &etr_data->banks[i]);
                }
        }
        irq_set_affinity_hint(msixe[i].vector, NULL);
        free_irq(msixe[i].vector, accel_dev);
}

static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
{
        int i;
        char **names;
        struct msix_entry *entries;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        u32 msix_num_entries = 1;

        /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
        if (!accel_dev->pf.vf_info)
                msix_num_entries += hw_data->num_banks;

        entries = kcalloc_node(msix_num_entries, sizeof(*entries),
                               GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
        if (!entries)
                return -ENOMEM;

        names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
        if (!names) {
                kfree(entries);
                return -ENOMEM;
        }
        for (i = 0; i < msix_num_entries; i++) {
                *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
                if (!(*(names + i)))
                        goto err;
        }
        accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
        accel_dev->accel_pci_dev.msix_entries.entries = entries;
        accel_dev->accel_pci_dev.msix_entries.names = names;
        return 0;
err:
        for (i = 0; i < msix_num_entries; i++)
                kfree(*(names + i));
        kfree(entries);
        kfree(names);
        return -ENOMEM;
}

static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
{
        char **names = accel_dev->accel_pci_dev.msix_entries.names;
        int i;

        kfree(accel_dev->accel_pci_dev.msix_entries.entries);
        for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
                kfree(*(names + i));
        kfree(names);
}

static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *priv_data = accel_dev->transport;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        int i;

        for (i = 0; i < hw_data->num_banks; i++)
                tasklet_init(&priv_data->banks[i].resp_handler,
                             adf_response_handler,
                             (unsigned long)&priv_data->banks[i]);
        return 0;
}

static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *priv_data = accel_dev->transport;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        int i;

        for (i = 0; i < hw_data->num_banks; i++) {
                tasklet_disable(&priv_data->banks[i].resp_handler);
                tasklet_kill(&priv_data->banks[i].resp_handler);
        }
}

/**
 * adf_isr_resource_free() - Free IRQs for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Frees the IRQ handlers, response tasklets, MSI-X vectors and MSI-X entry
 * table previously allocated for the acceleration device.
 */
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
        adf_free_irqs(accel_dev);
        adf_cleanup_bh(accel_dev);
        adf_disable_msix(&accel_dev->accel_pci_dev);
        adf_isr_free_msix_entry_table(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_isr_resource_free);

/**
 * adf_isr_resource_alloc() - Allocate IRQs for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Allocates the MSI-X entry table, enables MSI-X, sets up the response
 * tasklets and requests the IRQs for the acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
        int ret;

        ret = adf_isr_alloc_msix_entry_table(accel_dev);
        if (ret)
                return ret;
        if (adf_enable_msix(accel_dev))
                goto err_out;

        if (adf_setup_bh(accel_dev))
                goto err_out;

        if (adf_request_irqs(accel_dev))
                goto err_out;

        return 0;
err_out:
        adf_isr_resource_free(accel_dev);
        return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
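
/*
 * Illustrative usage (not part of this file): the exported pair above is
 * meant to bracket a device's interrupt setup.  Because adf_setup_bh() and
 * adf_request_irqs() dereference accel_dev->transport, the ring bank data
 * must be initialized before adf_isr_resource_alloc() is called, and the
 * matching teardown path calls adf_isr_resource_free() before releasing it.
 * The example_* function names below are hypothetical; only the two
 * adf_isr_resource_*() calls refer to code in this file.
 *
 *      static int example_dev_start(struct adf_accel_dev *accel_dev)
 *      {
 *              int ret;
 *
 *              ret = example_init_transport(accel_dev);  // sets accel_dev->transport
 *              if (ret)
 *                      return ret;
 *
 *              ret = adf_isr_resource_alloc(accel_dev);  // MSI-X + BHs + IRQs
 *              if (ret) {
 *                      example_cleanup_transport(accel_dev);
 *                      return ret;
 *              }
 *              return 0;
 *      }
 *
 *      static void example_dev_stop(struct adf_accel_dev *accel_dev)
 *      {
 *              adf_isr_resource_free(accel_dev);         // mirrors the alloc path
 *              example_cleanup_transport(accel_dev);
 *      }
 */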
