root/drivers/irqchip/irq-csky-mpintc.c


DEFINITIONS

This source file includes the following definitions:
  1. setup_trigger
  2. csky_mpintc_handler
  3. csky_mpintc_enable
  4. csky_mpintc_disable
  5. csky_mpintc_eoi
  6. csky_mpintc_set_type
  7. csky_irq_set_affinity
  8. csky_irqdomain_map
  9. csky_irq_domain_xlate_cells
  10. csky_mpintc_send_ipi
  11. csky_mpintc_init

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/reg_ops.h>

static struct irq_domain *root_domain;
static void __iomem *INTCG_base;
static void __iomem *INTCL_base;

#define IPI_IRQ         15
#define INTC_IRQS       256
#define COMM_IRQ_BASE   32

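/*
 * Register layout: one global block (INTCG, INTCG_SIZE bytes) is followed
 * by an INTCL_SIZE local block per cpu, so cpu N's local registers start
 * at INTCG_base + INTCG_SIZE + N * INTCL_SIZE (see csky_mpintc_init()).
 */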
#define INTCG_SIZE      0x8000
#define INTCL_SIZE      0x1000

#define INTCG_ICTLR     0x0
#define INTCG_CICFGR    0x100
#define INTCG_CIDSTR    0x1000

#define INTCL_PICTLR    0x0
#define INTCL_CFGR      0x14
#define INTCL_SIGR      0x60
#define INTCL_RDYIR     0x6c
#define INTCL_SENR      0xa0
#define INTCL_CENR      0xa4
#define INTCL_CACR      0xb4

static DEFINE_PER_CPU(void __iomem *, intcl_reg);

static unsigned long *__trigger;

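/*
 * Each interrupt has a 2-bit trigger field in the configuration registers:
 * local irqs (hwirq < COMM_IRQ_BASE) are configured through the per-cpu
 * INTCL_CFGR registers, common irqs through the global INTCG_CICFGR
 * registers.
 */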
#define IRQ_OFFSET(irq) (((irq) < COMM_IRQ_BASE) ? (irq) : ((irq) - COMM_IRQ_BASE))

#define TRIG_BYTE_OFFSET(i)     ((((i) * 2) / 32) * 4)
#define TRIG_BIT_OFFSET(i)      (((i) * 2) % 32)

#define TRIG_VAL(trigger, irq)  ((trigger) << TRIG_BIT_OFFSET(IRQ_OFFSET(irq)))
#define TRIG_VAL_MSK(irq)       (~(3 << TRIG_BIT_OFFSET(IRQ_OFFSET(irq))))

#define TRIG_BASE(irq) \
        (TRIG_BYTE_OFFSET(IRQ_OFFSET(irq)) + (((irq) < COMM_IRQ_BASE) ? \
        (this_cpu_read(intcl_reg) + INTCL_CFGR) : (INTCG_base + INTCG_CICFGR)))

static DEFINE_SPINLOCK(setup_lock);
static void setup_trigger(unsigned long irq, unsigned long trigger)
{
        unsigned int tmp;

        spin_lock(&setup_lock);

        /* Read-modify-write the 2-bit trigger field for this irq */
        tmp = readl_relaxed(TRIG_BASE(irq)) & TRIG_VAL_MSK(irq);

        writel_relaxed(tmp | TRIG_VAL(trigger, irq), TRIG_BASE(irq));

        spin_unlock(&setup_lock);
}

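/*
 * Low-level handler: INTCL_RDYIR returns the hwirq number of the pending
 * interrupt for this cpu, which is then dispatched through the irq domain.
 */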
static void csky_mpintc_handler(struct pt_regs *regs)
{
        void __iomem *reg_base = this_cpu_read(intcl_reg);

        handle_domain_irq(root_domain,
                readl_relaxed(reg_base + INTCL_RDYIR), regs);
}

static void csky_mpintc_enable(struct irq_data *d)
{
        void __iomem *reg_base = this_cpu_read(intcl_reg);

        setup_trigger(d->hwirq, __trigger[d->hwirq]);

        writel_relaxed(d->hwirq, reg_base + INTCL_SENR);
}

static void csky_mpintc_disable(struct irq_data *d)
{
        void __iomem *reg_base = this_cpu_read(intcl_reg);

        writel_relaxed(d->hwirq, reg_base + INTCL_CENR);
}

static void csky_mpintc_eoi(struct irq_data *d)
{
        void __iomem *reg_base = this_cpu_read(intcl_reg);

        writel_relaxed(d->hwirq, reg_base + INTCL_CACR);
}

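/*
 * The trigger value is only cached here; it is written to the hardware by
 * setup_trigger() when the interrupt is enabled.
 */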
static int csky_mpintc_set_type(struct irq_data *d, unsigned int type)
{
        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_LEVEL_HIGH:
                __trigger[d->hwirq] = 0;
                break;
        case IRQ_TYPE_LEVEL_LOW:
                __trigger[d->hwirq] = 1;
                break;
        case IRQ_TYPE_EDGE_RISING:
                __trigger[d->hwirq] = 2;
                break;
        case IRQ_TYPE_EDGE_FALLING:
                __trigger[d->hwirq] = 3;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

#ifdef CONFIG_SMP
static int csky_irq_set_affinity(struct irq_data *d,
                                 const struct cpumask *mask_val,
                                 bool force)
{
        unsigned int cpu;
        unsigned int offset = 4 * (d->hwirq - COMM_IRQ_BASE);

        if (!force)
                cpu = cpumask_any_and(mask_val, cpu_online_mask);
        else
                cpu = cpumask_first(mask_val);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        /*
         * Record the effective affinity while "cpu" still holds a valid
         * cpu number; below it is rewritten into the register encoding.
         */
        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        /*
         * The csky,mpintc supports automatic irq delivery, but it can only
         * deliver an external irq to a single cpu or to all cpus, not to an
         * arbitrary group of cpus described by a cpu_mask. So automatic
         * delivery mode is used only when the affinity mask_val equals
         * cpu_present_mask.
         */
        if (cpumask_equal(mask_val, cpu_present_mask))
                cpu = 0;
        else
                cpu |= BIT(31);

        writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset);

        return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip csky_irq_chip = {
        .name           = "C-SKY SMP Intc",
        .irq_eoi        = csky_mpintc_eoi,
        .irq_enable     = csky_mpintc_enable,
        .irq_disable    = csky_mpintc_disable,
        .irq_set_type   = csky_mpintc_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity = csky_irq_set_affinity,
#endif
};

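/*
 * hwirqs below COMM_IRQ_BASE are per-cpu (local) interrupts; hwirqs at or
 * above it are common interrupts routed through the global block.
 */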
static int csky_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
{
        if (hwirq < COMM_IRQ_BASE) {
                irq_set_percpu_devid(irq);
                irq_set_chip_and_handler(irq, &csky_irq_chip,
                                         handle_percpu_irq);
        } else {
                irq_set_chip_and_handler(irq, &csky_irq_chip,
                                         handle_fasteoi_irq);
        }

        return 0;
}

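/*
 * Devicetree interrupt specifiers carry the hwirq number in the first cell
 * and, optionally, the trigger type in the second; one-cell specifiers
 * default to level-high.
 */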
static int csky_irq_domain_xlate_cells(struct irq_domain *d,
                struct device_node *ctrlr, const u32 *intspec,
                unsigned int intsize, unsigned long *out_hwirq,
                unsigned int *out_type)
{
        if (WARN_ON(intsize < 1))
                return -EINVAL;

        *out_hwirq = intspec[0];
        if (intsize > 1)
                *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
        else
                *out_type = IRQ_TYPE_LEVEL_HIGH;

        return 0;
}

static const struct irq_domain_ops csky_irqdomain_ops = {
        .map    = csky_irqdomain_map,
        .xlate  = csky_irq_domain_xlate_cells,
};

#ifdef CONFIG_SMP
static void csky_mpintc_send_ipi(const struct cpumask *mask)
{
        void __iomem *reg_base = this_cpu_read(intcl_reg);

        /*
         * INTCL_SIGR[3:0]  INTID
         * INTCL_SIGR[15:8] CPUMASK
         */
        writel_relaxed((*cpumask_bits(mask)) << 8 | IPI_IRQ,
                                        reg_base + INTCL_SIGR);
}
#endif

/* C-SKY multi processor interrupt controller */
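/*
 * The controller's physical base address is read from control register
 * cr<31, 14>; the global block is mapped first, followed by one per-cpu
 * local block per possible cpu.
 */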
static int __init
csky_mpintc_init(struct device_node *node, struct device_node *parent)
{
        int ret;
        unsigned int cpu, nr_irq;
#ifdef CONFIG_SMP
        unsigned int ipi_irq;
#endif

        if (parent)
                return 0;

        ret = of_property_read_u32(node, "csky,num-irqs", &nr_irq);
        if (ret < 0)
                nr_irq = INTC_IRQS;

        __trigger = kcalloc(nr_irq, sizeof(unsigned long), GFP_KERNEL);
        if (__trigger == NULL)
                return -ENXIO;

        if (INTCG_base == NULL) {
                INTCG_base = ioremap(mfcr("cr<31, 14>"),
                                     INTCL_SIZE*nr_cpu_ids + INTCG_SIZE);
                if (INTCG_base == NULL)
                        return -EIO;

                INTCL_base = INTCG_base + INTCG_SIZE;

                writel_relaxed(BIT(0), INTCG_base + INTCG_ICTLR);
        }

        root_domain = irq_domain_add_linear(node, nr_irq, &csky_irqdomain_ops,
                                            NULL);
        if (!root_domain)
                return -ENXIO;

        /* Set up the per-cpu local register base for every present cpu */
        for_each_present_cpu(cpu) {
                per_cpu(intcl_reg, cpu) = INTCL_base + (INTCL_SIZE * cpu);
                writel_relaxed(BIT(0), per_cpu(intcl_reg, cpu) + INTCL_PICTLR);
        }

        set_handle_irq(&csky_mpintc_handler);

#ifdef CONFIG_SMP
        ipi_irq = irq_create_mapping(root_domain, IPI_IRQ);
        if (!ipi_irq)
                return -EIO;

        set_send_ipi(&csky_mpintc_send_ipi, ipi_irq);
#endif

        return 0;
}
IRQCHIP_DECLARE(csky_mpintc, "csky,mpintc", csky_mpintc_init);
