drivers/soc/dove/pmu.c


DEFINITIONS

This source file includes the following definitions.
  1. pmu_reset_reset
  2. pmu_reset_assert
  3. pmu_reset_deassert
  4. pmu_reset_init
  5. pmu_reset_init
  6. pmu_domain_power_off
  7. pmu_domain_power_on
  8. __pmu_domain_register
  9. pmu_irq_handler
  10. dove_init_pmu_irq
  11. dove_init_pmu_legacy
  12. dove_init_pmu

// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell Dove PMU support
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/soc/dove/pmu.h>
#include <linux/spinlock.h>

#define NR_PMU_IRQS             7

#define PMC_SW_RST              0x30
#define PMC_IRQ_CAUSE           0x50
#define PMC_IRQ_MASK            0x54

#define PMU_PWR                 0x10
#define PMU_ISO                 0x58

struct pmu_data {
        spinlock_t lock;
        struct device_node *of_node;
        void __iomem *pmc_base;
        void __iomem *pmu_base;
        struct irq_chip_generic *irq_gc;
        struct irq_domain *irq_domain;
#ifdef CONFIG_RESET_CONTROLLER
        struct reset_controller_dev reset;
#endif
};

/*
 * The PMU contains a register to reset various subsystems within the
 * SoC.  Export this as a reset controller.
 */
#ifdef CONFIG_RESET_CONTROLLER
#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)

static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
{
        struct pmu_data *pmu = rcdev_to_pmu(rc);
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&pmu->lock, flags);
        val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
        writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
        writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
        spin_unlock_irqrestore(&pmu->lock, flags);

        return 0;
}

static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
{
        struct pmu_data *pmu = rcdev_to_pmu(rc);
        unsigned long flags;
        u32 val = ~BIT(id);

        spin_lock_irqsave(&pmu->lock, flags);
        val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
        writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
        spin_unlock_irqrestore(&pmu->lock, flags);

        return 0;
}

static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
{
        struct pmu_data *pmu = rcdev_to_pmu(rc);
        unsigned long flags;
        u32 val = BIT(id);

        spin_lock_irqsave(&pmu->lock, flags);
        val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
        writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
        spin_unlock_irqrestore(&pmu->lock, flags);

        return 0;
}

static const struct reset_control_ops pmu_reset_ops = {
        .reset = pmu_reset_reset,
        .assert = pmu_reset_assert,
        .deassert = pmu_reset_deassert,
};

static struct reset_controller_dev pmu_reset __initdata = {
        .ops = &pmu_reset_ops,
        .owner = THIS_MODULE,
        .nr_resets = 32,
};

static void __init pmu_reset_init(struct pmu_data *pmu)
{
        int ret;

        pmu->reset = pmu_reset;
        pmu->reset.of_node = pmu->of_node;

        ret = reset_controller_register(&pmu->reset);
        if (ret)
                pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
}
#else
static void __init pmu_reset_init(struct pmu_data *pmu)
{
}
#endif
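
/*
 * Illustrative only, not part of this file: a consumer whose node
 * carries "resets = <&pmu N>;" reaches the ops above through the
 * generic reset API.  A minimal sketch, assuming a hypothetical
 * platform device "pdev" with a single, unnamed reset line:
 *
 *      struct reset_control *rst;
 *
 *      rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
 *      if (IS_ERR(rst))
 *              return PTR_ERR(rst);
 *      return reset_control_reset(rst);
 *
 * reset_control_reset() ends up in pmu_reset_reset(), which pulses the
 * domain's PMC_SW_RST bit low and then high again.
 */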

struct pmu_domain {
        struct pmu_data *pmu;
        u32 pwr_mask;
        u32 rst_mask;
        u32 iso_mask;
        struct generic_pm_domain base;
};

#define to_pmu_domain(dom) container_of(dom, struct pmu_domain, base)

/*
 * This deals with the "old" Marvell sequence for bringing a power domain
 * up: apply power, release reset, disable isolators.  Powering down
 * performs the same steps in reverse.
 *
 * Later devices apparently use a different sequence: power up, disable
 * isolators, assert repair signal, enable SRMA clock, enable AXI clock,
 * enable module clock, deassert reset.
 *
 * Note: reading the assembly, it seems that the IO accessors have an
 * unfortunate side effect - they cause memory already read into registers
 * for the if () to be re-read for the bit-set or bit-clear operation.
 * The code is written to avoid this.
 */
static int pmu_domain_power_off(struct generic_pm_domain *domain)
{
        struct pmu_domain *pmu_dom = to_pmu_domain(domain);
        struct pmu_data *pmu = pmu_dom->pmu;
        unsigned long flags;
        unsigned int val;
        void __iomem *pmu_base = pmu->pmu_base;
        void __iomem *pmc_base = pmu->pmc_base;

        spin_lock_irqsave(&pmu->lock, flags);

        /* Enable isolators */
        if (pmu_dom->iso_mask) {
                val = ~pmu_dom->iso_mask;
                val &= readl_relaxed(pmu_base + PMU_ISO);
                writel_relaxed(val, pmu_base + PMU_ISO);
        }

        /* Reset unit */
        if (pmu_dom->rst_mask) {
                val = ~pmu_dom->rst_mask;
                val &= readl_relaxed(pmc_base + PMC_SW_RST);
                writel_relaxed(val, pmc_base + PMC_SW_RST);
        }

        /* Power down */
        val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
        writel_relaxed(val, pmu_base + PMU_PWR);

        spin_unlock_irqrestore(&pmu->lock, flags);

        return 0;
}

static int pmu_domain_power_on(struct generic_pm_domain *domain)
{
        struct pmu_domain *pmu_dom = to_pmu_domain(domain);
        struct pmu_data *pmu = pmu_dom->pmu;
        unsigned long flags;
        unsigned int val;
        void __iomem *pmu_base = pmu->pmu_base;
        void __iomem *pmc_base = pmu->pmc_base;

        spin_lock_irqsave(&pmu->lock, flags);

        /* Power on */
        val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
        writel_relaxed(val, pmu_base + PMU_PWR);

        /* Release reset */
        if (pmu_dom->rst_mask) {
                val = pmu_dom->rst_mask;
                val |= readl_relaxed(pmc_base + PMC_SW_RST);
                writel_relaxed(val, pmc_base + PMC_SW_RST);
        }

        /* Disable isolators */
        if (pmu_dom->iso_mask) {
                val = pmu_dom->iso_mask;
                val |= readl_relaxed(pmu_base + PMU_ISO);
                writel_relaxed(val, pmu_base + PMU_ISO);
        }

        spin_unlock_irqrestore(&pmu->lock, flags);

        return 0;
}

static void __pmu_domain_register(struct pmu_domain *domain,
        struct device_node *np)
{
        unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);

        domain->base.power_off = pmu_domain_power_off;
        domain->base.power_on = pmu_domain_power_on;

        pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));

        if (np)
                of_genpd_add_provider_simple(np, &domain->base);
}
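
/*
 * Illustrative only: once a domain sub-node has been registered as a
 * genpd provider via of_genpd_add_provider_simple() above, a
 * hypothetical consumer joins the domain with a standard power-domains
 * phandle, e.g.:
 *
 *      gpu@840000 {
 *              ...
 *              power-domains = <&gpu_domain>;
 *      };
 *
 * The genpd core then calls pmu_domain_power_on()/pmu_domain_power_off()
 * as the domain gains and loses active devices.
 */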

/* PMU IRQ controller */
static void pmu_irq_handler(struct irq_desc *desc)
{
        struct pmu_data *pmu = irq_desc_get_handler_data(desc);
        struct irq_chip_generic *gc = pmu->irq_gc;
        struct irq_domain *domain = pmu->irq_domain;
        void __iomem *base = gc->reg_base;
        u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
        u32 done = ~0;

        if (stat == 0) {
                handle_bad_irq(desc);
                return;
        }

        while (stat) {
                u32 hwirq = fls(stat) - 1;

                stat &= ~(1 << hwirq);
                done &= ~(1 << hwirq);

                generic_handle_irq(irq_find_mapping(domain, hwirq));
        }

        /*
         * The PMU cause register is not RW0C: it is RW.  This means that
         * the bits take whatever value is written to them; if you write
         * a '1', you will set the interrupt.
         *
         * Unfortunately this means there is NO race free way to clear
         * these interrupts.
         *
         * So, let's structure the code so that the window is as small as
         * possible.
         */
        irq_gc_lock(gc);
        done &= readl_relaxed(base + PMC_IRQ_CAUSE);
        writel_relaxed(done, base + PMC_IRQ_CAUSE);
        irq_gc_unlock(gc);
}
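
/*
 * Worked example of the acknowledgement above: if hwirqs 0 and 2 were
 * just handled, "done" is ~0x5.  ANDing it with a fresh read of
 * PMC_IRQ_CAUSE leaves a '1' only in unhandled sources that are still
 * pending - say bit 1 - so the write to the RW cause register keeps
 * those asserted while clearing bits 0 and 2.  A handled source that
 * re-asserts inside this window is still lost; that is the race
 * described above.
 */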

static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
{
        const char *name = "pmu_irq";
        struct irq_chip_generic *gc;
        struct irq_domain *domain;
        int ret;

        /* mask and clear all interrupts */
        writel(0, pmu->pmc_base + PMC_IRQ_MASK);
        writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);

        domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
                                       &irq_generic_chip_ops, NULL);
        if (!domain) {
                pr_err("%s: unable to add irq domain\n", name);
                return -ENOMEM;
        }

        ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
                                             handle_level_irq,
                                             IRQ_NOREQUEST | IRQ_NOPROBE, 0,
                                             IRQ_GC_INIT_MASK_CACHE);
        if (ret) {
                pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
                irq_domain_remove(domain);
                return ret;
        }

        gc = irq_get_domain_generic_chip(domain, 0);
        gc->reg_base = pmu->pmc_base;
        gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
        gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
        gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

        pmu->irq_domain = domain;
        pmu->irq_gc = gc;

        irq_set_handler_data(irq, pmu);
        irq_set_chained_handler(irq, pmu_irq_handler);

        return 0;
}

int __init dove_init_pmu_legacy(const struct dove_pmu_initdata *initdata)
{
        const struct dove_pmu_domain_initdata *domain_initdata;
        struct pmu_data *pmu;
        int ret;

        pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
        if (!pmu)
                return -ENOMEM;

        spin_lock_init(&pmu->lock);
        pmu->pmc_base = initdata->pmc_base;
        pmu->pmu_base = initdata->pmu_base;

        pmu_reset_init(pmu);
        for (domain_initdata = initdata->domains; domain_initdata->name;
             domain_initdata++) {
                struct pmu_domain *domain;

                domain = kzalloc(sizeof(*domain), GFP_KERNEL);
                if (domain) {
                        domain->pmu = pmu;
                        domain->pwr_mask = domain_initdata->pwr_mask;
                        domain->rst_mask = domain_initdata->rst_mask;
                        domain->iso_mask = domain_initdata->iso_mask;
                        domain->base.name = domain_initdata->name;

                        __pmu_domain_register(domain, NULL);
                }
        }

        ret = dove_init_pmu_irq(pmu, initdata->irq);
        if (ret)
                pr_err("dove_init_pmu_irq() failed: %d\n", ret);

        if (pmu->irq_domain)
                irq_domain_associate_many(pmu->irq_domain,
                                          initdata->irq_domain_start,
                                          0, NR_PMU_IRQS);

        return 0;
}
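
/*
 * Illustrative only: legacy (non-DT) board code supplies a domain table
 * terminated by an entry with a NULL .name, plus already-ioremapped
 * register bases.  A minimal sketch with made-up masks (matching the
 * VPU values in the binding example below) and placeholder board
 * constants:
 *
 *      static const struct dove_pmu_domain_initdata pmu_domains[] __initconst = {
 *              {
 *                      .pwr_mask = BIT(3),
 *                      .rst_mask = BIT(16),
 *                      .iso_mask = BIT(0),
 *                      .name = "vpu-domain",
 *              },
 *              { },
 *      };
 *
 *      static const struct dove_pmu_initdata pmu_initdata __initconst = {
 *              .pmc_base = BOARD_PMC_VIRT_BASE,
 *              .pmu_base = BOARD_PMU_VIRT_BASE,
 *              .irq = BOARD_PMU_IRQ,
 *              .irq_domain_start = BOARD_PMU_IRQ_START,
 *              .domains = pmu_domains,
 *      };
 *
 *      dove_init_pmu_legacy(&pmu_initdata);
 *
 * The BOARD_* names are placeholders for whatever the board support
 * code defines.
 */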

/*
 * pmu: power-manager@d0000 {
 *      compatible = "marvell,dove-pmu";
 *      reg = <0xd0000 0x8000> <0xd8000 0x8000>;
 *      interrupts = <33>;
 *      interrupt-controller;
 *      #reset-cells = <1>;
 *      domains {
 *              vpu_domain: vpu-domain {
 *                      #power-domain-cells = <0>;
 *                      marvell,pmu_pwr_mask = <0x00000008>;
 *                      marvell,pmu_iso_mask = <0x00000001>;
 *                      resets = <&pmu 16>;
 *              };
 *              gpu_domain: gpu-domain {
 *                      #power-domain-cells = <0>;
 *                      marvell,pmu_pwr_mask = <0x00000004>;
 *                      marvell,pmu_iso_mask = <0x00000002>;
 *                      resets = <&pmu 18>;
 *              };
 *      };
 * };
 */
int __init dove_init_pmu(void)
{
        struct device_node *np_pmu, *domains_node, *np;
        struct pmu_data *pmu;
        int ret, parent_irq;

        /* Lookup the PMU node */
        np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
        if (!np_pmu)
                return 0;

        domains_node = of_get_child_by_name(np_pmu, "domains");
        if (!domains_node) {
                pr_err("%pOFn: failed to find domains sub-node\n", np_pmu);
                return 0;
        }

        pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
        if (!pmu)
                return -ENOMEM;

        spin_lock_init(&pmu->lock);
        pmu->of_node = np_pmu;
        pmu->pmc_base = of_iomap(pmu->of_node, 0);
        pmu->pmu_base = of_iomap(pmu->of_node, 1);
        if (!pmu->pmc_base || !pmu->pmu_base) {
                pr_err("%pOFn: failed to map PMU\n", np_pmu);
                iounmap(pmu->pmu_base);
                iounmap(pmu->pmc_base);
                kfree(pmu);
                return -ENOMEM;
        }

        pmu_reset_init(pmu);

        for_each_available_child_of_node(domains_node, np) {
                struct of_phandle_args args;
                struct pmu_domain *domain;

                domain = kzalloc(sizeof(*domain), GFP_KERNEL);
                if (!domain)
                        break;

                domain->pmu = pmu;
                domain->base.name = kasprintf(GFP_KERNEL, "%pOFn", np);
                if (!domain->base.name) {
                        kfree(domain);
                        break;
                }

                of_property_read_u32(np, "marvell,pmu_pwr_mask",
                                     &domain->pwr_mask);
                of_property_read_u32(np, "marvell,pmu_iso_mask",
                                     &domain->iso_mask);

                /*
                 * We parse the reset controller property directly here
                 * to ensure that we can operate when the reset controller
                 * support is not configured into the kernel.
                 */
                ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
                                                 0, &args);
                if (ret == 0) {
                        if (args.np == pmu->of_node)
                                domain->rst_mask = BIT(args.args[0]);
                        of_node_put(args.np);
                }

                __pmu_domain_register(domain, np);
        }

        /* Loss of the interrupt controller is not a fatal error. */
        parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
        if (!parent_irq) {
                pr_err("%pOFn: no interrupt specified\n", np_pmu);
        } else {
                ret = dove_init_pmu_irq(pmu, parent_irq);
                if (ret)
                        pr_err("dove_init_pmu_irq() failed: %d\n", ret);
        }

        return 0;
}
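
/*
 * Illustrative only: dove_init_pmu() takes no arguments and returns 0
 * when no "marvell,dove-pmu" node is present, so a hypothetical machine
 * init hook can call it unconditionally:
 *
 *      static void __init dove_init(void)
 *      {
 *              ...
 *              dove_init_pmu();
 *      }
 */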
