drivers/clocksource/sh_cmt.c

DEFINITIONS

This source file includes the following definitions:
  1. sh_cmt_read16
  2. sh_cmt_read32
  3. sh_cmt_write16
  4. sh_cmt_write32
  5. sh_cmt_read_cmstr
  6. sh_cmt_write_cmstr
  7. sh_cmt_read_cmcsr
  8. sh_cmt_write_cmcsr
  9. sh_cmt_read_cmcnt
  10. sh_cmt_write_cmcnt
  11. sh_cmt_write_cmcor
  12. sh_cmt_get_counter
  13. sh_cmt_start_stop_ch
  14. sh_cmt_enable
  15. sh_cmt_disable
  16. sh_cmt_clock_event_program_verify
  17. __sh_cmt_set_next
  18. sh_cmt_set_next
  19. sh_cmt_interrupt
  20. sh_cmt_start
  21. sh_cmt_stop
  22. cs_to_sh_cmt
  23. sh_cmt_clocksource_read
  24. sh_cmt_clocksource_enable
  25. sh_cmt_clocksource_disable
  26. sh_cmt_clocksource_suspend
  27. sh_cmt_clocksource_resume
  28. sh_cmt_register_clocksource
  29. ced_to_sh_cmt
  30. sh_cmt_clock_event_start
  31. sh_cmt_clock_event_shutdown
  32. sh_cmt_clock_event_set_state
  33. sh_cmt_clock_event_set_oneshot
  34. sh_cmt_clock_event_set_periodic
  35. sh_cmt_clock_event_next
  36. sh_cmt_clock_event_suspend
  37. sh_cmt_clock_event_resume
  38. sh_cmt_register_clockevent
  39. sh_cmt_register
  40. sh_cmt_setup_channel
  41. sh_cmt_map_memory
  42. sh_cmt_setup
  43. sh_cmt_probe
  44. sh_cmt_remove
  45. sh_cmt_init
  46. sh_cmt_exit

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * SuperH Timer Support - CMT
   4  *
   5  *  Copyright (C) 2008 Magnus Damm
   6  */
   7 
   8 #include <linux/clk.h>
   9 #include <linux/clockchips.h>
  10 #include <linux/clocksource.h>
  11 #include <linux/delay.h>
  12 #include <linux/err.h>
  13 #include <linux/init.h>
  14 #include <linux/interrupt.h>
  15 #include <linux/io.h>
  16 #include <linux/ioport.h>
  17 #include <linux/irq.h>
  18 #include <linux/module.h>
  19 #include <linux/of.h>
  20 #include <linux/of_device.h>
  21 #include <linux/platform_device.h>
  22 #include <linux/pm_domain.h>
  23 #include <linux/pm_runtime.h>
  24 #include <linux/sh_timer.h>
  25 #include <linux/slab.h>
  26 #include <linux/spinlock.h>
  27 
  28 struct sh_cmt_device;
  29 
  30 /*
  31  * The CMT comes in 5 different identified flavours, depending not only on the
  32  * SoC but also on the particular instance. The following table lists the main
  33  * characteristics of those flavours.
  34  *
  35  *                      16B     32B     32B-F   48B     R-Car Gen2
  36  * -----------------------------------------------------------------------------
  37  * Channels             2       1/4     1       6       2/8
  38  * Control Width        16      16      16      16      32
  39  * Counter Width        16      32      32      32/48   32/48
  40  * Shared Start/Stop    Y       Y       Y       Y       N
  41  *
  42  * The r8a73a4 / R-Car Gen2 version has a per-channel start/stop register
  43  * located in the channel registers block. All other versions have a shared
  44  * start/stop register located in the global space.
  45  *
  46  * Channels are indexed from 0 to N-1 in the documentation. The channel index
  47  * determines the start/stop bit position in the control register and the channel
  48  * registers block address. Some CMT instances have a subset of channels
  49  * available, in which case the index in the documentation doesn't match the
  50  * "real" index as implemented in hardware. This is for instance the case with
  51  * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
  52  * in the documentation but using start/stop bit 5 and having its registers
  53  * block at 0x60.
  54  *
  55  * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
  56  * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
  57  */
  58 
  59 enum sh_cmt_model {
  60         SH_CMT_16BIT,
  61         SH_CMT_32BIT,
  62         SH_CMT_48BIT,
  63         SH_CMT0_RCAR_GEN2,
  64         SH_CMT1_RCAR_GEN2,
  65 };
  66 
  67 struct sh_cmt_info {
  68         enum sh_cmt_model model;
  69 
  70         unsigned int channels_mask;
  71 
  72         unsigned long width; /* 16 or 32 bit version of hardware block */
  73         u32 overflow_bit;
  74         u32 clear_bits;
  75 
  76         /* callbacks for CMSTR and CMCSR access */
  77         u32 (*read_control)(void __iomem *base, unsigned long offs);
  78         void (*write_control)(void __iomem *base, unsigned long offs,
  79                               u32 value);
  80 
  81         /* callbacks for CMCNT and CMCOR access */
  82         u32 (*read_count)(void __iomem *base, unsigned long offs);
  83         void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
  84 };
  85 
  86 struct sh_cmt_channel {
  87         struct sh_cmt_device *cmt;
  88 
  89         unsigned int index;     /* Index in the documentation */
  90         unsigned int hwidx;     /* Real hardware index */
  91 
  92         void __iomem *iostart;
  93         void __iomem *ioctrl;
  94 
  95         unsigned int timer_bit;
  96         unsigned long flags;
  97         u32 match_value;
  98         u32 next_match_value;
  99         u32 max_match_value;
 100         raw_spinlock_t lock;
 101         struct clock_event_device ced;
 102         struct clocksource cs;
 103         u64 total_cycles;
 104         bool cs_enabled;
 105 };
 106 
 107 struct sh_cmt_device {
 108         struct platform_device *pdev;
 109 
 110         const struct sh_cmt_info *info;
 111 
 112         void __iomem *mapbase;
 113         struct clk *clk;
 114         unsigned long rate;
 115 
 116         raw_spinlock_t lock; /* Protect the shared start/stop register */
 117 
 118         struct sh_cmt_channel *channels;
 119         unsigned int num_channels;
 120         unsigned int hw_channels;
 121 
 122         bool has_clockevent;
 123         bool has_clocksource;
 124 };
 125 
 126 #define SH_CMT16_CMCSR_CMF              (1 << 7)
 127 #define SH_CMT16_CMCSR_CMIE             (1 << 6)
 128 #define SH_CMT16_CMCSR_CKS8             (0 << 0)
 129 #define SH_CMT16_CMCSR_CKS32            (1 << 0)
 130 #define SH_CMT16_CMCSR_CKS128           (2 << 0)
 131 #define SH_CMT16_CMCSR_CKS512           (3 << 0)
 132 #define SH_CMT16_CMCSR_CKS_MASK         (3 << 0)
 133 
 134 #define SH_CMT32_CMCSR_CMF              (1 << 15)
 135 #define SH_CMT32_CMCSR_OVF              (1 << 14)
 136 #define SH_CMT32_CMCSR_WRFLG            (1 << 13)
 137 #define SH_CMT32_CMCSR_STTF             (1 << 12)
 138 #define SH_CMT32_CMCSR_STPF             (1 << 11)
 139 #define SH_CMT32_CMCSR_SSIE             (1 << 10)
 140 #define SH_CMT32_CMCSR_CMS              (1 << 9)
 141 #define SH_CMT32_CMCSR_CMM              (1 << 8)
 142 #define SH_CMT32_CMCSR_CMTOUT_IE        (1 << 7)
 143 #define SH_CMT32_CMCSR_CMR_NONE         (0 << 4)
 144 #define SH_CMT32_CMCSR_CMR_DMA          (1 << 4)
 145 #define SH_CMT32_CMCSR_CMR_IRQ          (2 << 4)
 146 #define SH_CMT32_CMCSR_CMR_MASK         (3 << 4)
 147 #define SH_CMT32_CMCSR_DBGIVD           (1 << 3)
 148 #define SH_CMT32_CMCSR_CKS_RCLK8        (4 << 0)
 149 #define SH_CMT32_CMCSR_CKS_RCLK32       (5 << 0)
 150 #define SH_CMT32_CMCSR_CKS_RCLK128      (6 << 0)
 151 #define SH_CMT32_CMCSR_CKS_RCLK1        (7 << 0)
 152 #define SH_CMT32_CMCSR_CKS_MASK         (7 << 0)
 153 
 154 static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
 155 {
 156         return ioread16(base + (offs << 1));
 157 }
 158 
 159 static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
 160 {
 161         return ioread32(base + (offs << 2));
 162 }
 163 
 164 static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
 165 {
 166         iowrite16(value, base + (offs << 1));
 167 }
 168 
 169 static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
 170 {
 171         iowrite32(value, base + (offs << 2));
 172 }
 173 
 174 static const struct sh_cmt_info sh_cmt_info[] = {
 175         [SH_CMT_16BIT] = {
 176                 .model = SH_CMT_16BIT,
 177                 .width = 16,
 178                 .overflow_bit = SH_CMT16_CMCSR_CMF,
 179                 .clear_bits = ~SH_CMT16_CMCSR_CMF,
 180                 .read_control = sh_cmt_read16,
 181                 .write_control = sh_cmt_write16,
 182                 .read_count = sh_cmt_read16,
 183                 .write_count = sh_cmt_write16,
 184         },
 185         [SH_CMT_32BIT] = {
 186                 .model = SH_CMT_32BIT,
 187                 .width = 32,
 188                 .overflow_bit = SH_CMT32_CMCSR_CMF,
 189                 .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
 190                 .read_control = sh_cmt_read16,
 191                 .write_control = sh_cmt_write16,
 192                 .read_count = sh_cmt_read32,
 193                 .write_count = sh_cmt_write32,
 194         },
 195         [SH_CMT_48BIT] = {
 196                 .model = SH_CMT_48BIT,
 197                 .channels_mask = 0x3f,
 198                 .width = 32,
 199                 .overflow_bit = SH_CMT32_CMCSR_CMF,
 200                 .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
 201                 .read_control = sh_cmt_read32,
 202                 .write_control = sh_cmt_write32,
 203                 .read_count = sh_cmt_read32,
 204                 .write_count = sh_cmt_write32,
 205         },
 206         [SH_CMT0_RCAR_GEN2] = {
 207                 .model = SH_CMT0_RCAR_GEN2,
 208                 .channels_mask = 0x60,
 209                 .width = 32,
 210                 .overflow_bit = SH_CMT32_CMCSR_CMF,
 211                 .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
 212                 .read_control = sh_cmt_read32,
 213                 .write_control = sh_cmt_write32,
 214                 .read_count = sh_cmt_read32,
 215                 .write_count = sh_cmt_write32,
 216         },
 217         [SH_CMT1_RCAR_GEN2] = {
 218                 .model = SH_CMT1_RCAR_GEN2,
 219                 .channels_mask = 0xff,
 220                 .width = 32,
 221                 .overflow_bit = SH_CMT32_CMCSR_CMF,
 222                 .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
 223                 .read_control = sh_cmt_read32,
 224                 .write_control = sh_cmt_write32,
 225                 .read_count = sh_cmt_read32,
 226                 .write_count = sh_cmt_write32,
 227         },
 228 };
 229 
 230 #define CMCSR 0 /* channel register: compare match control/status */
 231 #define CMCNT 1 /* channel register: compare match counter */
 232 #define CMCOR 2 /* channel register: compare match constant (match value) */
 233 
 234 static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
 235 {
 236         if (ch->iostart)
 237                 return ch->cmt->info->read_control(ch->iostart, 0);
 238         else
 239                 return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
 240 }
 241 
 242 static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
 243 {
 244         if (ch->iostart)
 245                 ch->cmt->info->write_control(ch->iostart, 0, value);
 246         else
 247                 ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
 248 }
 249 
 250 static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
 251 {
 252         return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
 253 }
 254 
 255 static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
 256 {
 257         ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
 258 }
 259 
 260 static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
 261 {
 262         return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
 263 }
 264 
 265 static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
 266 {
 267         ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
 268 }
 269 
 270 static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
 271 {
 272         ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
 273 }
 274 
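     /*
      * Sample the counter while it is running. CMCNT and the CMCSR
      * compare match/overflow flag cannot be read atomically, so read
      * CMCNT three times bracketed by two flag reads and retry until
      * both the flag and the ordering of the three values are consistent
      * (same technique as the ACPI PM timer driver).
      */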
 275 static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
 276 {
 277         u32 v1, v2, v3;
 278         u32 o1, o2;
 279 
 280         o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
 281 
 282         /* Make sure the timer value is stable. Stolen from acpi_pm.c */
 283         do {
 284                 o2 = o1;
 285                 v1 = sh_cmt_read_cmcnt(ch);
 286                 v2 = sh_cmt_read_cmcnt(ch);
 287                 v3 = sh_cmt_read_cmcnt(ch);
 288                 o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
 289         } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
 290                           || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
 291 
 292         *has_wrapped = o1;
 293         return v2;
 294 }
 295 
 296 static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
 297 {
 298         unsigned long flags;
 299         u32 value;
 300 
 301         /* start stop register shared by multiple timer channels */
 302         raw_spin_lock_irqsave(&ch->cmt->lock, flags);
 303         value = sh_cmt_read_cmstr(ch);
 304 
 305         if (start)
 306                 value |= 1 << ch->timer_bit;
 307         else
 308                 value &= ~(1 << ch->timer_bit);
 309 
 310         sh_cmt_write_cmstr(ch, value);
 311         raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
 312 }
 313 
 314 static int sh_cmt_enable(struct sh_cmt_channel *ch)
 315 {
 316         int k, ret;
 317 
 318         pm_runtime_get_sync(&ch->cmt->pdev->dev);
 319         dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
 320 
 321         /* enable clock */
 322         ret = clk_enable(ch->cmt->clk);
 323         if (ret) {
 324                 dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
 325                         ch->index);
 326                 goto err0;
 327         }
 328 
 329         /* make sure channel is disabled */
 330         sh_cmt_start_stop_ch(ch, 0);
 331 
 332         /* configure channel, periodic mode and maximum timeout */
 333         if (ch->cmt->info->width == 16) {
 334                 sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
 335                                    SH_CMT16_CMCSR_CKS512);
 336         } else {
 337                 sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
 338                                    SH_CMT32_CMCSR_CMTOUT_IE |
 339                                    SH_CMT32_CMCSR_CMR_IRQ |
 340                                    SH_CMT32_CMCSR_CKS_RCLK8);
 341         }
 342 
 343         sh_cmt_write_cmcor(ch, 0xffffffff);
 344         sh_cmt_write_cmcnt(ch, 0);
 345 
 346         /*
 347          * According to the sh73a0 user's manual, as CMCNT can be operated
 348          * only by the RCLK (Pseudo 32 KHz), there's one restriction on
 349          * modifying CMCNT register; two RCLK cycles are necessary before
 350          * this register is either read or any modification of the value
 351          * it holds is reflected in the LSI's actual operation.
 352          *
 353          * While at it, we're supposed to clear out the CMCNT as of this
 354          * moment, so make sure it's processed properly here.  This will
 355          * take RCLKx2 at maximum.
 356          */
 357         for (k = 0; k < 100; k++) {
 358                 if (!sh_cmt_read_cmcnt(ch))
 359                         break;
 360                 udelay(1);
 361         }
 362 
 363         if (sh_cmt_read_cmcnt(ch)) {
 364                 dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
 365                         ch->index);
 366                 ret = -ETIMEDOUT;
 367                 goto err1;
 368         }
 369 
 370         /* enable channel */
 371         sh_cmt_start_stop_ch(ch, 1);
 372         return 0;
 373  err1:
 374         /* stop clock */
 375         clk_disable(ch->cmt->clk);
 376 
 377  err0:
 378         return ret;
 379 }
 380 
 381 static void sh_cmt_disable(struct sh_cmt_channel *ch)
 382 {
 383         /* disable channel */
 384         sh_cmt_start_stop_ch(ch, 0);
 385 
 386         /* disable interrupts in CMT block */
 387         sh_cmt_write_cmcsr(ch, 0);
 388 
 389         /* stop clock */
 390         clk_disable(ch->cmt->clk);
 391 
 392         dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
 393         pm_runtime_put(&ch->cmt->pdev->dev);
 394 }
 395 
 396 /* private flags */
 397 #define FLAG_CLOCKEVENT (1 << 0)        /* channel used as a clock event device */
 398 #define FLAG_CLOCKSOURCE (1 << 1)       /* channel used as a clocksource */
 399 #define FLAG_REPROGRAM (1 << 2)         /* match value must be reprogrammed */
 400 #define FLAG_SKIPEVENT (1 << 3)         /* skip delivering the next event (raced with a match) */
 401 #define FLAG_IRQCONTEXT (1 << 4)        /* currently executing in the interrupt handler */
 402 
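     /*
      * Program CMCOR so that the next compare match fires next_match_value
      * ticks from the current count (or at the absolute value when
      * "absolute" is set), then re-read the counter to detect a race with
      * an in-flight compare match. If a race is detected the event is left
      * to the interrupt handler; if the match was programmed too close to
      * the current count, retry with a doubling safety margin.
      */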
 403 static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
 404                                               int absolute)
 405 {
 406         u32 value = ch->next_match_value;
 407         u32 new_match;
 408         u32 delay = 0;
 409         u32 now = 0;
 410         u32 has_wrapped;
 411 
 412         now = sh_cmt_get_counter(ch, &has_wrapped);
 413         ch->flags |= FLAG_REPROGRAM; /* force reprogram */
 414 
 415         if (has_wrapped) {
 416                 /* we're competing with the interrupt handler.
 417                  *  -> let the interrupt handler reprogram the timer.
 418                  *  -> interrupt number two handles the event.
 419                  */
 420                 ch->flags |= FLAG_SKIPEVENT;
 421                 return;
 422         }
 423 
 424         if (absolute)
 425                 now = 0;
 426 
 427         do {
 428                 /* reprogram the timer hardware,
 429                  * but don't save the new match value yet.
 430                  */
 431                 new_match = now + value + delay;
 432                 if (new_match > ch->max_match_value)
 433                         new_match = ch->max_match_value;
 434 
 435                 sh_cmt_write_cmcor(ch, new_match);
 436 
 437                 now = sh_cmt_get_counter(ch, &has_wrapped);
 438                 if (has_wrapped && (new_match > ch->match_value)) {
 439                         /* we are changing to a greater match value,
 440                          * so this wrap must be caused by the counter
 441                          * matching the old value.
 442                          * -> first interrupt reprograms the timer.
 443                          * -> interrupt number two handles the event.
 444                          */
 445                         ch->flags |= FLAG_SKIPEVENT;
 446                         break;
 447                 }
 448 
 449                 if (has_wrapped) {
 450                         /* we are changing to a smaller match value,
 451                          * so the wrap must be caused by the counter
 452                          * matching the new value.
 453                          * -> save programmed match value.
 454                          * -> let isr handle the event.
 455                          */
 456                         ch->match_value = new_match;
 457                         break;
 458                 }
 459 
 460                 /* be safe: verify hardware settings */
 461                 if (now < new_match) {
 462                         /* timer value is below match value, all good.
 463                          * this makes sure we won't miss any match events.
 464                          * -> save programmed match value.
 465                          * -> let isr handle the event.
 466                          */
 467                         ch->match_value = new_match;
 468                         break;
 469                 }
 470 
 471                 /* the counter has reached a value greater
 472                  * than our new match value. and since the
 473                  * has_wrapped flag isn't set we must have
 474                  * programmed a too close event.
 475                  * -> increase delay and retry.
 476                  */
 477                 if (delay)
 478                         delay <<= 1;
 479                 else
 480                         delay = 1;
 481 
 482                 if (!delay)
 483                         dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
 484                                  ch->index);
 485 
 486         } while (delay);
 487 }
 488 
 489 static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
 490 {
 491         if (delta > ch->max_match_value)
 492                 dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
 493                          ch->index);
 494 
 495         ch->next_match_value = delta;
 496         sh_cmt_clock_event_program_verify(ch, 0);
 497 }
 498 
 499 static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
 500 {
 501         unsigned long flags;
 502 
 503         raw_spin_lock_irqsave(&ch->lock, flags);
 504         __sh_cmt_set_next(ch, delta);
 505         raw_spin_unlock_irqrestore(&ch->lock, flags);
 506 }
 507 
 508 static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
 509 {
 510         struct sh_cmt_channel *ch = dev_id;
 511 
 512         /* clear flags */
 513         sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
 514                            ch->cmt->info->clear_bits);
 515 
 516         /* Update the clock source counter first, if enabled.
 517          * The wrap flag should be cleared by the timer-specific
 518          * isr before we end up here.
 519          */
 520         if (ch->flags & FLAG_CLOCKSOURCE)
 521                 ch->total_cycles += ch->match_value + 1;
 522 
 523         if (!(ch->flags & FLAG_REPROGRAM))
 524                 ch->next_match_value = ch->max_match_value;
 525 
 526         ch->flags |= FLAG_IRQCONTEXT;
 527 
 528         if (ch->flags & FLAG_CLOCKEVENT) {
 529                 if (!(ch->flags & FLAG_SKIPEVENT)) {
 530                         if (clockevent_state_oneshot(&ch->ced)) {
 531                                 ch->next_match_value = ch->max_match_value;
 532                                 ch->flags |= FLAG_REPROGRAM;
 533                         }
 534 
 535                         ch->ced.event_handler(&ch->ced);
 536                 }
 537         }
 538 
 539         ch->flags &= ~FLAG_SKIPEVENT;
 540 
 541         if (ch->flags & FLAG_REPROGRAM) {
 542                 ch->flags &= ~FLAG_REPROGRAM;
 543                 sh_cmt_clock_event_program_verify(ch, 1);
 544 
 545                 if (ch->flags & FLAG_CLOCKEVENT)
 546                         if ((clockevent_state_shutdown(&ch->ced))
 547                             || (ch->match_value == ch->next_match_value))
 548                                 ch->flags &= ~FLAG_REPROGRAM;
 549         }
 550 
 551         ch->flags &= ~FLAG_IRQCONTEXT;
 552 
 553         return IRQ_HANDLED;
 554 }
 555 
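     /*
      * A channel has two possible users, the clock event device and the
      * clocksource. sh_cmt_start() and sh_cmt_stop() reference-count them
      * through ch->flags: the hardware is enabled when the first user
      * starts and disabled again when the last user stops.
      */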
 556 static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
 557 {
 558         int ret = 0;
 559         unsigned long flags;
 560 
 561         raw_spin_lock_irqsave(&ch->lock, flags);
 562 
 563         if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
 564                 ret = sh_cmt_enable(ch);
 565 
 566         if (ret)
 567                 goto out;
 568         ch->flags |= flag;
 569 
 570         /* setup timeout if no clockevent */
 571         if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
 572                 __sh_cmt_set_next(ch, ch->max_match_value);
 573  out:
 574         raw_spin_unlock_irqrestore(&ch->lock, flags);
 575 
 576         return ret;
 577 }
 578 
 579 static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
 580 {
 581         unsigned long flags;
 582         unsigned long f;
 583 
 584         raw_spin_lock_irqsave(&ch->lock, flags);
 585 
 586         f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
 587         ch->flags &= ~flag;
 588 
 589         if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
 590                 sh_cmt_disable(ch);
 591 
 592         /* adjust the timeout to maximum if only clocksource left */
 593         if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
 594                 __sh_cmt_set_next(ch, ch->max_match_value);
 595 
 596         raw_spin_unlock_irqrestore(&ch->lock, flags);
 597 }
 598 
 599 static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
 600 {
 601         return container_of(cs, struct sh_cmt_channel, cs);
 602 }
 603 
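     /*
      * The clocksource value is the number of cycles accumulated at each
      * compare match interrupt (total_cycles) plus the current counter
      * value. If a compare match happened since the last interrupt, add
      * one full period (match_value + 1) to account for the wrap.
      */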
 604 static u64 sh_cmt_clocksource_read(struct clocksource *cs)
 605 {
 606         struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 607         unsigned long flags;
 608         u32 has_wrapped;
 609         u64 value;
 610         u32 raw;
 611 
 612         raw_spin_lock_irqsave(&ch->lock, flags);
 613         value = ch->total_cycles;
 614         raw = sh_cmt_get_counter(ch, &has_wrapped);
 615 
 616         if (unlikely(has_wrapped))
 617                 raw += ch->match_value + 1;
 618         raw_spin_unlock_irqrestore(&ch->lock, flags);
 619 
 620         return value + raw;
 621 }
 622 
 623 static int sh_cmt_clocksource_enable(struct clocksource *cs)
 624 {
 625         int ret;
 626         struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 627 
 628         WARN_ON(ch->cs_enabled);
 629 
 630         ch->total_cycles = 0;
 631 
 632         ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
 633         if (!ret)
 634                 ch->cs_enabled = true;
 635 
 636         return ret;
 637 }
 638 
 639 static void sh_cmt_clocksource_disable(struct clocksource *cs)
 640 {
 641         struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 642 
 643         WARN_ON(!ch->cs_enabled);
 644 
 645         sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
 646         ch->cs_enabled = false;
 647 }
 648 
 649 static void sh_cmt_clocksource_suspend(struct clocksource *cs)
 650 {
 651         struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 652 
 653         if (!ch->cs_enabled)
 654                 return;
 655 
 656         sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
 657         pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
 658 }
 659 
 660 static void sh_cmt_clocksource_resume(struct clocksource *cs)
 661 {
 662         struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 663 
 664         if (!ch->cs_enabled)
 665                 return;
 666 
 667         pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
 668         sh_cmt_start(ch, FLAG_CLOCKSOURCE);
 669 }
 670 
 671 static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
 672                                        const char *name)
 673 {
 674         struct clocksource *cs = &ch->cs;
 675 
 676         cs->name = name;
 677         cs->rating = 125;
 678         cs->read = sh_cmt_clocksource_read;
 679         cs->enable = sh_cmt_clocksource_enable;
 680         cs->disable = sh_cmt_clocksource_disable;
 681         cs->suspend = sh_cmt_clocksource_suspend;
 682         cs->resume = sh_cmt_clocksource_resume;
 683         cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
 684         cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 685 
 686         dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
 687                  ch->index);
 688 
 689         clocksource_register_hz(cs, ch->cmt->rate);
 690         return 0;
 691 }
 692 
 693 static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
 694 {
 695         return container_of(ced, struct sh_cmt_channel, ced);
 696 }
 697 
 698 static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
 699 {
 700         sh_cmt_start(ch, FLAG_CLOCKEVENT);
 701 
 702         if (periodic)
 703                 sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
 704         else
 705                 sh_cmt_set_next(ch, ch->max_match_value);
 706 }
 707 
 708 static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
 709 {
 710         struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
 711 
 712         sh_cmt_stop(ch, FLAG_CLOCKEVENT);
 713         return 0;
 714 }
 715 
 716 static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
 717                                         int periodic)
 718 {
 719         struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
 720 
 721         /* deal with old setting first */
 722         if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
 723                 sh_cmt_stop(ch, FLAG_CLOCKEVENT);
 724 
 725         dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
 726                  ch->index, periodic ? "periodic" : "oneshot");
 727         sh_cmt_clock_event_start(ch, periodic);
 728         return 0;
 729 }
 730 
 731 static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
 732 {
 733         return sh_cmt_clock_event_set_state(ced, 0);
 734 }
 735 
 736 static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
 737 {
 738         return sh_cmt_clock_event_set_state(ced, 1);
 739 }
 740 
 741 static int sh_cmt_clock_event_next(unsigned long delta,
 742                                    struct clock_event_device *ced)
 743 {
 744         struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
 745 
 746         BUG_ON(!clockevent_state_oneshot(ced));
 747         if (likely(ch->flags & FLAG_IRQCONTEXT))
 748                 ch->next_match_value = delta - 1;
 749         else
 750                 sh_cmt_set_next(ch, delta - 1);
 751 
 752         return 0;
 753 }
 754 
 755 static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
 756 {
 757         struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
 758 
 759         pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
 760         clk_unprepare(ch->cmt->clk);
 761 }
 762 
 763 static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
 764 {
 765         struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
 766 
 767         clk_prepare(ch->cmt->clk);
 768         pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
 769 }
 770 
 771 static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
 772                                       const char *name)
 773 {
 774         struct clock_event_device *ced = &ch->ced;
 775         int irq;
 776         int ret;
 777 
 778         irq = platform_get_irq(ch->cmt->pdev, ch->index);
 779         if (irq < 0)
 780                 return irq;
 781 
 782         ret = request_irq(irq, sh_cmt_interrupt,
 783                           IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
 784                           dev_name(&ch->cmt->pdev->dev), ch);
 785         if (ret) {
 786                 dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
 787                         ch->index, irq);
 788                 return ret;
 789         }
 790 
 791         ced->name = name;
 792         ced->features = CLOCK_EVT_FEAT_PERIODIC;
 793         ced->features |= CLOCK_EVT_FEAT_ONESHOT;
 794         ced->rating = 125;
 795         ced->cpumask = cpu_possible_mask;
 796         ced->set_next_event = sh_cmt_clock_event_next;
 797         ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
 798         ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
 799         ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
 800         ced->suspend = sh_cmt_clock_event_suspend;
 801         ced->resume = sh_cmt_clock_event_resume;
 802 
 803         /* TODO: calculate good shift from rate and counter bit width */
 804         ced->shift = 32;
 805         ced->mult = div_sc(ch->cmt->rate, NSEC_PER_SEC, ced->shift);
 806         ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
 807         ced->max_delta_ticks = ch->max_match_value;
 808         ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
 809         ced->min_delta_ticks = 0x1f;
 810 
 811         dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
 812                  ch->index);
 813         clockevents_register_device(ced);
 814 
 815         return 0;
 816 }
 817 
 818 static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
 819                            bool clockevent, bool clocksource)
 820 {
 821         int ret;
 822 
 823         if (clockevent) {
 824                 ch->cmt->has_clockevent = true;
 825                 ret = sh_cmt_register_clockevent(ch, name);
 826                 if (ret < 0)
 827                         return ret;
 828         }
 829 
 830         if (clocksource) {
 831                 ch->cmt->has_clocksource = true;
 832                 sh_cmt_register_clocksource(ch, name);
 833         }
 834 
 835         return 0;
 836 }
 837 
 838 static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
 839                                 unsigned int hwidx, bool clockevent,
 840                                 bool clocksource, struct sh_cmt_device *cmt)
 841 {
 842         int ret;
 843 
 844         /* Skip unused channels. */
 845         if (!clockevent && !clocksource)
 846                 return 0;
 847 
 848         ch->cmt = cmt;
 849         ch->index = index;
 850         ch->hwidx = hwidx;
 851         ch->timer_bit = hwidx;
 852 
 853         /*
 854          * Compute the address of the channel control register block. For the
 855          * timers with a per-channel start/stop register, compute its address
 856          * as well.
 857          */
 858         switch (cmt->info->model) {
 859         case SH_CMT_16BIT:
 860                 ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
 861                 break;
 862         case SH_CMT_32BIT:
 863         case SH_CMT_48BIT:
 864                 ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
 865                 break;
 866         case SH_CMT0_RCAR_GEN2:
 867         case SH_CMT1_RCAR_GEN2:
 868                 ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
 869                 ch->ioctrl = ch->iostart + 0x10;
 870                 ch->timer_bit = 0;
 871                 break;
 872         }
 873 
 874         if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
 875                 ch->max_match_value = ~0;
 876         else
 877                 ch->max_match_value = (1 << cmt->info->width) - 1;
 878 
 879         ch->match_value = ch->max_match_value;
 880         raw_spin_lock_init(&ch->lock);
 881 
 882         ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
 883                               clockevent, clocksource);
 884         if (ret) {
 885                 dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
 886                         ch->index);
 887                 return ret;
 888         }
 889         ch->cs_enabled = false;
 890 
 891         return 0;
 892 }
 893 
 894 static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
 895 {
 896         struct resource *mem;
 897 
 898         mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
 899         if (!mem) {
 900                 dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
 901                 return -ENXIO;
 902         }
 903 
 904         cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
 905         if (cmt->mapbase == NULL) {
 906                 dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
 907                 return -ENXIO;
 908         }
 909 
 910         return 0;
 911 }
 912 
 913 static const struct platform_device_id sh_cmt_id_table[] = {
 914         { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
 915         { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
 916         { }
 917 };
 918 MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
 919 
 920 static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
 921         {
 922                 /* deprecated, preserved for backward compatibility */
 923                 .compatible = "renesas,cmt-48",
 924                 .data = &sh_cmt_info[SH_CMT_48BIT]
 925         },
 926         {
 927                 /* deprecated, preserved for backward compatibility */
 928                 .compatible = "renesas,cmt-48-gen2",
 929                 .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
 930         },
 931         {
 932                 .compatible = "renesas,r8a7740-cmt1",
 933                 .data = &sh_cmt_info[SH_CMT_48BIT]
 934         },
 935         {
 936                 .compatible = "renesas,sh73a0-cmt1",
 937                 .data = &sh_cmt_info[SH_CMT_48BIT]
 938         },
 939         {
 940                 .compatible = "renesas,rcar-gen2-cmt0",
 941                 .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
 942         },
 943         {
 944                 .compatible = "renesas,rcar-gen2-cmt1",
 945                 .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
 946         },
 947         {
 948                 .compatible = "renesas,rcar-gen3-cmt0",
 949                 .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
 950         },
 951         {
 952                 .compatible = "renesas,rcar-gen3-cmt1",
 953                 .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
 954         },
 955         { }
 956 };
 957 MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
 958 
 959 static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
 960 {
 961         unsigned int mask;
 962         unsigned int i;
 963         int ret;
 964 
 965         cmt->pdev = pdev;
 966         raw_spin_lock_init(&cmt->lock);
 967 
 968         if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
 969                 cmt->info = of_device_get_match_data(&pdev->dev);
 970                 cmt->hw_channels = cmt->info->channels_mask;
 971         } else if (pdev->dev.platform_data) {
 972                 struct sh_timer_config *cfg = pdev->dev.platform_data;
 973                 const struct platform_device_id *id = pdev->id_entry;
 974 
 975                 cmt->info = (const struct sh_cmt_info *)id->driver_data;
 976                 cmt->hw_channels = cfg->channels_mask;
 977         } else {
 978                 dev_err(&cmt->pdev->dev, "missing platform data\n");
 979                 return -ENXIO;
 980         }
 981 
 982         /* Get hold of clock. */
 983         cmt->clk = clk_get(&cmt->pdev->dev, "fck");
 984         if (IS_ERR(cmt->clk)) {
 985                 dev_err(&cmt->pdev->dev, "cannot get clock\n");
 986                 return PTR_ERR(cmt->clk);
 987         }
 988 
 989         ret = clk_prepare(cmt->clk);
 990         if (ret < 0)
 991                 goto err_clk_put;
 992 
 993         /* Determine clock rate. */
 994         ret = clk_enable(cmt->clk);
 995         if (ret < 0)
 996                 goto err_clk_unprepare;
 997 
 998         if (cmt->info->width == 16)
 999                 cmt->rate = clk_get_rate(cmt->clk) / 512;
1000         else
1001                 cmt->rate = clk_get_rate(cmt->clk) / 8;
1002 
1003         clk_disable(cmt->clk);
1004 
1005         /* Map the memory resource(s). */
1006         ret = sh_cmt_map_memory(cmt);
1007         if (ret < 0)
1008                 goto err_clk_unprepare;
1009 
1010         /* Allocate and setup the channels. */
1011         cmt->num_channels = hweight8(cmt->hw_channels);
1012         cmt->channels = kcalloc(cmt->num_channels, sizeof(*cmt->channels),
1013                                 GFP_KERNEL);
1014         if (cmt->channels == NULL) {
1015                 ret = -ENOMEM;
1016                 goto err_unmap;
1017         }
1018 
1019         /*
1020          * Use the first channel as a clock event device and the second channel
1021          * as a clock source. If only one channel is available use it for both.
1022          */
1023         for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
1024                 unsigned int hwidx = ffs(mask) - 1;
1025                 bool clocksource = i == 1 || cmt->num_channels == 1;
1026                 bool clockevent = i == 0;
1027 
1028                 ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
1029                                            clockevent, clocksource, cmt);
1030                 if (ret < 0)
1031                         goto err_unmap;
1032 
1033                 mask &= ~(1 << hwidx);
1034         }
1035 
1036         platform_set_drvdata(pdev, cmt);
1037 
1038         return 0;
1039 
1040 err_unmap:
1041         kfree(cmt->channels);
1042         iounmap(cmt->mapbase);
1043 err_clk_unprepare:
1044         clk_unprepare(cmt->clk);
1045 err_clk_put:
1046         clk_put(cmt->clk);
1047         return ret;
1048 }
1049 
1050 static int sh_cmt_probe(struct platform_device *pdev)
1051 {
1052         struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
1053         int ret;
1054 
1055         if (!is_early_platform_device(pdev)) {
1056                 pm_runtime_set_active(&pdev->dev);
1057                 pm_runtime_enable(&pdev->dev);
1058         }
1059 
1060         if (cmt) {
1061                 dev_info(&pdev->dev, "kept as earlytimer\n");
1062                 goto out;
1063         }
1064 
1065         cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
1066         if (cmt == NULL)
1067                 return -ENOMEM;
1068 
1069         ret = sh_cmt_setup(cmt, pdev);
1070         if (ret) {
1071                 kfree(cmt);
1072                 pm_runtime_idle(&pdev->dev);
1073                 return ret;
1074         }
1075         if (is_early_platform_device(pdev))
1076                 return 0;
1077 
1078  out:
1079         if (cmt->has_clockevent || cmt->has_clocksource)
1080                 pm_runtime_irq_safe(&pdev->dev);
1081         else
1082                 pm_runtime_idle(&pdev->dev);
1083 
1084         return 0;
1085 }
1086 
1087 static int sh_cmt_remove(struct platform_device *pdev)
1088 {
1089         return -EBUSY; /* cannot unregister clockevent and clocksource */
1090 }
1091 
1092 static struct platform_driver sh_cmt_device_driver = {
1093         .probe          = sh_cmt_probe,
1094         .remove         = sh_cmt_remove,
1095         .driver         = {
1096                 .name   = "sh_cmt",
1097                 .of_match_table = of_match_ptr(sh_cmt_of_table),
1098         },
1099         .id_table       = sh_cmt_id_table,
1100 };
1101 
1102 static int __init sh_cmt_init(void)
1103 {
1104         return platform_driver_register(&sh_cmt_device_driver);
1105 }
1106 
1107 static void __exit sh_cmt_exit(void)
1108 {
1109         platform_driver_unregister(&sh_cmt_device_driver);
1110 }
1111 
1112 early_platform_init("earlytimer", &sh_cmt_device_driver);
1113 subsys_initcall(sh_cmt_init);
1114 module_exit(sh_cmt_exit);
1115 
1116 MODULE_AUTHOR("Magnus Damm");
1117 MODULE_DESCRIPTION("SuperH CMT Timer Driver");
1118 MODULE_LICENSE("GPL v2");
