drivers/soc/qcom/rpmh-rsc.c


DEFINITIONS

This source file includes the following definitions.
  1. read_tcs_reg
  2. write_tcs_cmd
  3. write_tcs_reg
  4. write_tcs_reg_sync
  5. tcs_is_free
  6. get_tcs_of_type
  7. tcs_invalidate
  8. rpmh_rsc_invalidate
  9. get_tcs_for_msg
  10. get_req_from_tcs
  11. tcs_tx_done
  12. __tcs_buffer_write
  13. __tcs_trigger
  14. check_for_req_inflight
  15. find_free_tcs
  16. tcs_write
  17. rpmh_rsc_send_data
  18. find_match
  19. find_slots
  20. tcs_ctrl_write
  21. rpmh_rsc_write_ctrl_data
  22. rpmh_probe_tcs_config
  23. rpmh_rsc_probe
  24. rpmh_driver_init

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>

#include "rpmh-internal.h"

#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"

#define RSC_DRV_TCS_OFFSET              672
#define RSC_DRV_CMD_OFFSET              20

/* DRV Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG            0x0C
#define DRV_NUM_TCS_MASK                0x3F
#define DRV_NUM_TCS_SHIFT               6
#define DRV_NCPT_MASK                   0x1F
#define DRV_NCPT_SHIFT                  27

/* Register offsets */
#define RSC_DRV_IRQ_ENABLE              0x00
#define RSC_DRV_IRQ_STATUS              0x04
#define RSC_DRV_IRQ_CLEAR               0x08
#define RSC_DRV_CMD_WAIT_FOR_CMPL       0x10
#define RSC_DRV_CONTROL                 0x14
#define RSC_DRV_STATUS                  0x18
#define RSC_DRV_CMD_ENABLE              0x1C
#define RSC_DRV_CMD_MSGID               0x30
#define RSC_DRV_CMD_ADDR                0x34
#define RSC_DRV_CMD_DATA                0x38
#define RSC_DRV_CMD_STATUS              0x3C
#define RSC_DRV_CMD_RESP_DATA           0x40

#define TCS_AMC_MODE_ENABLE             BIT(16)
#define TCS_AMC_MODE_TRIGGER            BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN                   8
#define CMD_MSGID_RESP_REQ              BIT(8)
#define CMD_MSGID_WRITE                 BIT(16)
#define CMD_STATUS_ISSUED               BIT(8)
#define CMD_STATUS_COMPL                BIT(16)

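/*
 * Each TCS occupies RSC_DRV_TCS_OFFSET (672) bytes of register space and
 * each command slot within a TCS occupies RSC_DRV_CMD_OFFSET (20) bytes,
 * so the accessors below address a register as:
 * tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id + RSC_DRV_CMD_OFFSET * cmd_id
 */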
static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
        return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
                             RSC_DRV_CMD_OFFSET * cmd_id);
}

static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id,
                          u32 data)
{
        writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
                       RSC_DRV_CMD_OFFSET * cmd_id);
}

static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data)
{
        writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
}

static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
                               u32 data)
{
        writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
        for (;;) {
                if (data == readl(drv->tcs_base + reg +
                                  RSC_DRV_TCS_OFFSET * tcs_id))
                        break;
                udelay(1);
        }
}

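/*
 * A TCS is considered free when the driver has not claimed it in
 * drv->tcs_in_use and the controller reports it idle (a non-zero read of
 * RSC_DRV_STATUS for that TCS).
 */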
static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
{
        return !test_bit(tcs_id, drv->tcs_in_use) &&
               read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
}

static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
{
        return &drv->tcs[type];
}

static int tcs_invalidate(struct rsc_drv *drv, int type)
{
        int m;
        struct tcs_group *tcs;

        tcs = get_tcs_of_type(drv, type);

        spin_lock(&tcs->lock);
        if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
                spin_unlock(&tcs->lock);
                return 0;
        }

        for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
                if (!tcs_is_free(drv, m)) {
                        spin_unlock(&tcs->lock);
                        return -EAGAIN;
                }
                write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
                write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
        }
        bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
        spin_unlock(&tcs->lock);

        return 0;
}

/**
 * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes
 *
 * @drv: the RSC controller
 */
int rpmh_rsc_invalidate(struct rsc_drv *drv)
{
        int ret;

        ret = tcs_invalidate(drv, SLEEP_TCS);
        if (!ret)
                ret = tcs_invalidate(drv, WAKE_TCS);

        return ret;
}

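/*
 * Map the request's state (active, wake or sleep) to the matching TCS
 * group of the DRV.
 */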
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
                                         const struct tcs_request *msg)
{
        int type, ret;
        struct tcs_group *tcs;

        switch (msg->state) {
        case RPMH_ACTIVE_ONLY_STATE:
                type = ACTIVE_TCS;
                break;
        case RPMH_WAKE_ONLY_STATE:
                type = WAKE_TCS;
                break;
        case RPMH_SLEEP_STATE:
                type = SLEEP_TCS;
                break;
        default:
                return ERR_PTR(-EINVAL);
        }

        /*
         * If we are making an active request on an RSC that does not have a
         * dedicated TCS for active state use, then re-purpose a wake TCS to
         * send active votes.
         * NOTE: The driver must be aware that this RSC does not have a
         * dedicated AMC and must therefore invalidate the sleep and wake
         * TCSes before making an active state request.
         */
        tcs = get_tcs_of_type(drv, type);
        if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) {
                tcs = get_tcs_of_type(drv, WAKE_TCS);
                if (tcs->num_tcs) {
                        ret = rpmh_rsc_invalidate(drv);
                        if (ret)
                                return ERR_PTR(ret);
                }
        }

        return tcs;
}

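/*
 * Return the request currently programmed on @tcs_id, found by checking
 * which TCS group's mask covers that TCS. Returns NULL if the TCS does not
 * belong to any group.
 */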
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
                                                  int tcs_id)
{
        struct tcs_group *tcs;
        int i;

        for (i = 0; i < TCS_TYPE_NR; i++) {
                tcs = &drv->tcs[i];
                if (tcs->mask & BIT(tcs_id))
                        return tcs->req[tcs_id - tcs->offset];
        }

        return NULL;
}

/**
 * tcs_tx_done: TX Done interrupt handler
 *
 * @irq: the interrupt number (unused)
 * @p: the &struct rsc_drv that raised the interrupt
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
        struct rsc_drv *drv = p;
        int i, j, err = 0;
        unsigned long irq_status;
        const struct tcs_request *req;
        struct tcs_cmd *cmd;

        irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0);

        for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
                req = get_req_from_tcs(drv, i);
                if (!req) {
                        WARN_ON(1);
                        goto skip;
                }

                err = 0;
                for (j = 0; j < req->num_cmds; j++) {
                        u32 sts;

                        cmd = &req->cmds[j];
                        sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j);
                        if (!(sts & CMD_STATUS_ISSUED) ||
                           ((req->wait_for_compl || cmd->wait) &&
                           !(sts & CMD_STATUS_COMPL))) {
                                pr_err("Incomplete request: %s: addr=%#x data=%#x",
                                       drv->name, cmd->addr, cmd->data);
                                err = -EIO;
                        }
                }

                trace_rpmh_tx_done(drv, i, req, err);
skip:
                /* Reclaim the TCS */
                write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
                write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
                write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
                spin_lock(&drv->lock);
                clear_bit(i, drv->tcs_in_use);
                spin_unlock(&drv->lock);
                if (req)
                        rpmh_tx_done(req, err);
        }

        return IRQ_HANDLED;
}

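/*
 * Write the commands of @msg into @tcs_id starting at slot @cmd_id:
 * program the MSGID, ADDR and DATA registers for each command, accumulate
 * the per-command completion bits into CMD_WAIT_FOR_CMPL and finally enable
 * the written slots in CMD_ENABLE.
 */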
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
                               const struct tcs_request *msg)
{
        u32 msgid, cmd_msgid;
        u32 cmd_enable = 0;
        u32 cmd_complete;
        struct tcs_cmd *cmd;
        int i, j;

        cmd_msgid = CMD_MSGID_LEN;
        cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
        cmd_msgid |= CMD_MSGID_WRITE;

        cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);

        for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
                cmd = &msg->cmds[i];
                cmd_enable |= BIT(j);
                cmd_complete |= cmd->wait << j;
                msgid = cmd_msgid;
                msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

                write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
                write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
                write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
                trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
        }

        write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
        cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
        write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}

static void __tcs_trigger(struct rsc_drv *drv, int tcs_id)
{
        u32 enable;

        /*
         * HW requirement: Clear the DRV_CONTROL and enable the TCS again.
         * While clearing, ensure that the AMC mode trigger is cleared first
         * and then the mode enable is cleared.
         */
        enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
        enable &= ~TCS_AMC_MODE_TRIGGER;
        write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
        enable &= ~TCS_AMC_MODE_ENABLE;
        write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);

        /* Enable the AMC mode on the TCS and then trigger the TCS */
        enable = TCS_AMC_MODE_ENABLE;
        write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
        enable |= TCS_AMC_MODE_TRIGGER;
        write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
}

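/*
 * Return -EBUSY if any busy TCS in @tcs is processing a command whose
 * address matches one of the addresses in @msg; the hardware cannot handle
 * two in-flight requests to the same address.
 */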
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
                                  const struct tcs_request *msg)
{
        unsigned long curr_enabled;
        u32 addr;
        int i, j, k;
        int tcs_id = tcs->offset;

        for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
                if (tcs_is_free(drv, tcs_id))
                        continue;

                curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);

                for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
                        addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
                        for (k = 0; k < msg->num_cmds; k++) {
                                if (addr == msg->cmds[k].addr)
                                        return -EBUSY;
                        }
                }
        }

        return 0;
}

static int find_free_tcs(struct tcs_group *tcs)
{
        int i;

        for (i = 0; i < tcs->num_tcs; i++) {
                if (tcs_is_free(tcs->drv, tcs->offset + i))
                        return tcs->offset + i;
        }

        return -EBUSY;
}

static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
{
        struct tcs_group *tcs;
        int tcs_id;
        unsigned long flags;
        int ret;

        tcs = get_tcs_for_msg(drv, msg);
        if (IS_ERR(tcs))
                return PTR_ERR(tcs);

        spin_lock_irqsave(&tcs->lock, flags);
        spin_lock(&drv->lock);
        /*
         * The h/w does not handle a new request to an address while an
         * earlier request to the same address is still in-flight or being
         * processed.
         */
        ret = check_for_req_inflight(drv, tcs, msg);
        if (ret) {
                spin_unlock(&drv->lock);
                goto done_write;
        }

        tcs_id = find_free_tcs(tcs);
        if (tcs_id < 0) {
                ret = tcs_id;
                spin_unlock(&drv->lock);
                goto done_write;
        }

        tcs->req[tcs_id - tcs->offset] = msg;
        set_bit(tcs_id, drv->tcs_in_use);
        spin_unlock(&drv->lock);

        __tcs_buffer_write(drv, tcs_id, 0, msg);
        __tcs_trigger(drv, tcs_id);

done_write:
        spin_unlock_irqrestore(&tcs->lock, flags);
        return ret;
}

/**
 * rpmh_rsc_send_data: Validate the incoming message and write to the
 * appropriate TCS block.
 *
 * @drv: the controller
 * @msg: the data to be sent
 *
 * Return: 0 on success, -EINVAL on error.
 * Note: This call blocks until the data is written to a TCS; it retries
 * while all matching TCSes are busy.
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
        int ret;

        if (!msg || !msg->cmds || !msg->num_cmds ||
            msg->num_cmds > MAX_RPMH_PAYLOAD) {
                WARN_ON(1);
                return -EINVAL;
        }

        do {
                ret = tcs_write(drv, msg);
                if (ret == -EBUSY) {
                        pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
                                            msg->cmds[0].addr);
                        udelay(10);
                }
        } while (ret == -EBUSY);

        return ret;
}
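
/*
 * Illustrative example (not part of the driver): a caller such as the RPMh
 * core could send a single active-only command as sketched below. The
 * resource address and data value are hypothetical; real addresses come
 * from cmd-db.
 *
 *      struct tcs_cmd cmd = {
 *              .addr = 0x30000,
 *              .data = 0x1,
 *              .wait = 1,
 *      };
 *      struct tcs_request req = {
 *              .state = RPMH_ACTIVE_ONLY_STATE,
 *              .wait_for_compl = 1,
 *              .num_cmds = 1,
 *              .cmds = &cmd,
 *      };
 *
 *      ret = rpmh_rsc_send_data(drv, &req);
 */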

static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd,
                      int len)
{
        int i, j;

        /* Check for already cached commands */
        for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) {
                if (tcs->cmd_cache[i] != cmd[0].addr)
                        continue;
                if (i + len >= tcs->num_tcs * tcs->ncpt)
                        goto seq_err;
                for (j = 0; j < len; j++) {
                        if (tcs->cmd_cache[i + j] != cmd[j].addr)
                                goto seq_err;
                }
                return i;
        }

        return -ENODATA;

seq_err:
        WARN(1, "Message does not match previous sequence.\n");
        return -EINVAL;
}

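/*
 * Find (or reuse) a contiguous run of @msg->num_cmds command slots in the
 * sleep/wake TCS group. Slots are tracked in one bitmap spanning all TCSes
 * of the group (num_tcs * ncpt entries) and a run is not allowed to straddle
 * a TCS boundary. On success the TCS id and starting command id are returned
 * through @tcs_id and @cmd_id.
 */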
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
                      int *tcs_id, int *cmd_id)
{
        int slot, offset;
        int i = 0;

        /* Find if we already have the msg in our TCS */
        slot = find_match(tcs, msg->cmds, msg->num_cmds);
        if (slot >= 0)
                goto copy_data;

        /* Keep searching until the full payload fits within a single TCS */
        do {
                slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
                                                  i, msg->num_cmds, 0);
                if (slot >= tcs->num_tcs * tcs->ncpt)
                        return -ENOMEM;
                i += tcs->ncpt;
        } while (slot + msg->num_cmds - 1 >= i);

copy_data:
        bitmap_set(tcs->slots, slot, msg->num_cmds);
        /* Copy the addresses of the resources over to the slots */
        for (i = 0; i < msg->num_cmds; i++)
                tcs->cmd_cache[slot + i] = msg->cmds[i].addr;

        offset = slot / tcs->ncpt;
        *tcs_id = offset + tcs->offset;
        *cmd_id = slot % tcs->ncpt;

        return 0;
}

static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
{
        struct tcs_group *tcs;
        int tcs_id = 0, cmd_id = 0;
        unsigned long flags;
        int ret;

        tcs = get_tcs_for_msg(drv, msg);
        if (IS_ERR(tcs))
                return PTR_ERR(tcs);

        spin_lock_irqsave(&tcs->lock, flags);
        /* find the TCS id and the command in the TCS to write to */
        ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
        if (!ret)
                __tcs_buffer_write(drv, tcs_id, cmd_id, msg);
        spin_unlock_irqrestore(&tcs->lock, flags);

        return ret;
}

/**
 * rpmh_rsc_write_ctrl_data: Write a sleep/wake request to the controller
 *
 * @drv: the controller
 * @msg: the data to be written to the controller
 *
 * No response is returned for these writes; the data is simply staged in
 * the sleep/wake TCSes.
 *
 * Return: 0 on success, or an error code on failure.
 */
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
        if (!msg || !msg->cmds || !msg->num_cmds ||
            msg->num_cmds > MAX_RPMH_PAYLOAD) {
                pr_err("Payload error\n");
                return -EINVAL;
        }

        /* Data sent to this API will not be sent immediately */
        if (msg->state == RPMH_ACTIVE_ONLY_STATE)
                return -EINVAL;

        return tcs_ctrl_write(drv, msg);
}

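/*
 * Read the DRV configuration from DRV_PRNT_CHLD_CONFIG and the
 * qcom,tcs-offset and qcom,tcs-config DT properties, then populate
 * drv->tcs[] with the type, count, offset, mask and (for sleep/wake types)
 * a command cache for each TCS group.
 */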
static int rpmh_probe_tcs_config(struct platform_device *pdev,
                                 struct rsc_drv *drv)
{
        struct tcs_type_config {
                u32 type;
                u32 n;
        } tcs_cfg[TCS_TYPE_NR] = { { 0 } };
        struct device_node *dn = pdev->dev.of_node;
        u32 config, max_tcs, ncpt, offset;
        int i, ret, n, st = 0;
        struct tcs_group *tcs;
        struct resource *res;
        void __iomem *base;
        char drv_id[10] = {0};

        snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
        if (ret)
                return ret;
        drv->tcs_base = base + offset;

        config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);

        max_tcs = config;
        max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
        max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);

        ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
        ncpt = ncpt >> DRV_NCPT_SHIFT;

        n = of_property_count_u32_elems(dn, "qcom,tcs-config");
        if (n != 2 * TCS_TYPE_NR)
                return -EINVAL;

        for (i = 0; i < TCS_TYPE_NR; i++) {
                ret = of_property_read_u32_index(dn, "qcom,tcs-config",
                                                 i * 2, &tcs_cfg[i].type);
                if (ret)
                        return ret;
                if (tcs_cfg[i].type >= TCS_TYPE_NR)
                        return -EINVAL;

                ret = of_property_read_u32_index(dn, "qcom,tcs-config",
                                                 i * 2 + 1, &tcs_cfg[i].n);
                if (ret)
                        return ret;
                if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
                        return -EINVAL;
        }

        for (i = 0; i < TCS_TYPE_NR; i++) {
                tcs = &drv->tcs[tcs_cfg[i].type];
                if (tcs->drv)
                        return -EINVAL;
                tcs->drv = drv;
                tcs->type = tcs_cfg[i].type;
                tcs->num_tcs = tcs_cfg[i].n;
                tcs->ncpt = ncpt;
                spin_lock_init(&tcs->lock);

                if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
                        continue;

                if (st + tcs->num_tcs > max_tcs ||
                    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
                        return -EINVAL;

                tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
                tcs->offset = st;
                st += tcs->num_tcs;

                /*
                 * Allocate memory to cache sleep and wake requests to
                 * avoid reading TCS register memory.
                 */
                if (tcs->type == ACTIVE_TCS)
                        continue;

                tcs->cmd_cache = devm_kcalloc(&pdev->dev,
                                              tcs->num_tcs * ncpt, sizeof(u32),
                                              GFP_KERNEL);
                if (!tcs->cmd_cache)
                        return -ENOMEM;
        }

        drv->num_tcs = st;

        return 0;
}

static int rpmh_rsc_probe(struct platform_device *pdev)
{
        struct device_node *dn = pdev->dev.of_node;
        struct rsc_drv *drv;
        int ret, irq;

        /*
         * Even though RPMh doesn't directly use cmd-db, all of its children
         * do. To avoid adding this check to our children we'll do it now.
         */
        ret = cmd_db_ready();
        if (ret) {
                if (ret != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "Command DB not available (%d)\n",
                                                                        ret);
                return ret;
        }

        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;

        ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
        if (ret)
                return ret;

        drv->name = of_get_property(dn, "label", NULL);
        if (!drv->name)
                drv->name = dev_name(&pdev->dev);

        ret = rpmh_probe_tcs_config(pdev, drv);
        if (ret)
                return ret;

        spin_lock_init(&drv->lock);
        bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);

        irq = platform_get_irq(pdev, drv->id);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
                               IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
                               drv->name, drv);
        if (ret)
                return ret;

        /* Enable the active TCS to send requests immediately */
        write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);

        spin_lock_init(&drv->client.cache_lock);
        INIT_LIST_HEAD(&drv->client.cache);
        INIT_LIST_HEAD(&drv->client.batch_cache);

        dev_set_drvdata(&pdev->dev, drv);

        return devm_of_platform_populate(&pdev->dev);
}

static const struct of_device_id rpmh_drv_match[] = {
        { .compatible = "qcom,rpmh-rsc", },
        { }
};

static struct platform_driver rpmh_driver = {
        .probe = rpmh_rsc_probe,
        .driver = {
                  .name = "rpmh",
                  .of_match_table = rpmh_drv_match,
        },
};

static int __init rpmh_driver_init(void)
{
        return platform_driver_register(&rpmh_driver);
}
arch_initcall(rpmh_driver_init);
