drivers/soc/qcom/rpmh.c

DEFINITIONS

This source file includes the following definitions.
  1. get_rpmh_ctrlr
  2. rpmh_tx_done
  3. __find_req
  4. cache_rpm_request
  5. __rpmh_write
  6. __fill_rpmh_msg
  7. rpmh_write_async
  8. rpmh_write
  9. cache_batch
  10. flush_batch
  11. invalidate_batch
  12. rpmh_write_batch
  13. is_req_valid
  14. send_single
  15. rpmh_flush
  16. rpmh_invalidate

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

#define RPMH_TIMEOUT_MS                 msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name)        \
        struct rpmh_request name = {                    \
                .msg = {                                \
                        .state = s,                     \
                        .cmds = name.cmd,               \
                        .num_cmds = 0,                  \
                        .wait_for_compl = true,         \
                },                                      \
                .cmd = { { 0 } },                       \
                .completion = q,                        \
                .dev = dev,                             \
                .needs_free = false,                    \
        }

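/*
 * Illustration of how the macro is used elsewhere in this file (see
 * rpmh_write() and send_single() below): it yields a fully initialized
 * stack request whose msg.cmds already points at its own cmd[] array, so
 * callers only fill cmd[] and msg.num_cmds before sending:
 *
 *      DECLARE_COMPLETION_ONSTACK(compl);
 *      DEFINE_RPMH_MSG_ONSTACK(dev, RPMH_ACTIVE_ONLY_STATE, &compl, rpm_msg);
 *      rpm_msg.cmd[0].addr = addr;
 *      rpm_msg.cmd[0].data = data;
 *      rpm_msg.msg.num_cmds = 1;
 */
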
#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req: the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
        u32 addr;
        u32 sleep_val;
        u32 wake_val;
        struct list_head list;
};

/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
        struct list_head list;
        int count;
        struct rpmh_request rpm_msgs[];
};

static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
        struct rsc_drv *drv = dev_get_drvdata(dev->parent);

        return &drv->client;
}

void rpmh_tx_done(const struct tcs_request *msg, int r)
{
        struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
                                                    msg);
        struct completion *compl = rpm_msg->completion;
        bool free = rpm_msg->needs_free;

        rpm_msg->err = r;

        if (r)
                dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
                        rpm_msg->msg.cmds[0].addr, r);

        /* Signal the blocking thread we are done */
        if (compl)
                complete(compl);

        if (free)
                kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
        struct cache_req *p, *req = NULL;

        list_for_each_entry(p, &ctrlr->cache, list) {
                if (p->addr == addr) {
                        req = p;
                        break;
                }
        }

        return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
                                           enum rpmh_state state,
                                           struct tcs_cmd *cmd)
{
        struct cache_req *req;
        unsigned long flags;

        spin_lock_irqsave(&ctrlr->cache_lock, flags);
        req = __find_req(ctrlr, cmd->addr);
        if (req)
                goto existing;

        req = kzalloc(sizeof(*req), GFP_ATOMIC);
        if (!req) {
                req = ERR_PTR(-ENOMEM);
                goto unlock;
        }

        req->addr = cmd->addr;
        req->sleep_val = req->wake_val = UINT_MAX;
        INIT_LIST_HEAD(&req->list);
        list_add_tail(&req->list, &ctrlr->cache);

existing:
        switch (state) {
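        /*
         * An active-only vote also refreshes the cached wake vote, but
         * only when a sleep vote exists: with no sleep vote the resource
         * keeps its active value across the sleep/wake cycle, so no wake
         * restore is needed.
         */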
        case RPMH_ACTIVE_ONLY_STATE:
                if (req->sleep_val != UINT_MAX)
                        req->wake_val = cmd->data;
                break;
        case RPMH_WAKE_ONLY_STATE:
                req->wake_val = cmd->data;
                break;
        case RPMH_SLEEP_STATE:
                req->sleep_val = cmd->data;
                break;
        default:
                break;
        }

        ctrlr->dirty = true;
unlock:
        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

        return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
                        struct rpmh_request *rpm_msg)
{
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        int ret = -EINVAL;
        struct cache_req *req;
        int i;

        rpm_msg->msg.state = state;

        /* Cache the request in our store and link the payload */
        for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
                req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
                if (IS_ERR(req))
                        return PTR_ERR(req);
        }

        if (state == RPMH_ACTIVE_ONLY_STATE) {
                WARN_ON(irqs_disabled());
                ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
        } else {
                /* Clean up our call by spoofing tx_done */
                ret = 0;
                rpmh_tx_done(&rpm_msg->msg, ret);
        }

        return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
                const struct tcs_cmd *cmd, u32 n)
{
        if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
                return -EINVAL;

        memcpy(req->cmd, cmd, n * sizeof(*cmd));

        req->msg.state = state;
        req->msg.cmds = req->cmd;
        req->msg.num_cmds = n;

        return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands without waiting for a response. The order
 * of the commands is maintained and they are sent to the controller as a
 * single shot.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
                     const struct tcs_cmd *cmd, u32 n)
{
        struct rpmh_request *rpm_msg;
        int ret;

        rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
        if (!rpm_msg)
                return -ENOMEM;
        rpm_msg->needs_free = true;

        ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
        if (ret) {
                kfree(rpm_msg);
                return ret;
        }

        return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);

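/*
 * Example usage (an illustrative sketch; 0x50000 is a made-up resource
 * address, not a real one): post a fire-and-forget active vote from a
 * path that must not block. Completion is reported through rpmh_tx_done(),
 * which also frees the heap-allocated request:
 *
 *      struct tcs_cmd cmd = { .addr = 0x50000, .data = 0x1 };
 *      int ret = rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 */
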
/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
               const struct tcs_cmd *cmd, u32 n)
{
        DECLARE_COMPLETION_ONSTACK(compl);
        DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
        int ret;

        if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
                return -EINVAL;

        memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
        rpm_msg.msg.num_cmds = n;

        ret = __rpmh_write(dev, state, &rpm_msg);
        if (ret)
                return ret;

        ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
        WARN_ON(!ret);
        return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);

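/*
 * Example usage (an illustrative sketch; the addresses and data are made
 * up): block until the controller acks an active vote on two resources
 * sent as one request:
 *
 *      struct tcs_cmd cmds[] = {
 *              { .addr = 0x50000, .data = 0x1 },
 *              { .addr = 0x50004, .data = 0x2 },
 *      };
 *      int ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, cmds,
 *                           ARRAY_SIZE(cmds));
 *
 * Returns 0 once the controller acks, or -ETIMEDOUT if no response
 * arrives within RPMH_TIMEOUT_MS.
 */
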
static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
        unsigned long flags;

        spin_lock_irqsave(&ctrlr->cache_lock, flags);
        list_add_tail(&req->list, &ctrlr->batch_cache);
        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
        struct batch_cache_req *req;
        const struct rpmh_request *rpm_msg;
        unsigned long flags;
        int ret = 0;
        int i;

        /* Send Sleep/Wake requests to the controller, expect no response */
        spin_lock_irqsave(&ctrlr->cache_lock, flags);
        list_for_each_entry(req, &ctrlr->batch_cache, list) {
                for (i = 0; i < req->count; i++) {
                        rpm_msg = req->rpm_msgs + i;
                        ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
                                                       &rpm_msg->msg);
                        if (ret)
                                break;
                }
        }
        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

        return ret;
}

static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
{
        struct batch_cache_req *req, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&ctrlr->cache_lock, flags);
        list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
                kfree(req);
        INIT_LIST_HEAD(&ctrlr->batch_cache);
        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0-terminated.
 *
 * Write a request to the RSC controller without caching. If the request
 * state is ACTIVE, then the requests are treated as completion requests
 * and sent to the controller immediately. The function waits until all the
 * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
 * request is sent as fire-and-forget and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
                     const struct tcs_cmd *cmd, u32 *n)
{
        struct batch_cache_req *req;
        struct rpmh_request *rpm_msgs;
        struct completion *compls;
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        unsigned long time_left;
        int count = 0;
        int ret, i;
        void *ptr;

        if (!cmd || !n)
                return -EINVAL;

        while (n[count] > 0)
                count++;
        if (!count)
                return -EINVAL;

        ptr = kzalloc(sizeof(*req) +
                      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
                      GFP_ATOMIC);
        if (!ptr)
                return -ENOMEM;

        req = ptr;
        compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

        req->count = count;
        rpm_msgs = req->rpm_msgs;

        for (i = 0; i < count; i++) {
                ret = __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
                if (ret) {
                        kfree(ptr);
                        return ret;
                }
                cmd += n[i];
        }

        if (state != RPMH_ACTIVE_ONLY_STATE) {
                cache_batch(ctrlr, req);
                return 0;
        }

        for (i = 0; i < count; i++) {
                struct completion *compl = &compls[i];

                init_completion(compl);
                rpm_msgs[i].completion = compl;
                ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
                if (ret) {
                        pr_err("Error(%d) sending RPMH message addr=%#x\n",
                               ret, rpm_msgs[i].msg.cmds[0].addr);
                        break;
                }
        }

        time_left = RPMH_TIMEOUT_MS;
        while (i--) {
                time_left = wait_for_completion_timeout(&compls[i], time_left);
                if (!time_left) {
                        /*
                         * Better hope they never finish because they'll signal
                         * the completion that we're going to free once
                         * we've returned from this function.
                         */
                        WARN_ON(1);
                        ret = -ETIMEDOUT;
                        goto exit;
                }
        }

exit:
        kfree(ptr);

        return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);

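/*
 * Example usage (an illustrative sketch; addresses and data are made up):
 * send three commands as two batches of one and two commands, with the
 * count array zero-terminated:
 *
 *      struct tcs_cmd cmds[] = {
 *              { .addr = 0x50000, .data = 0x1 },
 *              { .addr = 0x50004, .data = 0x2 },
 *              { .addr = 0x50008, .data = 0x3 },
 *      };
 *      u32 n[] = { 1, 2, 0 };
 *      int ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
 */
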
static int is_req_valid(struct cache_req *req)
{
        return (req->sleep_val != UINT_MAX &&
                req->wake_val != UINT_MAX &&
                req->sleep_val != req->wake_val);
}

static int send_single(const struct device *dev, enum rpmh_state state,
                       u32 addr, u32 data)
{
        DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);

        /* Wake sets are always complete and sleep sets are not */
        rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
        rpm_msg.cmd[0].addr = addr;
        rpm_msg.cmd[0].data = data;
        rpm_msg.msg.num_cmds = 1;

        return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush: Flushes the buffered sleep and wake sets to TCSes
 *
 * @dev: The device making the request
 *
 * Return: -EBUSY if the controller is busy, probably waiting on a response
 * to a RPMH request sent earlier.
 *
 * This function is always called from the sleep code from the last CPU
 * that is powering down the entire system. Since no other RPMH API would be
 * executing at this time, it is safe to run lockless.
 */
int rpmh_flush(const struct device *dev)
{
        struct cache_req *p;
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        int ret;

        if (!ctrlr->dirty) {
                pr_debug("Skipping flush, TCS has latest data.\n");
                return 0;
        }

        /* First flush the cached batch requests */
        ret = flush_batch(ctrlr);
        if (ret)
                return ret;

        /*
         * Nobody else should be calling this function other than system PM,
         * hence we can run without locks.
         */
        list_for_each_entry(p, &ctrlr->cache, list) {
                if (!is_req_valid(p)) {
                        pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
                                 __func__, p->addr, p->sleep_val, p->wake_val);
                        continue;
                }
                ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
                if (ret)
                        return ret;
                ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
                                  p->addr, p->wake_val);
                if (ret)
                        return ret;
        }

        ctrlr->dirty = false;

        return 0;
}
EXPORT_SYMBOL(rpmh_flush);

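/*
 * Sketch of the expected caller (a hypothetical platform PM hook, not
 * part of this driver): the last CPU on its way down flushes the cached
 * votes before the RSC hands control to the sleep state:
 *
 *      static int plat_sleep_enter(struct device *dev)
 *      {
 *              return rpmh_flush(dev);
 *      }
 */
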
/**
 * rpmh_invalidate: Invalidate all sleep and wake sets
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and wake values in the TCS blocks.
 */
int rpmh_invalidate(const struct device *dev)
{
        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
        int ret;

        invalidate_batch(ctrlr);
        ctrlr->dirty = true;

        do {
                ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
        } while (ret == -EAGAIN);

        return ret;
}
EXPORT_SYMBOL(rpmh_invalidate);
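
/*
 * Putting the sleep/wake API together (an illustrative sketch; the
 * address and data are made up): a client invalidates stale sets, posts
 * fresh sleep and wake votes, and relies on system PM to flush them:
 *
 *      struct tcs_cmd cmd = { .addr = 0x50000 };
 *
 *      rpmh_invalidate(dev);
 *      cmd.data = 0x0;
 *      rpmh_write(dev, RPMH_SLEEP_STATE, &cmd, 1);
 *      cmd.data = 0x1;
 *      rpmh_write(dev, RPMH_WAKE_ONLY_STATE, &cmd, 1);
 *
 * Later, the sleep path calls rpmh_flush(dev) to program the votes into
 * the sleep/wake TCSes.
 */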
