root/drivers/soc/mediatek/mtk-cmdq-helper.c

DEFINITIONS

This source file includes the following definitions:
  1. cmdq_client_timeout
  2. cmdq_mbox_create
  3. cmdq_mbox_destroy
  4. cmdq_pkt_create
  5. cmdq_pkt_destroy
  6. cmdq_pkt_append_command
  7. cmdq_pkt_write
  8. cmdq_pkt_write_mask
  9. cmdq_pkt_wfe
  10. cmdq_pkt_clear_event
  11. cmdq_pkt_finalize
  12. cmdq_pkt_flush_async_cb
  13. cmdq_pkt_flush_async
  14. cmdq_pkt_flush_cb
  15. cmdq_pkt_flush

// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_ARG_A_WRITE_MASK   0xffff
#define CMDQ_WRITE_ENABLE_MASK  BIT(0)
#define CMDQ_EOC_IRQ_EN         BIT(0)
#define CMDQ_EOC_CMD            ((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
                                << 32 | CMDQ_EOC_IRQ_EN)

static void cmdq_client_timeout(struct timer_list *t)
{
        struct cmdq_client *client = from_timer(client, t, timer);

        dev_err(client->client.dev, "cmdq timeout!\n");
}

struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
        struct cmdq_client *client;

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);

        client->timeout_ms = timeout;
        if (timeout != CMDQ_NO_TIMEOUT) {
                spin_lock_init(&client->lock);
                timer_setup(&client->timer, cmdq_client_timeout, 0);
        }
        client->pkt_cnt = 0;
        client->client.dev = dev;
        client->client.tx_block = false;
        client->client.knows_txdone = true;
        client->chan = mbox_request_channel(&client->client, index);

        if (IS_ERR(client->chan)) {
                long err;

                dev_err(dev, "failed to request channel\n");
                err = PTR_ERR(client->chan);
                kfree(client);

                return ERR_PTR(err);
        }

        return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
        if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
                spin_lock(&client->lock);
                del_timer_sync(&client->timer);
                spin_unlock(&client->lock);
        }
        mbox_free_channel(client->chan);
        kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);
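
/*
 * Usage sketch (illustrative, not part of this file): how a consumer driver
 * would typically obtain and release a CMDQ client around these helpers. The
 * mailbox index 0 and the 1000 ms timeout are placeholder values; a real
 * driver normally takes the channel index from its "mboxes" DT property.
 *
 *      struct cmdq_client *cl;
 *
 *      cl = cmdq_mbox_create(dev, 0, 1000);
 *      if (IS_ERR(cl))
 *              return PTR_ERR(cl);
 *
 *      ... build and flush packets with cl ...
 *
 *      cmdq_mbox_destroy(cl);
 */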

struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
        struct cmdq_pkt *pkt;
        struct device *dev;
        dma_addr_t dma_addr;

        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt)
                return ERR_PTR(-ENOMEM);
        pkt->va_base = kzalloc(size, GFP_KERNEL);
        if (!pkt->va_base) {
                kfree(pkt);
                return ERR_PTR(-ENOMEM);
        }
        pkt->buf_size = size;
        pkt->cl = (void *)client;

        dev = client->chan->mbox->dev;
        dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_addr)) {
                dev_err(dev, "dma map failed, size=%zu\n", size);
                kfree(pkt->va_base);
                kfree(pkt);
                return ERR_PTR(-ENOMEM);
        }

        pkt->pa_base = dma_addr;

        return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
        struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

        dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
                         DMA_TO_DEVICE);
        kfree(pkt->va_base);
        kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);
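
/*
 * Sizing sketch (illustrative, not part of this file): the buffer passed to
 * cmdq_pkt_create() holds fixed-width instructions, so a reasonable first
 * estimate is the expected instruction count times CMDQ_INST_SIZE, plus room
 * for the EOC and JUMP instructions that cmdq_pkt_finalize() appends at flush
 * time. The count of 16 register writes below is only an example.
 *
 *      struct cmdq_pkt *pkt;
 *
 *      pkt = cmdq_pkt_create(cl, (16 + 2) * CMDQ_INST_SIZE);
 *      if (IS_ERR(pkt))
 *              return PTR_ERR(pkt);
 *      ...
 *      cmdq_pkt_destroy(pkt);
 */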

static int cmdq_pkt_append_command(struct cmdq_pkt *pkt, enum cmdq_code code,
                                   u32 arg_a, u32 arg_b)
{
        u64 *cmd_ptr;

        if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
                /*
                 * When the allocated buffer size (pkt->buf_size) is used up,
                 * the real required size (pkt->cmd_buf_size) keeps growing, so
                 * that the user knows how much memory is ultimately needed
                 * after appending all commands and flushing the command
                 * packet. The user can then call cmdq_pkt_create() again with
                 * the real required buffer size (see the retry sketch after
                 * this function).
                 */
                pkt->cmd_buf_size += CMDQ_INST_SIZE;
                WARN_ONCE(1, "%s: buffer size %u is too small!\n",
                        __func__, (u32)pkt->buf_size);
                return -ENOMEM;
        }
        cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
        (*cmd_ptr) = (u64)((code << CMDQ_OP_CODE_SHIFT) | arg_a) << 32 | arg_b;
        pkt->cmd_buf_size += CMDQ_INST_SIZE;

        return 0;
}
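
/*
 * Retry sketch (illustrative, not part of this file): when an append helper
 * returns -ENOMEM, pkt->cmd_buf_size has still been advanced past the failed
 * instruction, so it records the size the packet actually needs. A caller can
 * recreate the packet with that size and rebuild it. build_commands() is a
 * hypothetical helper standing in for the caller's append sequence, and the
 * extra 2 * CMDQ_INST_SIZE leaves room for the EOC and JUMP appended by
 * cmdq_pkt_finalize() at flush time.
 *
 *      err = build_commands(pkt);
 *      if (err == -ENOMEM) {
 *              size_t needed = pkt->cmd_buf_size + 2 * CMDQ_INST_SIZE;
 *
 *              cmdq_pkt_destroy(pkt);
 *              pkt = cmdq_pkt_create(cl, needed);
 *              if (IS_ERR(pkt))
 *                      return PTR_ERR(pkt);
 *              err = build_commands(pkt);
 *      }
 */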

int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
        u32 arg_a = (offset & CMDQ_ARG_A_WRITE_MASK) |
                    (subsys << CMDQ_SUBSYS_SHIFT);

        return cmdq_pkt_append_command(pkt, CMDQ_CODE_WRITE, arg_a, value);
}
EXPORT_SYMBOL(cmdq_pkt_write);

int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
                        u16 offset, u32 value, u32 mask)
{
        u32 offset_mask = offset;
        int err = 0;

        if (mask != 0xffffffff) {
                err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
                offset_mask |= CMDQ_WRITE_ENABLE_MASK;
        }
        err |= cmdq_pkt_write(pkt, subsys, offset_mask, value);

        return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
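
/*
 * Masked-write sketch (illustrative, not part of this file): update only the
 * low byte of a register without disturbing the other bits. Two instructions
 * are queued: a MASK instruction carrying the inverted mask, then the WRITE
 * with the enable bit set in its offset field. The subsys, offset and value
 * numbers below are placeholders, not real hardware addresses.
 *
 *      err = cmdq_pkt_write_mask(pkt, 0x14, 0x30, 0x5a, 0xff);
 *      if (err)
 *              return err;
 */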

int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
        u32 arg_b;

        if (event >= CMDQ_MAX_EVENT)
                return -EINVAL;

        /*
         * WFE arg_b
         * bit 0-11: wait value
         * bit 15: 1 - wait, 0 - no wait
         * bit 16-27: update value
         * bit 31: 1 - update, 0 - no update
         */
        arg_b = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;

        return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event, arg_b);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
        if (event >= CMDQ_MAX_EVENT)
                return -EINVAL;

        return cmdq_pkt_append_command(pkt, CMDQ_CODE_WFE, event,
                                       CMDQ_WFE_UPDATE);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);
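
/*
 * Event-ordering sketch (illustrative, not part of this file): a packet that
 * must run after a hardware event usually clears the event first, so a stale
 * assertion left over from an earlier run cannot satisfy the wait.
 * CMDQ_EVENT_EXAMPLE is a placeholder for one of the platform's event IDs.
 *
 *      err = cmdq_pkt_clear_event(pkt, CMDQ_EVENT_EXAMPLE);
 *      err |= cmdq_pkt_wfe(pkt, CMDQ_EVENT_EXAMPLE);
 *      err |= cmdq_pkt_write(pkt, subsys, offset, value);
 */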

static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
        int err;

        /* insert EOC and generate IRQ for each command iteration */
        err = cmdq_pkt_append_command(pkt, CMDQ_CODE_EOC, 0, CMDQ_EOC_IRQ_EN);

        /* JUMP to end */
        err |= cmdq_pkt_append_command(pkt, CMDQ_CODE_JUMP, 0, CMDQ_JUMP_PASS);

        return err;
}

static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
        struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
        struct cmdq_task_cb *cb = &pkt->cb;
        struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

        if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
                unsigned long flags = 0;

                spin_lock_irqsave(&client->lock, flags);
                if (--client->pkt_cnt == 0)
                        del_timer(&client->timer);
                else
                        mod_timer(&client->timer, jiffies +
                                  msecs_to_jiffies(client->timeout_ms));
                spin_unlock_irqrestore(&client->lock, flags);
        }

        dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
                                pkt->cmd_buf_size, DMA_TO_DEVICE);
        if (cb->cb) {
                data.data = cb->data;
                cb->cb(data);
        }
}

int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
                         void *data)
{
        int err;
        unsigned long flags = 0;
        struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

        err = cmdq_pkt_finalize(pkt);
        if (err < 0)
                return err;

        pkt->cb.cb = cb;
        pkt->cb.data = data;
        pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
        pkt->async_cb.data = pkt;

        dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
                                   pkt->cmd_buf_size, DMA_TO_DEVICE);

        if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
                spin_lock_irqsave(&client->lock, flags);
                if (client->pkt_cnt++ == 0)
                        mod_timer(&client->timer, jiffies +
                                  msecs_to_jiffies(client->timeout_ms));
                spin_unlock_irqrestore(&client->lock, flags);
        }

        err = mbox_send_message(client->chan, pkt);
        if (err < 0)
                return err;
        /* We can send next packet immediately, so just call txdone. */
        mbox_client_txdone(client->chan, 0);

        return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);
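
/*
 * Async-flush sketch (illustrative, not part of this file): queue a packet
 * and get notified on completion instead of blocking, mirroring what
 * cmdq_pkt_flush() below builds on. my_flush_done(), my_ctx and its members
 * are hypothetical caller-side names; the callback is typically invoked from
 * the mailbox controller's interrupt path, so it should not sleep.
 *
 *      static void my_flush_done(struct cmdq_cb_data data)
 *      {
 *              struct my_ctx *ctx = data.data;
 *
 *              if (data.sta != CMDQ_CB_NORMAL)
 *                      dev_warn(ctx->dev, "cmdq flush failed\n");
 *              complete(&ctx->done);
 *      }
 *
 *      err = cmdq_pkt_flush_async(pkt, my_flush_done, ctx);
 */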

struct cmdq_flush_completion {
        struct completion cmplt;
        bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
        struct cmdq_flush_completion *cmplt;

        cmplt = (struct cmdq_flush_completion *)data.data;
        cmplt->err = data.sta != CMDQ_CB_NORMAL;
        complete(&cmplt->cmplt);
}

int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
        struct cmdq_flush_completion cmplt;
        int err;

        init_completion(&cmplt.cmplt);
        err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
        if (err < 0)
                return err;
        wait_for_completion(&cmplt.cmplt);

        return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);
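
/*
 * End-to-end sketch (illustrative, not part of this file): the typical
 * synchronous sequence a consumer driver runs per update, from packet
 * creation through flush. cmdq_pkt_flush() blocks until the EOC interrupt
 * completes the packet. Error handling is elided and the event, subsys,
 * offset and value arguments are placeholders.
 *
 *      pkt = cmdq_pkt_create(cl, 8 * CMDQ_INST_SIZE);
 *      cmdq_pkt_clear_event(pkt, event);
 *      cmdq_pkt_wfe(pkt, event);
 *      cmdq_pkt_write(pkt, subsys, offset, value);
 *      cmdq_pkt_flush(pkt);
 *      cmdq_pkt_destroy(pkt);
 */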

MODULE_LICENSE("GPL v2");
