drivers/gpu/drm/msm/adreno/a6xx_hfi.c


DEFINITIONS

This source file includes the following definitions.
  1. a6xx_hfi_queue_read
  2. a6xx_hfi_queue_write
  3. a6xx_hfi_wait_for_ack
  4. a6xx_hfi_send_msg
  5. a6xx_hfi_send_gmu_init
  6. a6xx_hfi_get_fw_version
  7. a6xx_hfi_send_perf_table
  8. a6xx_hfi_send_bw_table
  9. a6xx_hfi_send_test
  10. a6xx_hfi_start
  11. a6xx_hfi_stop
  12. a6xx_hfi_queue_init
  13. a6xx_hfi_init

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/completion.h>
#include <linux/circ_buf.h>
#include <linux/list.h>

#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"

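/* Expands to a designated initializer, e.g. [HFI_H2F_MSG_INIT] = "HFI_H2F_MSG_INIT" */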
#define HFI_MSG_ID(val) [val] = #val

static const char * const a6xx_hfi_msg_id[] = {
        HFI_MSG_ID(HFI_H2F_MSG_INIT),
        HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
        HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
        HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
        HFI_MSG_ID(HFI_H2F_MSG_TEST),
};

static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
                u32 dwords)
{
        struct a6xx_hfi_queue_header *header = queue->header;
        u32 i, hdr, index = header->read_index;

        if (header->read_index == header->write_index) {
                header->rx_request = 1;
                return 0;
        }

        hdr = queue->data[index];

        /*
         * If we are to assume that the GMU firmware is in fact a rational actor
         * and is programmed to not send us a larger response than we expect,
         * then we can also assume that an unexpectedly large header size is
         * due to memory corruption and/or hardware failure. In that case the
         * only reasonable course of action is to BUG() and surface the failure
         * immediately.
         */

        BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

        for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
                data[i] = queue->data[index];
                index = (index + 1) % header->size;
        }

        header->read_index = index;
        return HFI_HEADER_SIZE(hdr);
}

static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
        struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
        struct a6xx_hfi_queue_header *header = queue->header;
        u32 i, space, index = header->write_index;

        spin_lock(&queue->lock);

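        /*
         * CIRC_SPACE() from linux/circ_buf.h masks with (size - 1) and so
         * assumes a power-of-two size; the queues are sized SZ_4K >> 2 = 1024
         * dwords, which satisfies that. E.g. with write_index = 10 and
         * read_index = 4 it returns (4 - 11) & 1023 = 1017 free dwords (one
         * slot is always kept free to distinguish full from empty).
         */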
        space = CIRC_SPACE(header->write_index, header->read_index,
                header->size);
        if (space < dwords) {
                header->dropped++;
                spin_unlock(&queue->lock);
                return -ENOSPC;
        }

        for (i = 0; i < dwords; i++) {
                queue->data[index] = data[i];
                index = (index + 1) % header->size;
        }

        header->write_index = index;
        spin_unlock(&queue->lock);

        gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
        return 0;
}

static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
                u32 *payload, u32 payload_size)
{
        struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
        u32 val;
        int ret;

        /* Wait for a response */
        ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
                val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);

        if (ret) {
                DRM_DEV_ERROR(gmu->dev,
                        "Message %s id %d timed out waiting for response\n",
                        a6xx_hfi_msg_id[id], seqnum);
                return -ETIMEDOUT;
        }

        /* Clear the interrupt */
        gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
                A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

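        /* Drain the response queue until we find the ack for our seqnum */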
        for (;;) {
                struct a6xx_hfi_msg_response resp;

                /* Get the next packet */
                ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
                        sizeof(resp) >> 2);

                /* If the queue is empty our response never made it */
                if (!ret) {
                        DRM_DEV_ERROR(gmu->dev,
                                "The HFI response queue is unexpectedly empty\n");

                        return -ENOENT;
                }

                if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
                        struct a6xx_hfi_msg_error *error =
                                (struct a6xx_hfi_msg_error *) &resp;

                        DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
                                error->code);
                        continue;
                }

                if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
                        DRM_DEV_ERROR(gmu->dev,
                                "Unexpected message seqnum %d on the response queue\n",
                                HFI_HEADER_SEQNUM(resp.ret_header));
                        continue;
                }

                if (resp.error) {
                        DRM_DEV_ERROR(gmu->dev,
                                "Message %s id %d returned error %d\n",
                                a6xx_hfi_msg_id[id], seqnum, resp.error);
                        return -EINVAL;
                }

                /* All is well, copy over the buffer */
                if (payload && payload_size)
                        memcpy(payload, resp.payload,
                                min_t(u32, payload_size, sizeof(resp.payload)));

                return 0;
        }
}

static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
                void *data, u32 size, u32 *payload, u32 payload_size)
{
        struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
        int ret, dwords = size >> 2;
        u32 seqnum;

        seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

        /* First dword of the message is the message header - fill it in */
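        /* Layout: seqnum [31:20] | HFI_MSG_CMD [19:16] | size in dwords [15:8] | id [7:0] */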
        *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
                (dwords << 8) | id;

        ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
        if (ret) {
                DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
                        a6xx_hfi_msg_id[id], seqnum);
                return ret;
        }

        return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}

static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
        struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };

        msg.dbg_buffer_addr = (u32) gmu->debug->iova;
        msg.dbg_buffer_size = (u32) gmu->debug->size;
        msg.boot_state = boot_state;

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
                NULL, 0);
}

static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
        struct a6xx_hfi_msg_fw_version msg = { 0 };

        /* Currently supporting version 1.1 */
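        /* The shifts below imply major version at bit 28, minor at bit 16 */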
        msg.supported_version = (1 << 28) | (1 << 16);

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
                version, sizeof(*version));
}

static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
        struct a6xx_hfi_msg_perf_table msg = { 0 };
        int i;

        msg.num_gpu_levels = gmu->nr_gpu_freqs;
        msg.num_gmu_levels = gmu->nr_gmu_freqs;

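        /* Frequencies are stored in Hz (clk framework); the GMU tables appear to take kHz */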
        for (i = 0; i < gmu->nr_gpu_freqs; i++) {
                msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
                msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
        }

        for (i = 0; i < gmu->nr_gmu_freqs; i++) {
                msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
                msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
        }

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
                NULL, 0);
}

static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
        struct a6xx_hfi_msg_bw_table msg = { 0 };

        /*
         * The sdm845 GMU doesn't do bus frequency scaling on its own but it
         * does need at least one entry in the list because it might be accessed
         * when the GMU is shutting down. Send a single "off" entry.
         */

        msg.bw_level_num = 1;

        msg.ddr_cmds_num = 3;
        msg.ddr_wait_bitmask = 0x07;

        msg.ddr_cmds_addrs[0] = 0x50000;
        msg.ddr_cmds_addrs[1] = 0x5005c;
        msg.ddr_cmds_addrs[2] = 0x5000c;

        msg.ddr_cmds_data[0][0] = 0x40000000;
        msg.ddr_cmds_data[0][1] = 0x40000000;
        msg.ddr_cmds_data[0][2] = 0x40000000;

        /*
         * These are the CX (CNOC) votes. This table is used, but the values
         * for the sdm845 GMU are known and fixed, so we can hard code them.
         */

        msg.cnoc_cmds_num = 3;
        msg.cnoc_wait_bitmask = 0x05;

        msg.cnoc_cmds_addrs[0] = 0x50034;
        msg.cnoc_cmds_addrs[1] = 0x5007c;
        msg.cnoc_cmds_addrs[2] = 0x5004c;

        msg.cnoc_cmds_data[0][0] = 0x40000000;
        msg.cnoc_cmds_data[0][1] = 0x00000000;
        msg.cnoc_cmds_data[0][2] = 0x40000000;

        msg.cnoc_cmds_data[1][0] = 0x60000001;
        msg.cnoc_cmds_data[1][1] = 0x20000001;
        msg.cnoc_cmds_data[1][2] = 0x60000001;

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
                NULL, 0);
}

static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
        struct a6xx_hfi_msg_test msg = { 0 };

        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
                NULL, 0);
}

int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
        int ret;

        ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
        if (ret)
                return ret;

        ret = a6xx_hfi_get_fw_version(gmu, NULL);
        if (ret)
                return ret;

        /*
         * We have to exchange version numbers per the init sequence, but at
         * this point the kernel driver doesn't need to know the exact version
         * of the GMU firmware
         */

        ret = a6xx_hfi_send_perf_table(gmu);
        if (ret)
                return ret;

        ret = a6xx_hfi_send_bw_table(gmu);
        if (ret)
                return ret;

        /*
         * Let the GMU know that there won't be any more HFI messages until next
         * boot
         */
        a6xx_hfi_send_test(gmu);

        return 0;
}

void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
                struct a6xx_hfi_queue *queue = &gmu->queues[i];

                if (!queue->header)
                        continue;

                if (queue->header->read_index != queue->header->write_index)
                        DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

                queue->header->read_index = 0;
                queue->header->write_index = 0;
        }
}

static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
                struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
                u32 id)
{
        spin_lock_init(&queue->lock);
        queue->header = header;
        queue->data = virt;
        atomic_set(&queue->seqnum, 0);

        /* Set up the shared memory header */
        header->iova = iova;
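        /* Lower byte is the queue id; the 10 in the upper byte appears to be a fixed type tag */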
        header->type = 10 << 8 | id;
        header->status = 1;
        header->size = SZ_4K >> 2;
        header->msg_size = 0;
        header->dropped = 0;
        header->rx_watermark = 1;
        header->tx_watermark = 1;
        header->rx_request = 1;
        header->tx_request = 0;
        header->read_index = 0;
        header->write_index = 0;
}

void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
        struct a6xx_gmu_bo *hfi = gmu->hfi;
        struct a6xx_hfi_queue_table_header *table = hfi->virt;
        struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
        u64 offset;
        int table_size;

        /*
         * The table size is the size of the table header plus all of the queue
         * headers
         */
        table_size = sizeof(*table);
        table_size += (ARRAY_SIZE(gmu->queues) *
                sizeof(struct a6xx_hfi_queue_header));

        table->version = 0;
        table->size = table_size;
        /* First queue header is located immediately after the table header */
        table->qhdr0_offset = sizeof(*table) >> 2;
        table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
        table->num_queues = ARRAY_SIZE(gmu->queues);
        table->active_queues = ARRAY_SIZE(gmu->queues);

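        /*
         * Resulting layout of the HFI buffer: the table and queue headers
         * occupy the first 4K page, the command queue data the second and the
         * response queue data the third.
         */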
        /* Command queue */
        offset = SZ_4K;
        a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
                hfi->iova + offset, 0);

        /* GMU response queue */
        offset += SZ_4K;
        a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
                hfi->iova + offset, 4);
}
