drivers/gpu/drm/vmwgfx/vmwgfx_marker.c

DEFINITIONS

This source file includes the following definitions:
  1. vmw_marker_queue_init
  2. vmw_marker_queue_takedown
  3. vmw_marker_push
  4. vmw_marker_pull
  5. vmw_fifo_lag
  6. vmw_lag_lt
  7. vmw_wait_lag

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2010 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "vmwgfx_drv.h"

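/*
 * Marker queue: a simple measure of how far the device lags behind
 * submitted command batches.
 *
 * Each submission can push a marker recording its fence seqno and its
 * submission time. As seqnos signal, markers are pulled off and the
 * queue's lag is updated; vmw_wait_lag() uses this to throttle
 * submission until the lag drops below a caller-supplied bound.
 */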
struct vmw_marker {
        struct list_head head;  /* Entry in the queue's submission-ordered list. */
        uint32_t seqno;         /* Fence seqno of the marked submission. */
        u64 submitted;          /* Submission time in ns (raw monotonic clock). */
};

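/**
 * vmw_marker_queue_init - Initialize a marker queue.
 * @queue: The queue to initialize.
 *
 * Starts out empty, with zero lag and the current time recorded as
 * the last lag update.
 */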
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
        INIT_LIST_HEAD(&queue->head);
        queue->lag = 0;
        queue->lag_time = ktime_get_raw_ns();
        spin_lock_init(&queue->lock);
}

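/**
 * vmw_marker_queue_takedown - Free all markers still on a queue.
 * @queue: The queue to tear down.
 *
 * The entries are not unlinked before being freed; the queue itself
 * is assumed to be going away with them.
 */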
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
        struct vmw_marker *marker, *next;

        spin_lock(&queue->lock);
        list_for_each_entry_safe(marker, next, &queue->head, head) {
                kfree(marker);
        }
        spin_unlock(&queue->lock);
}

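/**
 * vmw_marker_push - Append a marker for a new submission.
 * @queue: The queue to push to.
 * @seqno: Fence seqno of the submission.
 *
 * Stamps the marker with the current raw monotonic time and adds it
 * to the queue tail, keeping the list ordered by submission time.
 *
 * Returns 0 on success, -ENOMEM if the marker could not be allocated.
 */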
int vmw_marker_push(struct vmw_marker_queue *queue,
                    uint32_t seqno)
{
        struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);

        if (unlikely(!marker))
                return -ENOMEM;

        marker->seqno = seqno;
        marker->submitted = ktime_get_raw_ns();
        spin_lock(&queue->lock);
        list_add_tail(&marker->head, &queue->head);
        spin_unlock(&queue->lock);

        return 0;
}

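/**
 * vmw_marker_pull - Retire all markers signaled by a given seqno.
 * @queue: The queue to pull from.
 * @signaled_seqno: The seqno known to have signaled.
 *
 * Frees every marker whose seqno is at or behind @signaled_seqno and
 * records the age of the most recently submitted of them as the new
 * lag. An empty queue means the device has caught up, so the lag is
 * reset to zero.
 *
 * Returns 0 if the lag was updated, -EBUSY if no marker was retired.
 */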
int vmw_marker_pull(struct vmw_marker_queue *queue,
                    uint32_t signaled_seqno)
{
        struct vmw_marker *marker, *next;
        bool updated = false;
        u64 now;

        spin_lock(&queue->lock);
        now = ktime_get_raw_ns();

        if (list_empty(&queue->head)) {
                queue->lag = 0;
                queue->lag_time = now;
                updated = true;
                goto out_unlock;
        }

        list_for_each_entry_safe(marker, next, &queue->head, head) {
                /*
                 * Wraparound-safe seqno comparison: an unsigned
                 * difference above 2^30 means this marker is still
                 * ahead of signaled_seqno, so keep it queued.
                 */
                if (signaled_seqno - marker->seqno > (1 << 30))
                        continue;

                queue->lag = now - marker->submitted;
                queue->lag_time = now;
                updated = true;
                list_del(&marker->head);
                kfree(marker);
        }

out_unlock:
        spin_unlock(&queue->lock);

        return (updated) ? 0 : -EBUSY;
}

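/**
 * vmw_fifo_lag - Return the current lag estimate.
 * @queue: The queue to query.
 *
 * The stored lag is only refreshed when markers are pulled, so the
 * time elapsed since the last update is added in before the value is
 * returned.
 */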
static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
{
        u64 now;

        spin_lock(&queue->lock);
        now = ktime_get_raw_ns();
        queue->lag += now - queue->lag_time;
        queue->lag_time = now;
        spin_unlock(&queue->lock);
        return queue->lag;
}

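/**
 * vmw_lag_lt - Check whether the lag is within a bound.
 * @queue: The queue to query.
 * @us: The bound, in microseconds.
 *
 * Returns true if the current lag is at most @us microseconds.
 */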
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
                       uint32_t us)
{
        u64 cond = (u64) us * NSEC_PER_USEC;

        return vmw_fifo_lag(queue) <= cond;
}

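/**
 * vmw_wait_lag - Block until the lag drops within a bound.
 * @dev_priv: The device private structure.
 * @queue: The queue to throttle on.
 * @us: The lag bound, in microseconds.
 *
 * Repeatedly waits for the oldest queued marker's seqno (or, if the
 * queue is empty, the most recently emitted seqno) to signal, pulling
 * retired markers each time, until the lag is within @us.
 *
 * Returns 0 on success, or the error returned by vmw_wait_seqno().
 */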
int vmw_wait_lag(struct vmw_private *dev_priv,
                 struct vmw_marker_queue *queue, uint32_t us)
{
        struct vmw_marker *marker;
        uint32_t seqno;
        int ret;

        while (!vmw_lag_lt(queue, us)) {
                spin_lock(&queue->lock);
                if (list_empty(&queue->head)) {
                        seqno = atomic_read(&dev_priv->marker_seq);
                } else {
                        marker = list_first_entry(&queue->head,
                                                  struct vmw_marker, head);
                        seqno = marker->seqno;
                }
                spin_unlock(&queue->lock);

                ret = vmw_wait_seqno(dev_priv, false, seqno, true,
                                     3*HZ);

                if (unlikely(ret != 0))
                        return ret;

                (void) vmw_marker_pull(queue, seqno);
        }
        return 0;
}
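
/*
 * A minimal usage sketch. It assumes the queue lives in the device's
 * FIFO state as dev_priv->fifo.marker_queue and that the caller holds
 * a throttle bound in throttle_us; both names are illustrative here,
 * not defined in this file. Before queuing more commands, a caller
 * would do:
 *
 *      ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
 *                         throttle_us);
 *      if (ret)
 *              return ret;
 *
 * which blocks until the device is within throttle_us of the CPU.
 */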
