This source file includes the following definitions:
- vmw_marker_queue_init
- vmw_marker_queue_takedown
- vmw_marker_push
- vmw_marker_pull
- vmw_fifo_lag
- vmw_lag_lt
- vmw_wait_lag
#include "vmwgfx_drv.h"

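/*
 * A marker ties the seqno of a submitted command batch to the raw
 * monotonic time at which it was submitted, so the driver can later
 * estimate how far the device FIFO lags behind command submission.
 */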
struct vmw_marker {
	struct list_head head;
	uint32_t seqno;
	u64 submitted;
};

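/*
 * vmw_marker_queue_init - Set up an empty marker queue with zero
 * accumulated lag, stamped with the current raw monotonic time.
 */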
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
	INIT_LIST_HEAD(&queue->head);
	queue->lag = 0;
	queue->lag_time = ktime_get_raw_ns();
	spin_lock_init(&queue->lock);
}

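/*
 * vmw_marker_queue_takedown - Free any markers still on the queue.
 */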
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
	struct vmw_marker *marker, *next;

	spin_lock(&queue->lock);
	list_for_each_entry_safe(marker, next, &queue->head, head) {
		kfree(marker);
	}
	spin_unlock(&queue->lock);
}

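/*
 * vmw_marker_push - Allocate a marker for @seqno, timestamp it and
 * append it to the queue. Returns -ENOMEM on allocation failure.
 */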
int vmw_marker_push(struct vmw_marker_queue *queue,
		    uint32_t seqno)
{
	struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);

	if (unlikely(!marker))
		return -ENOMEM;

	marker->seqno = seqno;
	marker->submitted = ktime_get_raw_ns();
	spin_lock(&queue->lock);
	list_add_tail(&marker->head, &queue->head);
	spin_unlock(&queue->lock);

	return 0;
}

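/*
 * vmw_marker_pull - Retire all markers whose seqno has been signaled
 * and update the queue's lag from the retired markers' submission
 * times. The seqno comparison uses wrapping 32-bit arithmetic: a
 * difference above 2^30 means the marker's seqno is still ahead of
 * @signaled_seqno, so that marker is skipped. Returns -EBUSY if no
 * update was possible.
 */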
int vmw_marker_pull(struct vmw_marker_queue *queue,
		    uint32_t signaled_seqno)
{
	struct vmw_marker *marker, *next;
	bool updated = false;
	u64 now;

	spin_lock(&queue->lock);
	now = ktime_get_raw_ns();

	if (list_empty(&queue->head)) {
		queue->lag = 0;
		queue->lag_time = now;
		updated = true;
		goto out_unlock;
	}

	list_for_each_entry_safe(marker, next, &queue->head, head) {
		if (signaled_seqno - marker->seqno > (1 << 30))
			continue;

		queue->lag = now - marker->submitted;
		queue->lag_time = now;
		updated = true;
		list_del(&marker->head);
		kfree(marker);
	}

out_unlock:
	spin_unlock(&queue->lock);

	return (updated) ? 0 : -EBUSY;
}

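/*
 * vmw_fifo_lag - Return the current lag estimate, grown by the wall
 * time that has elapsed since it was last updated.
 */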
static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
{
	u64 now;

	spin_lock(&queue->lock);
	now = ktime_get_raw_ns();
	queue->lag += now - queue->lag_time;
	queue->lag_time = now;
	spin_unlock(&queue->lock);
	return queue->lag;
}

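/*
 * vmw_lag_lt - Check whether the current lag is at most @us
 * microseconds.
 */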
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
		       uint32_t us)
{
	u64 cond = (u64) us * NSEC_PER_USEC;

	return vmw_fifo_lag(queue) <= cond;
}

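/*
 * vmw_wait_lag - Block until the FIFO lag drops to at most @us
 * microseconds, by repeatedly waiting (up to 3 seconds per iteration)
 * for the oldest outstanding marker's seqno and then retiring it. If
 * the queue is empty, the most recently emitted seqno is waited on
 * instead.
 */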
int vmw_wait_lag(struct vmw_private *dev_priv,
		 struct vmw_marker_queue *queue, uint32_t us)
{
	struct vmw_marker *marker;
	uint32_t seqno;
	int ret;

	while (!vmw_lag_lt(queue, us)) {
		spin_lock(&queue->lock);
		if (list_empty(&queue->head))
			seqno = atomic_read(&dev_priv->marker_seq);
		else {
			marker = list_first_entry(&queue->head,
						  struct vmw_marker, head);
			seqno = marker->seqno;
		}
		spin_unlock(&queue->lock);

		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
				     3*HZ);

		if (unlikely(ret != 0))
			return ret;

		(void) vmw_marker_pull(queue, seqno);
	}
	return 0;
}