/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <dev/pci/drm/drm_linux.h>
#include <dev/pci/drm/linux_ww_mutex.h>
#include <dev/pci/drm/linux_reservation.h>

DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
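
/*
 * Illustrative usage sketch (not taken from any particular driver;
 * "ctx" and "fence" are placeholders): callers lock the reservation
 * object through its ww_mutex, reserve a shared slot, and then add
 * the fence.
 *
 *	ww_mutex_lock(&obj->lock, ctx);
 *	if (reservation_object_reserve_shared(obj) == 0)
 *		reservation_object_add_shared_fence(obj, fence);
 *	ww_mutex_unlock(&obj->lock);
 */
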
/*
 * Reserve space to add a shared fence to a reservation_object;
 * must be called with obj->lock held.
 */
int reservation_object_reserve_shared(struct reservation_object *obj)
{
	struct reservation_object_list *fobj, *old;
	u32 max;

	old = reservation_object_get_list(obj);

	if (old && old->shared_max) {
		if (old->shared_count < old->shared_max) {
			/* perform an in-place update */
			kfree(obj->staged);
			obj->staged = NULL;
			return 0;
		} else
			max = old->shared_max * 2;
	} else
		max = 4;

	/*
	 * Grow obj->staged to the new size, or allocate it if it
	 * doesn't exist yet (open-coded krealloc).
	 */
	fobj = kmalloc(offsetof(typeof(*fobj), shared[max]), GFP_KERNEL);
	if (!fobj)
		return -ENOMEM;
	if (obj->staged != NULL) {
		/*
		 * Copy only as many entries as the old staged buffer
		 * actually holds; copying the new, larger size would
		 * read past the end of the old allocation.
		 */
		memcpy(fobj, obj->staged,
		    offsetof(typeof(*fobj), shared[obj->staged->shared_max]));
	}
	kfree(obj->staged);

	obj->staged = fobj;
	fobj->shared_max = max;
	return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);

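/*
 * Add a shared fence to the fence list in place, without reallocating:
 * a fence from the same context replaces the existing entry (whose
 * reference is dropped), otherwise the fence is appended.  The caller
 * must hold obj->lock and have reserved a free slot beforehand.
 */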
static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	u32 i;

	fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < fobj->shared_count; ++i) {
		struct fence *old_fence;

		old_fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj));

		if (old_fence->context == fence->context) {
			/* memory barrier is added by write_seqcount_begin */
			RCU_INIT_POINTER(fobj->shared[i], fence);
			write_seqcount_end(&obj->seq);
			preempt_enable();

			fence_put(old_fence);
			return;
		}
	}

	/*
	 * memory barrier is added by write_seqcount_begin,
	 * fobj->shared_count is protected by this lock too
	 */
	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
	fobj->shared_count++;

	write_seqcount_end(&obj->seq);
	preempt_enable();
}

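/*
 * Publish the staged (larger) fence list in place of obj->fence,
 * carrying the old fence references over and replacing or appending
 * the new fence.  The old list is freed with kfree_rcu once all RCU
 * readers are done with it.  The caller must hold obj->lock.
 */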
static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	fence_get(fence);

	if (!old) {
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * No need to bump fence refcounts: RCU readers must use
	 * kref_get_unless_zero anyway, and the references held by the
	 * old struct are carried over to the new one.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		if (!old_fence && check->context == fence->context) {
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	if (old)
		kfree_rcu(old, rcu);

	if (old_fence)
		fence_put(old_fence);
}

/*
 * Add a fence to a shared slot; obj->lock must be held, and
 * reservation_object_reserve_shared() must have been called.
 */
void reservation_object_add_shared_fence(struct reservation_object *obj,
					 struct fence *fence)
{
	struct reservation_object_list *old, *fobj = obj->staged;

	old = reservation_object_get_list(obj);
	obj->staged = NULL;

	if (!fobj) {
		BUG_ON(old->shared_count >= old->shared_max);
		reservation_object_add_shared_inplace(obj, old, fence);
	} else
		reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);

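/*
 * Add a fence to the exclusive slot and drop all shared fences;
 * passing a NULL fence clears the exclusive slot.  The caller must
 * hold obj->lock.
 */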
void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct fence *fence)
{
	struct fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	if (old_fence)
		fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);

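/*
 * Snapshot the exclusive fence and the shared fence list without
 * taking obj->lock, returning a reference on every fence handed back.
 * The shared array is allocated here and must be freed by the caller.
 * Returns 0 on success or -ENOMEM.
 */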
int reservation_object_get_fences_rcu(struct reservation_object *obj,
				      struct fence **pfence_excl,
				      unsigned *pshared_count,
				      struct fence ***pshared)
{
	unsigned shared_count = 0;
	unsigned retry = 1;
	struct fence **shared = NULL, *fence_excl = NULL;
	int ret = 0;

	while (retry) {
		struct reservation_object_list *fobj;
		unsigned seq;

		seq = read_seqcount_begin(&obj->seq);

		rcu_read_lock();

		fobj = rcu_dereference(obj->fence);
		if (fobj) {
			struct fence **nshared;
			size_t sz = sizeof(*shared) * fobj->shared_max;

			/*
			 * Open-coded krealloc: the old contents never
			 * need to be preserved, since the buffer is
			 * rewritten from fobj->shared below, so the old
			 * buffer is only freed once a new one has been
			 * allocated.  Freeing it any earlier would
			 * leave a dangling pointer behind on the retry
			 * and error paths.
			 */
			nshared = kmalloc(sz, GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();
				nshared = kmalloc(sz, GFP_KERNEL);
				if (nshared) {
					kfree(shared);
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				shared_count = 0;
				break;
			}
			kfree(shared);
			shared = nshared;
			memcpy(shared, fobj->shared, sz);
			shared_count = fobj->shared_count;
		} else
			shared_count = 0;
		fence_excl = rcu_dereference(obj->fence_excl);

		retry = read_seqcount_retry(&obj->seq, seq);
		if (retry)
			goto unlock;

		if (!fence_excl || fence_get_rcu(fence_excl)) {
			unsigned i;

			for (i = 0; i < shared_count; ++i) {
				if (fence_get_rcu(shared[i]))
					continue;

				/* uh oh, refcount failed, abort and retry */
				while (i--)
					fence_put(shared[i]);

				if (fence_excl) {
					fence_put(fence_excl);
					fence_excl = NULL;
				}

				retry = 1;
				break;
			}
		} else
			retry = 1;

unlock:
		rcu_read_unlock();
	}
	*pshared_count = shared_count;
	if (shared_count)
		*pshared = shared;
	else {
		*pshared = NULL;
		kfree(shared);
	}
	*pfence_excl = fence_excl;

	return ret;
}
#ifdef __linux__
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
#endif

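/*
 * Wait for the fences on a reservation object without taking
 * obj->lock.  If wait_all is set, the shared fences are waited on in
 * addition to the exclusive fence.  A timeout of zero only tests
 * whether the fences have signaled.  Returns the remaining timeout on
 * success, 0 on timeout, or a negative error if an interruptible wait
 * was interrupted.
 */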
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
					 bool wait_all, bool intr,
					 unsigned long timeout)
{
	struct fence *fence;
	unsigned seq, shared_count, i = 0;
	long ret = timeout;

	if (!timeout)
		return reservation_object_test_signaled_rcu(obj, wait_all);

retry:
	fence = NULL;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (wait_all) {
		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
				continue;

			if (!fence_get_rcu(lfence))
				goto unlock_retry;

			if (fence_is_signaled(lfence)) {
				fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl &&
		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
			if (!fence_get_rcu(fence_excl))
				goto unlock_retry;

			if (fence_is_signaled(fence_excl))
				fence_put(fence_excl);
			else
				fence = fence_excl;
		}
	}

	rcu_read_unlock();
	if (fence) {
		ret = fence_wait_timeout(fence, intr, ret);
		fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
#ifdef __linux__
EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
#endif

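/*
 * Test a single fence under RCU: returns 1 if signaled, 0 if not, or
 * -1 if a reference could not be taken and the caller should retry.
 */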
static inline int
reservation_object_test_signaled_single(struct fence *passed_fence)
{
	struct fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!fence_is_signaled(fence);
		fence_put(fence);
	}
	return ret;
}

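/*
 * Test whether the fences on a reservation object have signaled,
 * without taking obj->lock.  If test_all is set, the shared fences
 * are checked in addition to the exclusive fence.
 */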
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
					  bool test_all)
{
	unsigned seq, shared_count;
	int ret = true;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (test_all) {
		unsigned i;

		struct reservation_object_list *fobj =
						rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			ret = reservation_object_test_signaled_single(fence);
			if (ret < 0)
				goto unlock_retry;
			else if (!ret)
				break;
		}

		/*
		 * There could be a read_seqcount_retry here, but nothing cares
		 * about whether it's the old or newer fence pointers that are
		 * signaled. That race could still have happened after checking
		 * read_seqcount_retry. If you care, use ww_mutex_lock.
		 */
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl) {
			ret = reservation_object_test_signaled_single(
								fence_excl);
			if (ret < 0)
				goto unlock_retry;
		}
	}

	rcu_read_unlock();
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
#ifdef __linux__
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
#endif