/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/drm_crtc.h>
#include <dev/pci/drm/drm_modeset_lock.h>

/**
 * DOC: kms locking
 *
 * As KMS moves toward more fine-grained locking, and the atomic ioctl, where
 * userspace can indirectly control locking order, it becomes necessary
 * to use ww_mutex and acquire-contexts to avoid deadlocks. But because
 * the locking is more distributed around the driver code, we want a bit
 * of extra utility/tracking out of our acquire-ctx. This is provided
 * by drm_modeset_lock / drm_modeset_acquire_ctx.
 *
 * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
 *
 * The basic usage pattern is to:
 *
 *     drm_modeset_acquire_init(&ctx)
 * retry:
 *     foreach (lock in random_ordered_set_of_locks) {
 *         ret = drm_modeset_lock(lock, &ctx)
 *         if (ret == -EDEADLK) {
 *             drm_modeset_backoff(&ctx);
 *             goto retry;
 *         }
 *     }
 *
 *     ... do stuff ...
 *
 *     drm_modeset_drop_locks(&ctx);
 *     drm_modeset_acquire_fini(&ctx);
 */
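
/*
 * As a concrete, purely illustrative sketch of the pattern above, the
 * following shows one way a helper might take a set of modeset locks with
 * deadlock backoff.  The names example_dev and example_do_update() are
 * hypothetical and not part of this file:
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *	ret = drm_modeset_lock_all_crtcs(example_dev, &ctx);
 *	if (ret == -EDEADLK) {
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *	if (ret == 0)
 *		example_do_update(example_dev);	// ... do stuff under the locks ...
 *
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */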

/**
 * drm_modeset_lock_all - take all modeset locks
 * @dev: drm device
 *
 * This function takes all modeset locks, suitable where a more fine-grained
 * scheme isn't (yet) implemented. Locks must be dropped with
 * drm_modeset_unlock_all.
 */
void drm_modeset_lock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (WARN_ON(!ctx))
		return;

	mutex_lock(&config->mutex);

	drm_modeset_acquire_init(ctx, 0);

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;
	ret = drm_modeset_lock_all_crtcs(dev, ctx);
	if (ret)
		goto fail;

	WARN_ON(config->acquire_ctx);

	/* now that we hold the locks it is safe to stash the
	 * ctx for drm_modeset_unlock_all():
	 */
	config->acquire_ctx = ctx;

	drm_warn_on_modeset_not_all_locked(dev);

	return;

fail:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	kfree(ctx);
}
EXPORT_SYMBOL(drm_modeset_lock_all);

/**
 * drm_modeset_unlock_all - drop all modeset locks
 * @dev: device
 *
 * This function drops all modeset locks taken by drm_modeset_lock_all.
 */
void drm_modeset_unlock_all(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	config->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);

	mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
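
/*
 * A minimal usage sketch for the pair above, e.g. around a legacy helper that
 * touches several CRTCs and connectors at once.  example_dev and
 * example_legacy_reconfigure() are hypothetical names used only for
 * illustration:
 *
 *	drm_modeset_lock_all(example_dev);
 *	example_legacy_reconfigure(example_dev);	// all modeset locks held
 *	drm_modeset_unlock_all(example_dev);
 */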

/**
 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
 * @crtc: DRM CRTC
 * @plane: DRM plane to be updated on @crtc
 *
 * This function locks the given crtc and plane (which should be either the
 * primary or cursor plane) using a hidden acquire context. This is necessary
 * so that drivers internally using the atomic interfaces can grab further
 * locks with the lock acquire context.
 *
 * Note that @plane can be NULL, e.g. when the cursor support hasn't yet been
 * converted to universal planes.
 */
void drm_modeset_lock_crtc(struct drm_crtc *crtc,
			   struct drm_plane *plane)
{
	struct drm_modeset_acquire_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (WARN_ON(!ctx))
		return;

	drm_modeset_acquire_init(ctx, 0);

retry:
	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail;

	if (plane) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			goto fail;

		if (plane->crtc) {
			ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
			if (ret)
				goto fail;
		}
	}

	WARN_ON(crtc->acquire_ctx);

	/* now that we hold the locks it is safe to stash the
	 * ctx for drm_modeset_unlock_crtc():
	 */
	crtc->acquire_ctx = ctx;

	return;

fail:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}
}
EXPORT_SYMBOL(drm_modeset_lock_crtc);

/**
 * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
 * @crtc: drm crtc
 *
 * Legacy ioctl operations like cursor updates or page flips only have per-crtc
 * locking, and store the acquire ctx in the corresponding crtc. All other
 * legacy operations take all locks and use a global acquire context. This
 * function grabs the right one.
 */
struct drm_modeset_acquire_ctx *
drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
{
	if (crtc->acquire_ctx)
		return crtc->acquire_ctx;

	WARN_ON(!crtc->dev->mode_config.acquire_ctx);

	return crtc->dev->mode_config.acquire_ctx;
}
EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);

/**
 * drm_modeset_unlock_crtc - drop crtc lock
 * @crtc: drm crtc
 *
 * This drops the crtc lock acquired with drm_modeset_lock_crtc() and all other
 * locks acquired through the hidden context.
 */
void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;

	if (WARN_ON(!ctx))
		return;

	crtc->acquire_ctx = NULL;
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);

	kfree(ctx);
}
EXPORT_SYMBOL(drm_modeset_unlock_crtc);
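
/*
 * An illustrative sketch of how a legacy per-crtc path might use the hidden
 * context: the ioctl takes the crtc lock, inner code picks up the same ctx to
 * grab further locks (handling -EDEADLK as described in the DOC comment), and
 * the ioctl drops everything at the end.  example_crtc, example_plane and
 * example_move_cursor() are hypothetical names:
 *
 *	drm_modeset_lock_crtc(example_crtc, example_plane);
 *	ctx = drm_modeset_legacy_acquire_ctx(example_crtc);
 *	example_move_cursor(example_crtc, ctx);	// may take further locks with ctx
 *	drm_modeset_unlock_crtc(example_crtc);
 */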

/**
 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
 * @dev: device
 *
 * Useful as a debug assert.
 */
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	/* Locking is currently fubar in the panic handler. */
	if (oops_in_progress)
		return;

	drm_for_each_crtc(crtc, dev)
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
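
/*
 * Illustrative use of the assert above: a driver path that assumes its caller
 * used drm_modeset_lock_all() can check that assumption up front.
 * example_full_modeset() is a hypothetical driver function:
 *
 *	static void example_full_modeset(struct drm_device *dev)
 *	{
 *		drm_warn_on_modeset_not_all_locked(dev);
 *		...
 *	}
 */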

/**
 * drm_modeset_acquire_init - initialize acquire context
 * @ctx: the acquire context
 * @flags: for future use, currently unused
 */
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
		uint32_t flags)
{
	memset(ctx, 0, sizeof(*ctx));
	ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
	INIT_LIST_HEAD(&ctx->locked);
}
EXPORT_SYMBOL(drm_modeset_acquire_init);

/**
 * drm_modeset_acquire_fini - cleanup acquire context
 * @ctx: the acquire context
 */
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx)
{
	ww_acquire_fini(&ctx->ww_ctx);
}
EXPORT_SYMBOL(drm_modeset_acquire_fini);

/**
 * drm_modeset_drop_locks - drop all locks
 * @ctx: the acquire context
 *
 * Drop all locks currently held against this acquire context.
 */
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
{
	WARN_ON(ctx->contended);
	while (!list_empty(&ctx->locked)) {
		struct drm_modeset_lock *lock;

		lock = list_first_entry(&ctx->locked,
				struct drm_modeset_lock, head);

		drm_modeset_unlock(lock);
	}
}
EXPORT_SYMBOL(drm_modeset_drop_locks);

static inline int modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx,
		bool interruptible, bool slow)
{
	int ret;

	WARN_ON(ctx->contended);

	if (ctx->trylock_only) {
		lockdep_assert_held(&ctx->ww_ctx);

		if (!ww_mutex_trylock(&lock->mutex))
			return -EBUSY;
		else
			return 0;
	} else if (interruptible && slow) {
		ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (interruptible) {
		ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx);
	} else if (slow) {
		ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx);
		ret = 0;
	} else {
		ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx);
	}
	if (!ret) {
		WARN_ON(!list_empty(&lock->head));
		list_add(&lock->head, &ctx->locked);
	} else if (ret == -EALREADY) {
		/* we already hold the lock.. this is fine.  For atomic
		 * we will need to be able to drm_modeset_lock() things
		 * without having to keep track of what is already locked
		 * or not.
		 */
		ret = 0;
	} else if (ret == -EDEADLK) {
		ctx->contended = lock;
	}

	return ret;
}

static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx,
		bool interruptible)
{
	struct drm_modeset_lock *contended = ctx->contended;

	ctx->contended = NULL;

	if (WARN_ON(!contended))
		return 0;

	drm_modeset_drop_locks(ctx);

	return modeset_lock(contended, ctx, interruptible, true);
}

/**
 * drm_modeset_backoff - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * If deadlock is detected (i.e. drm_modeset_lock() returns -EDEADLK),
 * you must call this function to drop all currently held locks and
 * block until the contended lock becomes available.
 */
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx)
{
	modeset_backoff(ctx, false);
}
EXPORT_SYMBOL(drm_modeset_backoff);

/**
 * drm_modeset_backoff_interruptible - deadlock avoidance backoff
 * @ctx: the acquire context
 *
 * Interruptible version of drm_modeset_backoff()
 */
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx)
{
	return modeset_backoff(ctx, true);
}
EXPORT_SYMBOL(drm_modeset_backoff_interruptible);

/**
 * drm_modeset_lock - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * If ctx is not NULL, then its ww acquire context is used and the
 * lock will be tracked by the context and can be released by calling
 * drm_modeset_drop_locks(). If -EDEADLK is returned, this means a
 * deadlock scenario has been detected and it is an error to attempt
 * to take any more locks without first calling drm_modeset_backoff().
 */
int drm_modeset_lock(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, false, false);

	ww_mutex_lock(&lock->mutex, NULL);
	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock);

/**
 * drm_modeset_lock_interruptible - take modeset lock
 * @lock: lock to take
 * @ctx: acquire ctx
 *
 * Interruptible version of drm_modeset_lock()
 */
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock,
		struct drm_modeset_acquire_ctx *ctx)
{
	if (ctx)
		return modeset_lock(lock, ctx, true, false);

	return ww_mutex_lock_interruptible(&lock->mutex, NULL);
}
EXPORT_SYMBOL(drm_modeset_lock_interruptible);
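
/*
 * A hedged sketch of how the interruptible variants fit the retry pattern:
 * -EDEADLK still means "back off and retry", while other errors (typically
 * -EINTR from a signal) should abort the locking sequence and be propagated
 * to the caller.  example_lock is a hypothetical drm_modeset_lock:
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *	ret = drm_modeset_lock_interruptible(&example_lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		ret = drm_modeset_backoff_interruptible(&ctx);
 *		if (!ret)
 *			goto retry;
 *	}
 *	if (ret)
 *		goto out;		// e.g. -EINTR, return it to userspace
 *	... do stuff under the lock ...
 * out:
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */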

/**
 * drm_modeset_unlock - drop modeset lock
 * @lock: lock to release
 */
void drm_modeset_unlock(struct drm_modeset_lock *lock)
{
	list_del_init(&lock->head);
	ww_mutex_unlock(&lock->mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock);

/* In some legacy codepaths it's convenient to just grab all the crtc and plane
 * related locks. */
int drm_modeset_lock_all_crtcs(struct drm_device *dev,
		struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	int ret = 0;

	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			return ret;
	}

	drm_for_each_plane(plane, dev) {
		ret = drm_modeset_lock(&plane->mutex, ctx);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);