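/*
 * RT-mutexes: blocking mutual-exclusion locks with priority-inheritance
 * (PI) support.
 */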
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"
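
/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner; the low bits
 * of that word carry state flags (see rtmutex_common.h):
 *
 *   RT_MUTEX_OWNER_PENDING - ownership was handed to the top waiter on
 *        unlock, but that task has not taken the lock yet; a higher
 *        priority task may still steal it (see try_to_steal_lock()).
 *   RT_MUTEX_HAS_WAITERS   - the wait_list is non-empty, so the
 *        lockless fastpaths must not be used.
 *
 * A NULL owner with no bits set means the lock is free and can be taken
 * via the cmpxchg fastpath.
 */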
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
                   unsigned long mask)
{
        unsigned long val = (unsigned long)owner | mask;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                clear_rt_mutex_waiters(lock);
}
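
/*
 * The acquire/release fastpaths do a cmpxchg on lock->owner.  They are
 * only usable when the architecture provides cmpxchg and rt-mutex
 * debugging is disabled; otherwise rt_mutex_cmpxchg() always fails and
 * everything goes through the slowpaths under lock->wait_lock.
 */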
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)        (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
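
/*
 * Calculate task priority from the waiter list priority.
 *
 * Returns task->normal_prio when the task has no PI waiters, otherwise
 * the minimum of normal_prio and the top pi-waiter's priority.
 */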
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->pi_list_entry.prio,
                   task->normal_prio);
}
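
/*
 * Adjust the priority of a task after its pi_waiters list changed.
 * This can be both boosting and unboosting.  task->pi_lock must be held.
 */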
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio)
                rt_mutex_setprio(task, prio);
}
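
/*
 * Adjust task priority (undo boosting).  Called from the exit paths of
 * rt_mutex_slowunlock() and rt_mutex_slowlock(), outside of
 * lock->wait_lock; only task->pi_lock is taken here.
 */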
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        spin_unlock_irqrestore(&task->pi_lock, flags);
}
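
/*
 * Max number of times we'll walk the boosting chain:
 */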
int max_lock_depth = 1024;
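
/*
 * Adjust the priority chain.  Also used for deadlock detection.
 *
 * Walks the PI chain from @task to the owner of the lock it is blocked
 * on, and onwards, requeueing waiters and (de)boosting owners as it
 * goes.  Drops the caller's reference on @task (put_task_struct()) on
 * all exit paths.  Returns 0 or -EDEADLK.
 */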
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step-by-step approach: at most two locks
         * (task->pi_lock and lock->wait_lock) are held per step, so the
         * state has to be re-checked after every step in case it changed
         * while the locks were dropped.
         */
 again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once.  If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return deadlock_detect ? -EDEADLK : 0;
        }
 retry:
        /*
         * Task can not go away as we did a get_task_struct() before!
         */
        spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been reached
         * or whether the state of the chain changed while we dropped
         * the locks.
         */
        if (!waiter || !waiter->task)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state.  After we dropped the locks, the
         * previous owner of the lock might have released the lock and
         * made us the pending owner:
         */
        if (orig_waiter && !orig_waiter->task)
                goto out_unlock_pi;

        /*
         * Drop out when top_waiter is no longer the top pi-waiter of the
         * task (or the task has no pi-waiters at all any more).
         */
        if (top_waiter && (!task_has_pi_waiters(task) ||
                           top_waiter != task_top_pi_waiter(task)))
                goto out_unlock_pi;

        /*
         * When deadlock detection is off, we check whether any further
         * priority adjustment is necessary at all:
         */
        if (!detect_deadlock && waiter->list_entry.prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!spin_trylock(&lock->wait_lock)) {
                spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /* Deadlock detection */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                spin_unlock(&lock->wait_lock);
                ret = deadlock_detect ? -EDEADLK : 0;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter on the lock's wait_list with its new priority */
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->list_entry.prio = task->prio;
        plist_add(&waiter->list_entry, &lock->wait_list);

        /* Release the task */
        spin_unlock_irqrestore(&task->pi_lock, flags);
        put_task_struct(task);

        /* Grab the next task: the owner of @lock */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                plist_del(&waiter->pi_list_entry, &task->pi_waiters);
                waiter = rt_mutex_top_waiter(lock);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);
        }

        spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        spin_unlock(&lock->wait_lock);

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
        put_task_struct(task);

        return ret;
}
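
/*
 * Lock stealing: called with lock->wait_lock held.
 *
 * Returns 1 when the lock only has a pending owner and current either
 * already is that pending owner or has a higher priority than it; the
 * caller may then take the lock.  The pending owner's pi_waiters
 * bookkeeping is fixed up accordingly.
 */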
static inline int try_to_steal_lock(struct rt_mutex *lock)
{
        struct task_struct *pendowner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *next;
        unsigned long flags;

        if (!rt_mutex_owner_pending(lock))
                return 0;

        if (pendowner == current)
                return 1;

        spin_lock_irqsave(&pendowner->pi_lock, flags);
        if (current->prio >= pendowner->prio) {
                spin_unlock_irqrestore(&pendowner->pi_lock, flags);
                return 0;
        }

        /*
         * Check if a waiter is enqueued on the pending owner's
         * pi_waiters list.  Remove it and readjust the pending owner's
         * priority.
         */
        if (likely(!rt_mutex_has_waiters(lock))) {
                spin_unlock_irqrestore(&pendowner->pi_lock, flags);
                return 1;
        }

        /* No chain handling, the pending owner is not blocked on anything: */
        next = rt_mutex_top_waiter(lock);
        plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
        __rt_mutex_adjust_prio(pendowner);
        spin_unlock_irqrestore(&pendowner->pi_lock, flags);

        /*
         * We are going to steal the lock and the pending owner had a
         * waiter enqueued on its pi_waiters queue, so that waiter has to
         * be enqueued on current's pi_waiters list instead: current is
         * about to become the (pending) owner that the top waiter boosts.
         *
         * next->task can be current itself when current is the top
         * waiter stealing the lock back; a task does not boost itself,
         * so no enqueueing is needed then.
         */
        if (likely(next->task != current)) {
                spin_lock_irqsave(&current->pi_lock, flags);
                plist_add(&next->pi_list_entry, &current->pi_waiters);
                __rt_mutex_adjust_prio(current);
                spin_unlock_irqrestore(&current->pi_lock, flags);
        }
        return 1;
}
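
/*
 * Try to take an rt-mutex.
 *
 * Must be called with lock->wait_lock held.
 *
 * Fails when the lock has a real (non-pending) owner, or when it has a
 * pending owner that current cannot steal the lock from.
 */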
static int try_to_take_rt_mutex(struct rt_mutex *lock)
{
        /*
         * Setting RT_MUTEX_HAS_WAITERS before looking at the owner
         * closes the race with the cmpxchg based fastpaths: once the
         * bit is set, rt_mutex_cmpxchg() fails in the fast acquire and
         * release paths and everybody is forced into the slowpath under
         * lock->wait_lock, so the checks below stay valid.
         */
        mark_rt_mutex_waiters(lock);

        if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
                return 0;

        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        rt_mutex_set_owner(lock, current, 0);

        rt_mutex_deadlock_account_lock(lock, current);

        return 1;
}
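
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate the PI chain if necessary.
 *
 * Must be called with lock->wait_lock held.
 */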
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        unsigned long flags;
        int chain_walk = 0, res;

        spin_lock_irqsave(&current->pi_lock, flags);
        __rt_mutex_adjust_prio(current);
        waiter->task = current;
        waiter->lock = lock;
        plist_node_init(&waiter->list_entry, current->prio);
        plist_node_init(&waiter->pi_list_entry, current->prio);

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        plist_add(&waiter->list_entry, &lock->wait_list);

        current->pi_blocked_on = waiter;

        spin_unlock_irqrestore(&current->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                spin_lock_irqsave(&owner->pi_lock, flags);
                plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
                plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
                spin_unlock_irqrestore(&owner->pi_lock, flags);
        }
        else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
                chain_walk = 1;

        if (!chain_walk)
                return 0;

        /*
         * The owner can't disappear while holding a lock, so the owner
         * struct is protected by wait_lock.  The reference gets dropped
         * in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
                                         current);

        spin_lock(&lock->wait_lock);

        return res;
}
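
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's pi_waiters and from the
 * lock's wait_list, make it the pending owner and wake it up.
 *
 * Called with lock->wait_lock held.
 */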
static void wakeup_next_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;
        struct task_struct *pendowner;
        unsigned long flags;

        spin_lock_irqsave(&current->pi_lock, flags);

        waiter = rt_mutex_top_waiter(lock);
        plist_del(&waiter->list_entry, &lock->wait_list);

        /*
         * Remove it from current->pi_waiters.  We do not adjust a
         * possible priority boost right now: the wakeup is done in the
         * boosted mode and the boost is undone after releasing
         * lock->wait_lock.
         */
        plist_del(&waiter->pi_list_entry, &current->pi_waiters);
        pendowner = waiter->task;
        waiter->task = NULL;

        rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

        spin_unlock_irqrestore(&current->pi_lock, flags);

        /*
         * Clear pi_blocked_on of the woken task and enqueue the lock's
         * new top waiter (if any) on the pending owner's pi_waiters
         * list, as the pending owner is the one the remaining waiters
         * boost from now on.
         */
        spin_lock_irqsave(&pendowner->pi_lock, flags);

        WARN_ON(!pendowner->pi_blocked_on);
        WARN_ON(pendowner->pi_blocked_on != waiter);
        WARN_ON(pendowner->pi_blocked_on->lock != lock);

        pendowner->pi_blocked_on = NULL;

        if (rt_mutex_has_waiters(lock)) {
                struct rt_mutex_waiter *next;

                next = rt_mutex_top_waiter(lock);
                plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
        }
        spin_unlock_irqrestore(&pendowner->pi_lock, flags);

        wake_up_process(pendowner);
}
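
/*
 * Remove a waiter from a lock.
 *
 * Must be called with lock->wait_lock held.
 */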
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        unsigned long flags;
        int chain_walk = 0;

        spin_lock_irqsave(&current->pi_lock, flags);
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->task = NULL;
        current->pi_blocked_on = NULL;
        spin_unlock_irqrestore(&current->pi_lock, flags);

        if (first && owner != current) {

                spin_lock_irqsave(&owner->pi_lock, flags);

                plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        plist_add(&next->pi_list_entry, &owner->pi_waiters);
                }
                __rt_mutex_adjust_prio(owner);

                if (owner->pi_blocked_on)
                        chain_walk = 1;

                spin_unlock_irqrestore(&owner->pi_lock, flags);
        }

        WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

        if (!chain_walk)
                return;

        /* The owner reference gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        spin_unlock(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

        spin_lock(&lock->wait_lock);
}
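
/*
 * Recheck the PI chain, in case we got a priority setting.
 *
 * Called when a task's priority is changed externally
 * (e.g. sched_setscheduler()).
 */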
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || waiter->list_entry.prio == task->prio) {
                spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }

        spin_unlock_irqrestore(&task->pi_lock, flags);

        /* The task reference gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
        rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
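
/*
 * Slow path lock function:
 */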
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter;
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);
        waiter.task = NULL;

        spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock)) {
                spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Setup the timer, when timeout != NULL */
        if (unlikely(timeout)) {
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
                if (!hrtimer_active(&timeout->timer))
                        timeout->task = NULL;
        }

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and timeout.
                 * Ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                /*
                 * waiter.task is NULL the first time we come here and
                 * when we have been woken up by the previous owner but
                 * the lock got stolen by a higher priority task.
                 */
                if (!waiter.task) {
                        ret = task_blocks_on_rt_mutex(lock, &waiter,
                                                      detect_deadlock);
                        /*
                         * If we got woken up by the owner then start the
                         * loop all over without going into schedule(), to
                         * try to get the lock again.
                         */
                        if (unlikely(!waiter.task)) {
                                /*
                                 * Reset the return value: we might have
                                 * returned with -EDEADLK and the owner
                                 * released the lock while we were walking
                                 * the pi chain.
                                 */
                                ret = 0;
                                continue;
                        }
                        if (unlikely(ret))
                                break;
                }

                spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(&waiter);

                if (waiter.task)
                        schedule_rt_mutex(lock);

                spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        set_current_state(TASK_RUNNING);

        if (unlikely(waiter.task))
                remove_waiter(lock, &waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally.
         * We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        /*
         * Readjust priority, when we did not get the lock.  We might
         * have been the pending owner and got boosted; since we did not
         * take the lock, that boost has to be undone.
         */
        if (unlikely(ret))
                rt_mutex_adjust_prio(current);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}
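
/*
 * Slow path try-lock function:
 */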
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret = 0;

        spin_lock(&lock->wait_lock);

        if (likely(rt_mutex_owner(lock) != current)) {

                ret = try_to_take_rt_mutex(lock);
                /*
                 * try_to_take_rt_mutex() sets the lock waiters bit
                 * unconditionally.  Clean this up.
                 */
                fixup_rt_mutex_waiters(lock);
        }

        spin_unlock(&lock->wait_lock);

        return ret;
}
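
/*
 * Slow path to release a rt-mutex:
 */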
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        spin_lock(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
                spin_unlock(&lock->wait_lock);
                return;
        }

        wakeup_next_waiter(lock);

        spin_unlock(&lock->wait_lock);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}
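
/*
 * debug-aware fast / slowpath lock, trylock and unlock helpers.
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled
 * (rt_mutex_cmpxchg() is 0 in that case).
 */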
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}
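
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */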
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
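
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */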
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                        int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
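
/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible, with a timeout
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */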
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
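
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */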
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
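
/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */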
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
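
/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized; any subsequent use of
 * the mutex is forbidden.  The mutex must not be locked when this
 * function is called.
 */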
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);
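
/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock: the rt_mutex to be initialized
 * @name: lock name used by the debugging code
 *
 * Initialize the rt_mutex to the unlocked state.
 *
 * Initializing of a locked rt_mutex is not allowed.
 */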
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        spin_lock_init(&lock->wait_lock);
        plist_head_init(&lock->wait_list, &lock->wait_lock);

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
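
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf
 *				of a proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking; the caller has to serialize itself.
 * Special API call for PI-futex support.
 */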
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner, 0);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}
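
/**
 * rt_mutex_proxy_unlock - release a lock on behalf of the proxy owner
 *
 * @lock:	the rt_mutex to be unlocked
 * @proxy_owner: the task that was set as owner
 *
 * No locking; the caller has to serialize itself.
 * Special API call for PI-futex support.
 */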
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL, 0);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}
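
/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the top waiter's task (the next owner of the lock) or NULL.
 *
 * The caller has to serialize against other accessors to the lock.
 * Special API call for PI-futex support.
 */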
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}