/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H

#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>
/*
 * This is the control structure for tasks blocked on an rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @tree_entry:		pi node to enqueue into the mutex waiters tree
 * @pi_tree_entry:	pi node to enqueue into the mutex owner waiters tree
 * @task:		task reference to the blocked task
 */
struct rt_mutex_waiter {
	struct rb_node		tree_entry;
	struct rb_node		pi_tree_entry;
	struct task_struct	*task;
	struct rt_mutex		*lock;
#ifdef CONFIG_DEBUG_RT_MUTEXES
	unsigned long		ip;
	struct pid		*deadlock_task_pid;
	struct rt_mutex		*deadlock_lock;
#endif
	int prio;
	u64 deadline;
};
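
/*
 * Illustrative sketch (simplified, not verbatim kernel code): the waiter is
 * declared on the stack of the task that blocks and set up with
 * rt_mutex_init_waiter(), declared further down in this header. While
 * queued, @tree_entry links it into the lock's waiters tree; @pi_tree_entry
 * links it into the owner's pi_waiters tree for as long as it is the top
 * waiter of that lock.
 *
 *	struct rt_mutex_waiter waiter;
 *
 *	rt_mutex_init_waiter(&waiter);
 *	(the rtmutex slow path / proxy-lock code then enqueues &waiter)
 */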

/*
 * Various helpers to access the waiters-tree:
 */
#ifdef CONFIG_RT_MUTEXES

static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
{
	return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
}

static inline struct rt_mutex_waiter *
rt_mutex_top_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *w;

	w = rb_entry(lock->waiters.rb_leftmost,
		     struct rt_mutex_waiter, tree_entry);
	BUG_ON(w->lock != lock);

	return w;
}

static inline int task_has_pi_waiters(struct task_struct *p)
{
	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
}

static inline struct rt_mutex_waiter *
task_top_pi_waiter(struct task_struct *p)
{
	return rb_entry(p->pi_waiters.rb_leftmost,
			struct rt_mutex_waiter, pi_tree_entry);
}

#else

static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
{
	return false;
}

static inline struct rt_mutex_waiter *
rt_mutex_top_waiter(struct rt_mutex *lock)
{
	return NULL;
}

static inline int task_has_pi_waiters(struct task_struct *p)
{
	return false;
}

static inline struct rt_mutex_waiter *
task_top_pi_waiter(struct task_struct *p)
{
	return NULL;
}

#endif
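
/*
 * Usage note with a hedged example (not from the original source):
 * rt_mutex_top_waiter() and task_top_pi_waiter() assume their tree is
 * non-empty, so callers are expected to check first, roughly:
 *
 *	if (rt_mutex_has_waiters(lock)) {
 *		struct rt_mutex_waiter *top = rt_mutex_top_waiter(lock);
 *
 *		(top is the leftmost, i.e. highest-priority or
 *		 earliest-deadline, waiter of @lock)
 *	}
 */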

/*
 * lock->owner state tracking:
 */
#define RT_MUTEX_HAS_WAITERS	1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
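
/*
 * Worked example of the encoding above (illustrative; meanings paraphrased,
 * not copied from the original source): bit 0 of lock->owner carries
 * RT_MUTEX_HAS_WAITERS, the remaining bits are the owner task pointer.
 *
 *	owner value				meaning
 *	NULL					lock is free
 *	NULL | RT_MUTEX_HAS_WAITERS		lock is free, but waiters are
 *						queued (top waiter about to
 *						take it)
 *	task pointer				lock is held, no waiters
 *	task pointer | RT_MUTEX_HAS_WAITERS	lock is held and has waiters
 *
 * rt_mutex_owner() masks the low bit off before returning the task pointer,
 * and returns NULL while the lock is formally unowned.
 */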

/*
 * Constants for rt mutex functions which have a selectable deadlock
 * detection.
 *
 * RT_MUTEX_MIN_CHAINWALK:	Stops the lock chain walk when there are
 *				no further PI adjustments to be made.
 *
 * RT_MUTEX_FULL_CHAINWALK:	Invoke deadlock detection with a full
 *				walk of the lock chain.
 */
enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,
	RT_MUTEX_FULL_CHAINWALK,
};

/*
 * PI-futex support (proxy locking functions, etc.):
 */
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
				  struct task_struct *proxy_owner);
extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
				       struct rt_mutex_waiter *waiter,
				       struct task_struct *task);
extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
				    struct hrtimer_sleeper *to,
				    struct rt_mutex_waiter *waiter);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
					struct rt_mutex_waiter *waiter);
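
/*
 * Hedged sketch of the proxy-locking sequence, loosely modelled on the futex
 * PI paths (simplified; @lock, @to and @task are placeholders, not verbatim
 * kernel code):
 *
 *	struct rt_mutex_waiter rt_waiter;
 *	int ret;
 *
 *	rt_mutex_init_waiter(&rt_waiter);
 *
 *	ret = rt_mutex_start_proxy_lock(lock, &rt_waiter, task);
 *	if (ret == 0)
 *		ret = rt_mutex_wait_proxy_lock(lock, to, &rt_waiter);
 *	else if (ret == 1)
 *		ret = 0;
 *
 *	if (ret && !rt_mutex_cleanup_proxy_lock(lock, &rt_waiter))
 *		ret = 0;
 *
 * rt_mutex_start_proxy_lock() is expected to return 1 when the lock could be
 * taken for @task right away, 0 when the waiter was enqueued and must block,
 * and a negative value on deadlock detection; rt_mutex_cleanup_proxy_lock()
 * handles the race where the lock was acquired anyway after a failed wait.
 */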

extern int rt_mutex_futex_trylock(struct rt_mutex *l);

extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
				    struct wake_q_head *wqh);

extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
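
/*
 * Hedged sketch of the deferred-wakeup unlock pattern suggested by the split
 * above (simplified, not verbatim kernel code): the wakeup is collected on a
 * wake_q while holding the internal wait_lock and only issued afterwards.
 *
 *	DEFINE_WAKE_Q(wake_q);
 *	bool postunlock;
 *
 *	raw_spin_lock_irq(&lock->wait_lock);
 *	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
 *	raw_spin_unlock_irq(&lock->wait_lock);
 *
 *	if (postunlock)
 *		rt_mutex_postunlock(&wake_q);
 *
 * __rt_mutex_futex_unlock() only queues the top waiter on the wake_q;
 * rt_mutex_postunlock() performs the actual wakeup once the lock-internal
 * spinlock has been dropped.
 */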

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif

#endif