author	Arjan van de Ven <arjan@linux.intel.com>	2008-09-01 15:47:08 -0700
committer	Arjan van de Ven <arjan@linux.intel.com>	2008-09-05 21:35:27 -0700
commit	654c8e0b1c623b156c5b92f28d914ab38c9c2c90 (patch)
tree	b3b1f8cfa8f3b12170f0d8b8770857182a2f0309
parent	799b64de256ea68fbb5db63bb55f61c305870643 (diff)
hrtimer: turn hrtimers into range timers
This patch turns hrtimers into range timers: each timer now has two expire points, 1) the soft expire point and 2) the hard expire point. The kernel still makes its regular best-effort attempt to run the timer at the hard expire point. However, if some other timer fires after the soft expire point, the kernel is now free to fire this timer at that point as well, grouping the wakeups and avoiding a power-expensive extra wakeup later on.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
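As a rough usage sketch of the new range API (not part of this patch; the function names and the 1 ms / 100 us values are made up for illustration), a driver could arm a timer whose soft expiry is 1 ms from now and whose hard expiry is 100 us later, giving the kernel that window in which to coalesce the wakeup with other work:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;	/* illustrative only */

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
	/* the periodic work would go here */
	return HRTIMER_NORESTART;
}

static void example_arm_range_timer(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	example_timer.function = example_timer_fn;

	/* soft expiry 1 ms from now, hard expiry 100 us after that */
	hrtimer_set_expires_range_ns(&example_timer,
				     ktime_add_ns(ktime_get(), NSEC_PER_MSEC),
				     100 * NSEC_PER_USEC);
	hrtimer_start_expires(&example_timer, HRTIMER_MODE_ABS);
}

The timer may fire anywhere between the soft and hard expiry; only the hard expiry keeps the old best-effort deadline guarantee.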
-rw-r--r--	include/linux/hrtimer.h	31
-rw-r--r--	kernel/hrtimer.c	56
2 files changed, 82 insertions(+), 5 deletions(-)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 485a634fd6e2..28259c336679 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -112,6 +112,7 @@ enum hrtimer_cb_mode {
struct hrtimer {
struct rb_node node;
ktime_t _expires;
+ ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
@@ -220,20 +221,37 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
timer->_expires = time;
+ timer->_softexpires = time;
}
+
+static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
+{
+ timer->_softexpires = time;
+ timer->_expires = ktime_add_safe(time, delta);
+}
+
+static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
+{
+ timer->_softexpires = time;
+ timer->_expires = ktime_add_safe(time, ns_to_ktime(delta));
+}
+
static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
timer->_expires.tv64 = tv64;
+ timer->_softexpires.tv64 = tv64;
}
static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
timer->_expires = ktime_add_safe(timer->_expires, time);
+ timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
}
static inline void hrtimer_add_expires_ns(struct hrtimer *timer, unsigned long ns)
{
timer->_expires = ktime_add_ns(timer->_expires, ns);
+ timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
}
static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
@@ -241,10 +259,19 @@ static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
return timer->_expires;
}
+static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
+{
+ return timer->_softexpires;
+}
+
static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
return timer->_expires.tv64;
}
+static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
+{
+ return timer->_softexpires.tv64;
+}
static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
@@ -334,7 +361,7 @@ static inline int hrtimer_start_expires(struct hrtimer *timer,
static inline int hrtimer_restart(struct hrtimer *timer)
{
- return hrtimer_start(timer, timer->_expires, HRTIMER_MODE_ABS);
+ return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
/* Query timers: */
@@ -391,6 +418,8 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
struct task_struct *tsk);
+extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+ const enum hrtimer_mode mode);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
/* Soft interrupt function to run the hrtimer queues: */
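The new accessor pair makes the slack window of an armed timer easy to inspect. A minimal sketch, assuming only the helpers added above (the function name is illustrative, not part of the patch):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static s64 example_timer_slack_ns(const struct hrtimer *timer)
{
	/* hard expiry minus soft expiry = window the kernel may use */
	return ktime_to_ns(ktime_sub(hrtimer_get_expires(timer),
				     hrtimer_get_softexpires(timer)));
}

For a timer armed with plain hrtimer_set_expires() this returns 0, since soft and hard expiry coincide and the old exact behaviour is preserved.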
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ae307feec74c..01483004183d 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1309,7 +1309,20 @@ void hrtimer_interrupt(struct clock_event_device *dev)
timer = rb_entry(node, struct hrtimer, node);
- if (basenow.tv64 < hrtimer_get_expires_tv64(timer)) {
+ /*
+ * The immediate goal for using the softexpires is
+ * minimizing wakeups, not running timers at the
+ * earliest interrupt after their soft expiration.
+ * This allows us to avoid using a Priority Search
+ * Tree, which can answer a stabbing query for
+ * overlapping intervals, and instead use the simple
+ * BST we already have.
+ * We don't add extra wakeups by delaying timers that
+ * are to the right of a not-yet-expired timer, because
+ * that timer will have to trigger a wakeup anyway.
+ */
+
+ if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
ktime_t expires;
expires = ktime_sub(hrtimer_get_expires(timer),
@@ -1681,14 +1694,20 @@ void __init hrtimers_init(void)
}
/**
- * schedule_hrtimeout - sleep until timeout
+ * schedule_hrtimeout_range - sleep until timeout
* @expires: timeout value (ktime_t)
+ * @delta: slack in expires timeout (ktime_t)
* @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
*
* Make the current task sleep until the given expiry time has
* elapsed. The routine will return immediately unless
* the current task state has been set (see set_current_state()).
*
+ * The @delta argument gives the kernel the freedom to schedule the
+ * actual wakeup to a time that is both power and performance friendly.
+ * The kernel gives the normal best-effort behavior for "@expires+@delta",
+ * but may decide to fire the timer earlier, though never earlier than @expires.
+ *
* You can set the task state as follows -
*
* %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
@@ -1702,7 +1721,7 @@ void __init hrtimers_init(void)
*
* Returns 0 when the timer has expired otherwise -EINTR
*/
-int __sched schedule_hrtimeout(ktime_t *expires,
+int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
const enum hrtimer_mode mode)
{
struct hrtimer_sleeper t;
@@ -1726,7 +1745,7 @@ int __sched schedule_hrtimeout(ktime_t *expires,
}
hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
- hrtimer_set_expires(&t.timer, *expires);
+ hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
hrtimer_init_sleeper(&t, current);
@@ -1744,4 +1763,33 @@ int __sched schedule_hrtimeout(ktime_t *expires,
return !t.task ? 0 : -EINTR;
}
+EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
+
+/**
+ * schedule_hrtimeout - sleep until timeout
+ * @expires: timeout value (ktime_t)
+ * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
+ * pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired otherwise -EINTR
+ */
+int __sched schedule_hrtimeout(ktime_t *expires,
+ const enum hrtimer_mode mode)
+{
+ return schedule_hrtimeout_range(expires, 0, mode);
+}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
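A hedged sketch of how a caller might use the new schedule_hrtimeout_range() (the wrapper name and the 10 ms / 1 ms values are illustrative, not from the patch): sleep for roughly 10 ms, giving the kernel up to 1 ms of extra slack to group the wakeup with others.

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/sched.h>

static int example_sleep_about_10ms(void)
{
	ktime_t timeout = ktime_set(0, 10 * NSEC_PER_MSEC);

	set_current_state(TASK_INTERRUPTIBLE);
	/* returns 0 on expiry, -EINTR if a signal woke us early */
	return schedule_hrtimeout_range(&timeout, NSEC_PER_MSEC,
					HRTIMER_MODE_REL);
}

Passing a delta of 0, as the re-implemented schedule_hrtimeout() does above, keeps the previous exact-deadline behaviour.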