It aroused my curiosity, and I wanted to see it in action.
I downloaded the patch from here and tried to backport it to 2.6.34.7 (downloaded from here).
My maiden attempt at backporting the patch succeeded, and I compiled and installed the .deb file.
Here is the backport patch content:
diff -urN linux-2.6.34.7/Documentation/kernel-parameters.txt linux/Documentation/kernel-parameters.txt
--- linux-2.6.34.7/Documentation/kernel-parameters.txt 2010-09-13 22:24:58.000000000 +0530
+++ linux/Documentation/kernel-parameters.txt 2010-11-17 22:49:10.082513013 +0530
@@ -1643,6 +1643,9 @@
noapic [SMP,APIC] Tells the kernel to not make use of any
IOAPICs that may be present in the system.
+ noautogroup Disable scheduler automatic task group creation.
+
+
nobats [PPC] Do not use BATs for mapping kernel lowmem
on "Classic" PPC cores.
diff -urN linux-2.6.34.7/drivers/char/tty_io.c linux/drivers/char/tty_io.c
--- linux-2.6.34.7/drivers/char/tty_io.c 2010-09-13 22:24:58.000000000 +0530
+++ linux/drivers/char/tty_io.c 2010-11-17 22:38:38.750591065 +0530
@@ -3049,6 +3049,7 @@
put_pid(tsk->signal->tty_old_pgrp);
tsk->signal->tty = tty_kref_get(tty);
tsk->signal->tty_old_pgrp = NULL;
+ sched_autogroup_create_attach(tsk);
}
static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty)
diff -urN linux-2.6.34.7/include/linux/sched.h linux/include/linux/sched.h
--- linux-2.6.34.7/include/linux/sched.h 2010-09-13 22:24:58.000000000 +0530
+++ linux/include/linux/sched.h 2010-11-17 21:40:45.286509459 +0530
@@ -514,6 +514,8 @@
spinlock_t lock;
};
+struct autogroup;
+
/*
* NOTE! "signal_struct" does not have it's own
* locking, because a shared signal_struct always
@@ -580,6 +582,9 @@
struct tty_struct *tty; /* NULL if no tty */
+#ifdef CONFIG_SCHED_AUTOGROUP
+ struct autogroup *autogroup;
+#endif
/*
* Cumulative resource counters for dead threads in the group,
* and for reaped dead child processes forked by this group.
@@ -1904,6 +1909,21 @@
extern unsigned int sysctl_sched_compat_yield;
+#ifdef CONFIG_SCHED_AUTOGROUP
+extern unsigned int sysctl_sched_autogroup_enabled;
+
+extern void sched_autogroup_create_attach(struct task_struct *p);
+extern void sched_autogroup_detach(struct task_struct *p);
+extern void sched_autogroup_fork(struct signal_struct *sig);
+extern void sched_autogroup_exit(struct signal_struct *sig);
+#else
+static inline void sched_autogroup_create_attach(struct task_struct *p) { }
+static inline void sched_autogroup_detach(struct task_struct *p) { }
+static inline void sched_autogroup_fork(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+#endif
+
+
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
diff -urN linux-2.6.34.7/init/Kconfig linux/init/Kconfig
--- linux-2.6.34.7/init/Kconfig 2010-09-13 22:24:58.000000000 +0530
+++ linux/init/Kconfig 2010-11-17 22:48:16.986827765 +0530
@@ -614,6 +614,18 @@
endif # CGROUPS
+config SCHED_AUTOGROUP
+ bool "Automatic process group scheduling"
+ select CGROUPS
+ select CGROUP_SCHED
+ select FAIR_GROUP_SCHED
+ help
+ This option optimizes the scheduler for common desktop workloads by
+ automatically creating and populating task groups. This separation
+ of workloads isolates aggressive CPU burners (like build jobs) from
+ desktop applications. Task group autogeneration is currently based
+ upon task tty association.
+
config MM_OWNER
bool
diff -urN linux-2.6.34.7/kernel/fork.c linux/kernel/fork.c
--- linux-2.6.34.7/kernel/fork.c 2010-09-13 22:24:58.000000000 +0530
+++ linux/kernel/fork.c 2010-11-17 22:35:27.942763006 +0530
@@ -886,6 +886,8 @@
posix_cpu_timers_init_group(sig);
tty_audit_fork(sig);
+ sched_autogroup_fork(sig);
+
sig->oom_adj = current->signal->oom_adj;
diff -urN linux-2.6.34.7/kernel/sched_autogroup.c linux/kernel/sched_autogroup.c
--- linux-2.6.34.7/kernel/sched_autogroup.c 1970-01-01 05:30:00.000000000 +0530
+++ linux/kernel/sched_autogroup.c 2010-11-17 22:43:28.726509874 +0530
@@ -0,0 +1,140 @@
+#ifdef CONFIG_SCHED_AUTOGROUP
+
+unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
+
+struct autogroup {
+ struct kref kref;
+ struct task_group *tg;
+};
+
+static struct autogroup autogroup_default;
+
+static void autogroup_init(struct task_struct *init_task)
+{
+ autogroup_default.tg = &init_task_group;
+ kref_init(&autogroup_default.kref);
+ init_task->signal->autogroup = &autogroup_default;
+}
+
+static inline void autogroup_destroy(struct kref *kref)
+{
+ struct autogroup *ag = container_of(kref, struct autogroup, kref);
+ struct task_group *tg = ag->tg;
+
+ kfree(ag);
+ sched_destroy_group(tg);
+}
+
+static inline void autogroup_kref_put(struct autogroup *ag)
+{
+ kref_put(&ag->kref, autogroup_destroy);
+}
+
+static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
+{
+ kref_get(&ag->kref);
+ return ag;
+}
+
+static inline struct autogroup *autogroup_create(void)
+{
+ struct autogroup *ag = kmalloc(sizeof(*ag), GFP_KERNEL);
+
+ if (!ag)
+ goto out_fail;
+
+ ag->tg = sched_create_group(&init_task_group);
+ kref_init(&ag->kref);
+
+ if (!(IS_ERR(ag->tg)))
+ return ag;
+
+out_fail:
+ if (ag) {
+ kfree(ag);
+ WARN_ON(1);
+ } else
+ WARN_ON(1);
+
+ return autogroup_kref_get(&autogroup_default);
+}
+
+static inline struct task_group *
+autogroup_task_group(struct task_struct *p, struct task_group *tg)
+{
+ int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+
+ enabled &= (tg == &root_task_group);
+ enabled &= (p->sched_class == &fair_sched_class);
+ enabled &= (!(p->flags & PF_EXITING));
+
+ if (enabled)
+ return p->signal->autogroup->tg;
+
+ return tg;
+}
+
+static void
+autogroup_move_group(struct task_struct *p, struct autogroup *ag)
+{
+ struct autogroup *prev;
+ struct task_struct *t;
+ struct rq *rq;
+ unsigned long flags;
+
+ rq = task_rq_lock(p, &flags);
+ prev = p->signal->autogroup;
+ if (prev == ag) {
+ task_rq_unlock(rq, &flags);
+ return;
+ }
+
+ p->signal->autogroup = autogroup_kref_get(ag);
+ __sched_move_task(p, rq);
+ task_rq_unlock(rq, &flags);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(t, &p->thread_group, thread_group) {
+ sched_move_task(t);
+ }
+ rcu_read_unlock();
+
+ autogroup_kref_put(prev);
+}
+
+void sched_autogroup_create_attach(struct task_struct *p)
+{
+ struct autogroup *ag = autogroup_create();
+
+ autogroup_move_group(p, ag);
+ /* drop extra reference added by autogroup_create() */
+ autogroup_kref_put(ag);
+}
+EXPORT_SYMBOL(sched_autogroup_create_attach);
+
+/* currently has no users */
+void sched_autogroup_detach(struct task_struct *p)
+{
+ autogroup_move_group(p, &autogroup_default);
+}
+EXPORT_SYMBOL(sched_autogroup_detach);
+
+void sched_autogroup_fork(struct signal_struct *sig)
+{
+ sig->autogroup = autogroup_kref_get(current->signal->autogroup);
+}
+
+void sched_autogroup_exit(struct signal_struct *sig)
+{
+ autogroup_kref_put(sig->autogroup);
+}
+
+static int __init setup_autogroup(char *str)
+{
+ sysctl_sched_autogroup_enabled = 0;
+
+ return 1;
+}
+
+__setup("noautogroup", setup_autogroup);
+#endif
diff -urN linux-2.6.34.7/kernel/sched_autogroup.h linux/kernel/sched_autogroup.h
--- linux-2.6.34.7/kernel/sched_autogroup.h 1970-01-01 05:30:00.000000000 +0530
+++ linux/kernel/sched_autogroup.h 2010-11-17 22:39:46.575555847 +0530
@@ -0,0 +1,18 @@
+#ifdef CONFIG_SCHED_AUTOGROUP
+
+static void __sched_move_task(struct task_struct *tsk, struct rq *rq);
+
+static inline struct task_group *
+autogroup_task_group(struct task_struct *p, struct task_group *tg);
+
+#else /* !CONFIG_SCHED_AUTOGROUP */
+
+static inline void autogroup_init(struct task_struct *init_task) { }
+
+static inline struct task_group *
+autogroup_task_group(struct task_struct *p, struct task_group *tg)
+{
+ return tg;
+}
+
+#endif /* CONFIG_SCHED_AUTOGROUP */
diff -urN linux-2.6.34.7/kernel/sched.c linux/kernel/sched.c
--- linux-2.6.34.7/kernel/sched.c 2010-09-13 22:24:58.000000000 +0530
+++ linux/kernel/sched.c 2010-11-17 22:04:34.054508727 +0530
@@ -77,6 +77,7 @@
#include <asm/irq_regs.h>
#include "sched_cpupri.h"
+#include "sched_autogroup.h"
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
@@ -311,13 +312,20 @@
{
struct task_group *tg;
+
#ifdef CONFIG_CGROUP_SCHED
- tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
- struct task_group, css);
+ struct cgroup_subsys_state *css;
+
+ css = task_subsys_state(p, cpu_cgroup_subsys_id);
+
+ tg = container_of(css, struct task_group, css);
+
+ return autogroup_task_group(p, tg);
#else
tg = &init_task_group;
-#endif
+
return tg;
+#endif
}
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
@@ -1935,6 +1943,7 @@
#include "sched_idletask.c"
#include "sched_fair.c"
#include "sched_rt.c"
+#include "sched_autogroup.c"
#ifdef CONFIG_SCHED_DEBUG
# include "sched_debug.c"
#endif
@@ -7738,7 +7747,7 @@
#ifdef CONFIG_CGROUP_SCHED
list_add(&init_task_group.list, &task_groups);
INIT_LIST_HEAD(&init_task_group.children);
-
+ autogroup_init(&init_task);
#endif /* CONFIG_CGROUP_SCHED */
#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
@@ -8257,15 +8266,11 @@
/* change task's runqueue when it moves between groups.
* The caller of this function should have put the task in its new group
* by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
- * reflect its new group.
+ * reflect its new group. Called with the runqueue lock held.
*/
-void sched_move_task(struct task_struct *tsk)
+void __sched_move_task(struct task_struct *tsk, struct rq *rq)
{
int on_rq, running;
- unsigned long flags;
- struct rq *rq;
-
- rq = task_rq_lock(tsk, &flags);
update_rq_clock(rq);
@@ -8288,6 +8293,15 @@
tsk->sched_class->set_curr_task(rq);
if (on_rq)
enqueue_task(rq, tsk, 0, false);
+}
+
+void sched_move_task(struct task_struct *tsk)
+{
+ struct rq *rq;
+ unsigned long flags;
+
+ rq = task_rq_lock(tsk, &flags);
+ __sched_move_task(tsk, rq);
task_rq_unlock(rq, &flags);
}
diff -urN linux-2.6.34.7/kernel/sysctl.c linux/kernel/sysctl.c
--- linux-2.6.34.7/kernel/sysctl.c 2010-09-13 22:24:58.000000000 +0530
+++ linux/kernel/sysctl.c 2010-11-17 22:45:38.558806407 +0530
@@ -354,6 +354,17 @@
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#ifdef CONFIG_SCHED_AUTOGROUP
+ {
+ .procname = "sched_autogroup_enabled",
+ .data = &sysctl_sched_autogroup_enabled,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
#ifdef CONFIG_PROVE_LOCKING
{
.procname = "prove_locking",