author		Ingo Molnar <mingo@kernel.org>	2025-05-28 10:08:49 +0200
committer	Ingo Molnar <mingo@kernel.org>	2025-06-13 08:47:16 +0200
commit		29dd6f8cd285360cc5a3e1dc696a960541020705 (patch)
tree		2168ab1b6a7d2ef7f945596cdf6c291e8a349686 /kernel/sched/debug.c
parent		sched: Clean up and standardize #if/#else/#endif markers in sched/deadline.c (diff)
sched: Clean up and standardize #if/#else/#endif markers in sched/debug.c
- Use the standard #ifdef marker format for larger blocks, where appropriate:

      #if CONFIG_FOO
      ...
      #else /* !CONFIG_FOO: */
      ...
      #endif /* !CONFIG_FOO */

- Fix whitespace noise and other inconsistencies.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20250528080924.2273858-9-mingo@kernel.org
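To make the convention concrete, here is a minimal sketch of what a larger conditional block looks like after this treatment. CONFIG_FOO is the placeholder option from the commit message above (not a real Kconfig symbol), and foo_enable() is a hypothetical helper invented for illustration:

	#ifdef CONFIG_FOO
	static void foo_enable(void)
	{
		/* Real implementation, built only when CONFIG_FOO=y. */
	}
	#else /* !CONFIG_FOO: */
	/* Empty stub so that callers need no #ifdefs of their own: */
	static void foo_enable(void) { }
	#endif /* !CONFIG_FOO */

Two details worth noting in the diff below: the #else comment names the negated condition with a trailing colon (marking the branch that follows), while the matching #endif comment drops the colon; and preprocessor directives nested inside a conditional branch gain a space after the '#', as in the '# define sched_verbose_write' line in the !CONFIG_SMP branch.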
Diffstat (limited to 'kernel/sched/debug.c')
-rw-r--r--	kernel/sched/debug.c | 36 ++++++++++++++++++------------------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index b384124d89d3..748709c03214 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -93,10 +93,10 @@ static void sched_feat_enable(int i)
{
static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
-#else
+#else /* !CONFIG_JUMP_LABEL: */
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
-#endif /* CONFIG_JUMP_LABEL */
+#endif /* !CONFIG_JUMP_LABEL */
static int sched_feat_set(char *cmp)
{
@@ -217,7 +217,7 @@ static const struct file_operations sched_scaling_fops = {
.release = single_release,
};
-#endif /* SMP */
+#endif /* CONFIG_SMP */
#ifdef CONFIG_PREEMPT_DYNAMIC
@@ -314,9 +314,9 @@ static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
return result;
}
-#else
-#define sched_verbose_write debugfs_write_file_bool
-#endif
+#else /* !CONFIG_SMP: */
+# define sched_verbose_write debugfs_write_file_bool
+#endif /* !CONFIG_SMP */
static const struct file_operations sched_verbose_fops = {
.read = debugfs_read_file_bool,
@@ -523,7 +523,7 @@ static __init int sched_init_debug(void)
sched_domains_mutex_lock();
update_sched_domain_debugfs();
sched_domains_mutex_unlock();
-#endif
+#endif /* CONFIG_SMP */
#ifdef CONFIG_NUMA_BALANCING
numa = debugfs_create_dir("numa_balancing", debugfs_sched);
@@ -533,7 +533,7 @@ static __init int sched_init_debug(void)
debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
-#endif
+#endif /* CONFIG_NUMA_BALANCING */
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
@@ -697,14 +697,14 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
P(se->avg.load_avg);
P(se->avg.util_avg);
P(se->avg.runnable_avg);
-#endif
+#endif /* CONFIG_SMP */
#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
@@ -877,8 +877,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
cfs_rq->tg_load_avg_contrib);
SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg",
atomic_long_read(&cfs_rq->tg->load_avg));
-#endif
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+#endif /* CONFIG_SMP */
#ifdef CONFIG_CFS_BANDWIDTH
SEQ_printf(m, " .%-30s: %d\n", "throttled",
cfs_rq->throttled);
@@ -954,9 +954,9 @@ static void print_cpu(struct seq_file *m, int cpu)
SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
cpu, freq / 1000, (freq % 1000));
}
-#else
+#else /* !CONFIG_X86: */
SEQ_printf(m, "cpu#%d\n", cpu);
-#endif
+#endif /* !CONFIG_X86 */
#define P(x) \
do { \
@@ -984,7 +984,7 @@ do { \
P64(avg_idle);
P64(max_idle_balance_cost);
#undef P64
-#endif
+#endif /* CONFIG_SMP */
#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n));
if (schedstat_enabled()) {
@@ -1166,7 +1166,7 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
task_node(p), task_numa_group_id(p));
show_numa_stats(p, m);
-#endif
+#endif /* CONFIG_NUMA_BALANCING */
}
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
@@ -1263,13 +1263,13 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.avg.util_avg);
P(se.avg.last_update_time);
PM(se.avg.util_est, ~UTIL_AVG_UNCHANGED);
-#endif
+#endif /* CONFIG_SMP */
#ifdef CONFIG_UCLAMP_TASK
__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
-#endif
+#endif /* CONFIG_UCLAMP_TASK */
P(policy);
P(prio);
if (task_has_dl_policy(p)) {