From: Dave Hansen <dave.hansen@linux.intel.com>
commit 76af6a054da4055305ddb28c5eb151b9ee4f74f9 upstream
Once upon a time, the node demotion updates were driven solely by memory
hotplug events. But now, there are handlers for both CPU and memory
hotplug.
However, the #ifdef around the code checks only memory hotplug. A
system that has HOTPLUG_CPU=y but MEMORY_HOTPLUG=n would miss CPU
hotplug events.
Update the #ifdef around the common code. Add memory and CPU-specific
function warnings when their Kconfig option is off.
[arnd@arndb.de: rework hotplug_memory_notifier() stub]
Link: https://lkml.kernel.org/r/20211013144029.2154629-1-arnd@kernel.org
Link: https://lkml.kernel.org/r/20210924161255.E5FE8F7E@davehans-spike.ostc.intel.com
Fixes: 884a6e5d1f93 ("mm/migrate: update node demotion order on hotplug events")
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Wei Xu <weixugc@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
include/linux/memory.h | 5 ++++-
mm/migrate.c | 42 +++++++++++++++++++++---------------------
mm/page_ext.c | 4 +---
mm/slab.c | 4 ++--
4 files changed, 28 insertions(+), 27 deletions(-)
diff --git a/include/linux/memory.h b/include/linux/memory.h
index a6ddefc..fc6e34d 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -133,7 +133,10 @@ extern struct memory_block *find_memory_block_hinted(struct mem_section *,
#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
#else
-#define hotplug_memory_notifier(fn, pri) ({ 0; })
+static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
+{
+ return 0;
+}
/* These aren't inline functions due to a GCC bug. */
#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
diff --git a/mm/migrate.c b/mm/migrate.c
index b4bb00a3..f0466e5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -3111,7 +3111,7 @@ int migrate_vma(const struct migrate_vma_ops *ops,
EXPORT_SYMBOL(migrate_vma);
#endif /* defined(MIGRATE_VMA_HELPER) */
-#if defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_HOTPLUG_CPU)
/* Disable reclaim-based migration. */
static void __disable_all_migrate_targets(void)
{
@@ -3254,25 +3254,6 @@ static void set_migration_target_nodes(void)
}
/*
- * React to hotplug events that might affect the migration targets
- * like events that online or offline NUMA nodes.
- *
- * The ordering is also currently dependent on which nodes have
- * CPUs. That means we need CPU on/offline notification too.
- */
-static int migration_online_cpu(unsigned int cpu)
-{
- set_migration_target_nodes();
- return 0;
-}
-
-static int migration_offline_cpu(unsigned int cpu)
-{
- set_migration_target_nodes();
- return 0;
-}
-
-/*
* This leaves migrate-on-reclaim transiently disabled between
* the MEM_GOING_OFFLINE and MEM_OFFLINE events. This runs
* whether reclaim-based migration is enabled or not, which
@@ -3329,6 +3310,25 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
return notifier_from_errno(0);
}
+/*
+ * React to hotplug events that might affect the migration targets
+ * like events that online or offline NUMA nodes.
+ *
+ * The ordering is also currently dependent on which nodes have
+ * CPUs. That means we need CPU on/offline notification too.
+ */
+static int migration_online_cpu(unsigned int cpu)
+{
+ set_migration_target_nodes();
+ return 0;
+}
+
+static int migration_offline_cpu(unsigned int cpu)
+{
+ set_migration_target_nodes();
+ return 0;
+}
+
static int __init migrate_on_reclaim_init(void)
{
int ret;
@@ -3348,4 +3348,4 @@ static int __init migrate_on_reclaim_init(void)
return 0;
}
late_initcall(migrate_on_reclaim_init);
-#endif /* CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/mm/page_ext.c b/mm/page_ext.c
index aad1201..48addc0 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -261,7 +261,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
total_usage += table_size;
return 0;
}
-#ifdef CONFIG_MEMORY_HOTPLUG
+
static void free_page_ext(void *addr)
{
if (is_vmalloc_addr(addr)) {
@@ -369,8 +369,6 @@ static int __meminit page_ext_callback(struct notifier_block *self,
return notifier_from_errno(ret);
}
-#endif
-
void __init page_ext_init(void)
{
unsigned long pfn;
diff --git a/mm/slab.c b/mm/slab.c
index 46f21e7..c12f3ce 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1129,7 +1129,7 @@ static int slab_offline_cpu(unsigned int cpu)
return 0;
}
-#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_NUMA)
/*
* Drains freelist for a node on each slab cache, used for memory hot-remove.
* Returns -EBUSY if all objects cannot be drained so that the node is not
@@ -1191,7 +1191,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
out:
return notifier_from_errno(ret);
}
-#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_NUMA */
/*
* swap the static kmem_cache_node with kmalloced memory
--
1.8.3.1