/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_block;
struct resource;
struct vmem_altmap;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers which rely on fully initialized page->flags (and friends)
 * should use this rather than pfn_valid && pfn_to_page.
 */
#define pfn_to_online_page(pfn)				\
({							\
	struct page *___page = NULL;			\
	unsigned long ___nr = pfn_to_section_nr(pfn);	\
							\
	if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
		___page = pfn_to_page(pfn);		\
	___page;					\
})
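
/*
 * Example (an illustrative sketch, not part of this header): a pfn
 * walker checks the returned page instead of open-coding
 * pfn_valid() + pfn_to_page():
 *
 *	struct page *page = pfn_to_online_page(pfn);
 *
 *	if (!page)
 *		continue;	// pfn is offline or invalid, skip it
 */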

/*
 * Types for free bootmem stored in page->lru.next. These have to be in
 * some random range in unsigned long space for debugging purposes.
 */
enum {
	MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
	SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
	MIX_SECTION_INFO,
	NODE_INFO,
	MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
};

/* Types for controlling the zone type of onlined and offlined memory */
enum {
	MMOP_OFFLINE = -1,
	MMOP_ONLINE_KEEP,
	MMOP_ONLINE_KERNEL,
	MMOP_ONLINE_MOVABLE,
};
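
/*
 * Illustrative sketch: the online_type ends up as the third argument of
 * online_pages() below, e.g. to online a range into ZONE_MOVABLE:
 *
 *	ret = online_pages(start_pfn, nr_pages, MMOP_ONLINE_MOVABLE);
 */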

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures that the size of a zone
 * cannot change while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
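
/*
 * Illustrative sketch (not part of this header): a reader samples the
 * zone span with the usual seqlock retry loop, while a resizer takes
 * pgdat_resize_lock() around zone_span_writelock():
 *
 *	unsigned seq;
 *	unsigned long start_pfn, pages, flags;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 *
 *	pgdat_resize_lock(zone->zone_pgdat, &flags);
 *	zone_span_writelock(zone);
 *	// ... update zone->zone_start_pfn / zone->spanned_pages ...
 *	zone_span_writeunlock(zone);
 *	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 */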
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by a firmware interface */
extern int online_pages(unsigned long, unsigned long, int);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
	unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long, unsigned long);

typedef void (*online_page_callback_t)(struct page *page);

extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern void __online_page_set_limits(struct page *page);
extern void __online_page_increment_counters(struct page *page);
extern void __online_page_free(struct page *page);
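
/*
 * Illustrative sketch: a driver (e.g. a balloon driver) can intercept
 * page onlining; my_online_page() is hypothetical and mirrors what the
 * default callback does with the three helpers above:
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		__online_page_increment_counters(page);
 *		__online_page_free(page);	// or keep the page instead
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 */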

extern int try_online_node(int nid);

extern bool memhp_auto_online;
/* Set if the movable_node boot option was specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
extern int arch_remove_memory(u64 start, u64 size,
		struct vmem_altmap *altmap);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		bool want_memblock)
{
	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap, bool want_memblock);
#endif /* ARCH_HAS_ADD_PAGES */

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an arch has a generic-style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * For now, arch_free_nodedata() is defined only for the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this function is used to allocate
 * the pgdat.
 * XXX: kmalloc_node() cannot be used here, because the pgdat for the
 * new node has not been allocated/initialized yet; getting memory from
 * the new node itself will require more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path of node hot-add.
 * For node hot-remove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
#endif
extern void put_page_bootmem(struct page *page);
extern void get_page_bootmem(unsigned long info, struct page *page,
			     unsigned long type);

void get_online_mems(void);
void put_online_mems(void);
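
/*
 * Illustrative sketch: code that must prevent memory from being
 * hot-removed while it looks at pages brackets the access:
 *
 *	get_online_mems();
 *	// ... inspect pages that could otherwise go away ...
 *	put_online_mems();
 */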

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
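
/*
 * Illustrative sketch: the hotplug operations themselves serialize
 * against each other, and against the readers above, with:
 *
 *	mem_hotplug_begin();
 *	// ... add or remove memory ...
 *	mem_hotplug_done();
 */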

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
 })

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern void remove_memory(int nid, u64 start, u64 size);
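
/*
 * Illustrative sketch (the variables are assumptions): a teardown path
 * offlines the pages first and removes the memory only on success:
 *
 *	if (!offline_pages(start_pfn, nr_pages))
 *		remove_memory(nid, start, size);
 */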

#else
static inline bool is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return false;
}

static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}

static inline void remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *));
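
/*
 * Illustrative sketch: walk_memory_range() invokes func on each memory
 * block in the range and stops on a nonzero return; count_blocks() is
 * hypothetical:
 *
 *	static int count_blocks(struct memory_block *mem, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;	// nonzero would abort the walk
 *	}
 *
 *	int n = 0;
 *	walk_memory_range(start_pfn, end_pfn, &n, count_blocks);
 */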
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online);
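
/*
 * Illustrative sketch (the node, address and size are made-up values):
 *
 *	int rc = add_memory(nid, 0x100000000ULL, 128UL << 20);
 *
 *	if (rc)
 *		pr_err("memory hot-add failed: %d\n", rc);
 */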
extern int arch_add_memory(int nid, u64 start, u64 size,
		struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern void remove_memory(int nid, u64 start, u64 size);
extern int sparse_add_one_section(struct pglist_data *pgdat,
		unsigned long start_pfn, struct vmem_altmap *altmap);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
		int online_type);
extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
344#endif /* __LINUX_MEMORY_HOTPLUG_H */