/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT 1
#define DISABLE_NUMA_STAT 0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented, and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
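
/*
 * Usage sketch (illustrative only, not part of the upstream header):
 * callers bump one of the events from <linux/vm_event_item.h>, e.g.
 *
 *	count_vm_event(PGFAULT);
 *	count_vm_events(PGPGIN, nr_pages);	(nr_pages is a caller-side delta)
 *	__count_vm_event(PGMAJFAULT);
 *
 * The plain helpers use this_cpu ops and are safe in any context; the
 * double-underscore variants use raw_cpu ops and rely on the fact that
 * these counters are allowed to be a little racy.
 */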

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x) count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
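
/*
 * Illustrative example (not in the upstream header): with a per-zone
 * event such as PGALLOC, a call like
 *
 *	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 *
 * expands to __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zid, delta),
 * i.e. it selects the PGALLOC_<zone> counter that matches the page's zone.
 * "page" and "order" stand in for whatever the caller has at hand.
 */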

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
				enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
				enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * A more accurate version that also considers the currently pending
 * per-cpu deltas. For that we need to loop over all online CPUs to pick
 * up the current deltas. There is no synchronization, so the result
 * cannot be exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
				enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
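
/*
 * Usage sketch (illustrative only, not part of the upstream header): a
 * caller that must not miss recently queued per-cpu updates, such as a
 * "safe" watermark check, would prefer the snapshot despite the cost of
 * walking the online CPUs, while a periodic statistics dump can use the
 * cheaper plain read:
 *
 *	free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *	free = zone_page_state(zone, NR_FREE_PAGES);
 *
 * ("free" here is just a hypothetical local variable of the caller.)
 */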

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
				enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
				enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
				enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}
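
/*
 * Illustrative note (not in the upstream header): for the few node items
 * accounted in bytes (those for which vmstat_item_in_bytes() is true,
 * e.g. the _B-suffixed slab counters), the delta passed here is expected
 * to be a whole number of pages expressed in bytes, for example:
 *
 *	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);
 *
 * The !SMP path above converts such deltas back to pages before adding
 * them to the counters.
 */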

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
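
/*
 * Usage sketch (illustrative only, not part of the upstream header): the
 * page allocator's free/allocate paths adjust the free-page counters
 * through this helper, conceptually something like
 *
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 *
 * where "order" and "migratetype" come from the caller. Pages on CMA
 * pageblocks are additionally tracked in NR_FREE_CMA_PAGES.
 */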

extern const char * const vmstat_text[];

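/*
 * Illustrative note (not in the upstream header): vmstat_text[] lists the
 * zone stat names first, then (with CONFIG_NUMA) the numa stat names, then
 * the node stat names, the writeback stat names and finally the vm event
 * names; the helpers below simply index into the corresponding slice.
 */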
static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}
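
/*
 * Illustrative example (not in the upstream header): node_stat_name() for
 * NR_LRU_BASE + LRU_ACTIVE_FILE yields "nr_active_file", so
 * lru_list_name(LRU_ACTIVE_FILE) returns the substring "active_file",
 * i.e. the name with its "nr_" prefix skipped.
 */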

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */

#endif /* _LINUX_VMSTAT_H */