/*
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#ifndef _LINUX_HUGETLB_CGROUP_H
#define _LINUX_HUGETLB_CGROUP_H

#include <linux/mmdebug.h>

struct hugetlb_cgroup;
struct resv_map;
struct file_region;

/*
 * Minimum page order trackable by hugetlb cgroup.
 * At least 4 pages are necessary for all the tracking information.
 * The second tail page (hpage[2]) is the fault usage cgroup.
 * The third tail page (hpage[3]) is the reservation usage cgroup.
 */
#define HUGETLB_CGROUP_MIN_ORDER 2

#ifdef CONFIG_CGROUP_HUGETLB
enum hugetlb_memory_event {
	HUGETLB_MAX,
	HUGETLB_NR_MEMORY_EVENTS,
};

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;

	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];

	/*
	 * the counter to account for hugepage reservations from hugetlb.
	 */
	struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE];

	atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
	atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];

	/* Handle for "hugetlb.events" */
	struct cgroup_file events_file[HUGE_MAX_HSTATE];

	/* Handle for "hugetlb.events.local" */
	struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
};

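/*
 * The tracked hugetlb_cgroup pointers live in the compound page's tail
 * pages: page[2].private holds the fault-usage cgroup and page[3].private
 * the reservation-usage cgroup (see HUGETLB_CGROUP_MIN_ORDER above).
 */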
static inline struct hugetlb_cgroup *
__hugetlb_cgroup_from_page(struct page *page, bool rsvd)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);

	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
		return NULL;
	if (rsvd)
		return (struct hugetlb_cgroup *)page[3].private;
	else
		return (struct hugetlb_cgroup *)page[2].private;
}

static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
	return __hugetlb_cgroup_from_page(page, false);
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_page_rsvd(struct page *page)
{
	return __hugetlb_cgroup_from_page(page, true);
}

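/*
 * Stash the hugetlb_cgroup pointer in the appropriate tail page; returns
 * -1 if the page is below HUGETLB_CGROUP_MIN_ORDER and cannot be tracked.
 */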
static inline int __set_hugetlb_cgroup(struct page *page,
				       struct hugetlb_cgroup *h_cg, bool rsvd)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);

	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
		return -1;
	if (rsvd)
		page[3].private = (unsigned long)h_cg;
	else
		page[2].private = (unsigned long)h_cg;
	return 0;
}

static inline int set_hugetlb_cgroup(struct page *page,
				     struct hugetlb_cgroup *h_cg)
{
	return __set_hugetlb_cgroup(page, h_cg, false);
}

static inline int set_hugetlb_cgroup_rsvd(struct page *page,
					  struct hugetlb_cgroup *h_cg)
{
	return __set_hugetlb_cgroup(page, h_cg, true);
}

static inline bool hugetlb_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}

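/*
 * A resv_map holds a css reference for its reservation cgroup; take an
 * extra reference whenever the map's uncharge info is duplicated so the
 * cgroup stays pinned until every copy has been uncharged.
 */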
static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
	css_put(&h_cg->css);
}

static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
	if (resv_map->css)
		css_get(resv_map->css);
}

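/*
 * Charging is two-step: hugetlb_cgroup_charge_cgroup() charges the current
 * task's cgroup and returns it in *ptr, hugetlb_cgroup_commit_charge() then
 * binds that cgroup to the allocated page.  The _rsvd variants account
 * against the reservation counters rather than the fault counters.
 */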
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
					      struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg,
					 struct page *page);
extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
					       struct hugetlb_cgroup *h_cg,
					       struct page *page);
extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
					 struct page *page);
extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
					       struct page *page);

extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg);
extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg);
extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
					    unsigned long start,
					    unsigned long end);

extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						struct file_region *rg,
						unsigned long nr_pages,
						bool region_del);

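/*
 * hugetlb_cgroup_migrate() moves the charge information from the old huge
 * page to its replacement when a huge page is migrated.
 */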
extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct page *oldhpage,
				   struct page *newhpage);

#else
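/*
 * CONFIG_CGROUP_HUGETLB is not set: provide no-op stubs so callers need no
 * #ifdefs of their own.
 */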
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						       struct file_region *rg,
						       unsigned long nr_pages,
						       bool region_del)
{
}

static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_page_resv(struct page *page)
{
	return NULL;
}

static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_page_rsvd(struct page *page)
{
	return NULL;
}

static inline int set_hugetlb_cgroup(struct page *page,
				     struct hugetlb_cgroup *h_cg)
{
	return 0;
}

static inline int set_hugetlb_cgroup_rsvd(struct page *page,
					  struct hugetlb_cgroup *h_cg)
{
	return 0;
}

static inline bool hugetlb_cgroup_disabled(void)
{
	return true;
}

static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
}

static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
}

static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					       struct hugetlb_cgroup **ptr)
{
	return 0;
}

static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
						     unsigned long nr_pages,
						     struct hugetlb_cgroup **ptr)
{
	return 0;
}

static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg,
						struct page *page)
{
}

static inline void
hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
}

static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
						struct page *page)
{
}

static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
						      unsigned long nr_pages,
						      struct page *page)
{
}

static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
						   unsigned long nr_pages,
						   struct hugetlb_cgroup *h_cg)
{
}

static inline void
hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
}

static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
						    unsigned long start,
						    unsigned long end)
{
}

static inline void hugetlb_cgroup_file_init(void)
{
}

static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
					  struct page *newhpage)
{
}

#endif /* CONFIG_CGROUP_HUGETLB */
#endif /* _LINUX_HUGETLB_CGROUP_H */