/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
	struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	slab_flags_t flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *freelist_cache;
	unsigned int freelist_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. 'size' contains the total
	 * object size including these internal fields, while 'obj_offset'
	 * and 'object_size' contain the offset to the user object and its
	 * size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
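	/*
	 * Layout sketch (illustrative, not from the upstream header): with
	 * CONFIG_DEBUG_SLAB the per-object memory is expected to look
	 * roughly like
	 *
	 *	|<- obj_offset ->|<--- object_size --->|
	 *	[  debug header ][    user object     ][ trailing debug data ]
	 *	|<------------------------ size ------------------------->|
	 *
	 * i.e. 'size' is the full per-slot footprint the allocator manages,
	 * while callers only ever see 'object_size' bytes starting at
	 * 'obj_offset' into the slot.
	 */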

#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
#endif
#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};
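
/*
 * Illustrative sketch, not part of the upstream header: the reciprocal
 * field above only gives correct results if it was derived from the same
 * 'size' that obj_to_index() below divides by.  Cache-setup code (see
 * mm/slab.c) is expected to do roughly the following once the final
 * object size is known; the helper name here is made up for illustration.
 */
static inline void kmem_cache_set_size_sketch(struct kmem_cache *cachep,
					      unsigned int size)
{
	cachep->size = size;
	cachep->reciprocal_buffer_size = reciprocal_value(size);
}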

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x)
{
	void *object = x - (x - page->s_mem) % cache->size;
	void *last_object = page->s_mem + (cache->num - 1) * cache->size;

	if (unlikely(object > last_object))
		return last_object;
	else
		return object;
}
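
/*
 * Illustrative note, not from the upstream header: nearest_obj() above
 * rounds an arbitrary interior pointer down to the start of the object
 * containing it (clamping to the last object on the slab).  Debugging
 * code such as KASAN uses it along the lines of
 *
 *	void *obj = nearest_obj(cachep, virt_to_head_page(ptr), ptr);
 *
 * to report on the enclosing object rather than on a raw address.
 */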

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
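
/*
 * Illustrative sketch, not part of the upstream header: the inverse of
 * obj_to_index() needs no reciprocal trick at all, a plain multiply is
 * enough.  mm/slab.c keeps a helper of roughly this shape
 * (index_to_obj()); it is repeated here only to mirror the function above.
 */
static inline void *index_to_obj_sketch(const struct kmem_cache *cache,
					const struct page *page,
					unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}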

#endif /* _LINUX_SLAB_DEF_H */